Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r-- drivers/net/ethernet/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/Makefile | 1
-rw-r--r-- drivers/net/ethernet/actions/owl-emac.c | 7
-rw-r--r-- drivers/net/ethernet/adi/adin1110.c | 2
-rw-r--r-- drivers/net/ethernet/airoha/Kconfig | 27
-rw-r--r-- drivers/net/ethernet/airoha/Makefile | 9
-rw-r--r-- drivers/net/ethernet/airoha/airoha_eth.c (renamed from drivers/net/ethernet/mediatek/airoha_eth.c) | 1370
-rw-r--r-- drivers/net/ethernet/airoha/airoha_eth.h | 552
-rw-r--r-- drivers/net/ethernet/airoha/airoha_npu.c | 520
-rw-r--r-- drivers/net/ethernet/airoha/airoha_npu.h | 34
-rw-r--r-- drivers/net/ethernet/airoha/airoha_ppe.c | 910
-rw-r--r-- drivers/net/ethernet/airoha/airoha_ppe_debugfs.c | 181
-rw-r--r-- drivers/net/ethernet/airoha/airoha_regs.h | 803
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_netdev.c | 47
-rw-r--r-- drivers/net/ethernet/amd/au1000_eth.c | 2
-rw-r--r-- drivers/net/ethernet/amd/pds_core/auxbus.c | 44
-rw-r--r-- drivers/net/ethernet/amd/pds_core/core.c | 7
-rw-r--r-- drivers/net/ethernet/amd/pds_core/core.h | 8
-rw-r--r-- drivers/net/ethernet/amd/pds_core/devlink.c | 7
-rw-r--r-- drivers/net/ethernet/amd/pds_core/main.c | 25
-rw-r--r-- drivers/net/ethernet/apm/xgene-v2/main.c | 4
-rw-r--r-- drivers/net/ethernet/apm/xgene-v2/mdio.c | 18
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c | 10
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c | 14
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 728
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 15
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c | 9
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 112
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 85
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 143
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 6
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 16
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c | 18
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 7
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 1089
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.h | 52
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c | 89
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmmii.c | 6
-rw-r--r-- drivers/net/ethernet/brocade/bna/bnad.c | 1
-rw-r--r-- drivers/net/ethernet/cadence/macb.h | 132
-rw-r--r-- drivers/net/ethernet/cadence/macb_main.c | 231
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c | 76
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_device.c | 16
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_device.h | 7
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 7
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 21
-rw-r--r-- drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c | 16
-rw-r--r-- drivers/net/ethernet/cisco/enic/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/cisco/enic/Makefile | 2
-rw-r--r-- drivers/net/ethernet/cisco/enic/cq_desc.h | 25
-rw-r--r-- drivers/net/ethernet/cisco/enic/cq_enet_desc.h | 142
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic.h | 17
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_ethtool.c | 51
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_main.c | 343
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_res.c | 87
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_res.h | 11
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_rq.c | 436
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_rq.h | 8
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_wq.c | 117
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_wq.h | 7
-rw-r--r-- drivers/net/ethernet/cisco/enic/vnic_cq.h | 45
-rw-r--r-- drivers/net/ethernet/cisco/enic/vnic_devcmd.h | 19
-rw-r--r-- drivers/net/ethernet/cisco/enic/vnic_enet.h | 5
-rw-r--r-- drivers/net/ethernet/cisco/enic/vnic_rq.h | 4
-rw-r--r-- drivers/net/ethernet/cisco/enic/vnic_wq.h | 2
-rw-r--r-- drivers/net/ethernet/cortina/gemini.c | 6
-rw-r--r-- drivers/net/ethernet/dec/tulip/tulip_core.c | 7
-rw-r--r-- drivers/net/ethernet/ec_bhf.c | 3
-rw-r--r-- drivers/net/ethernet/engleder/tsnep_main.c | 25
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 52
-rw-r--r-- drivers/net/ethernet/freescale/fec_ptp.c | 5
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_dtsec.c | 1
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.c | 14
-rw-r--r-- drivers/net/ethernet/freescale/ucc_geth.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/ucc_geth.h | 2
-rw-r--r-- drivers/net/ethernet/google/gve/gve.h | 94
-rw-r--r-- drivers/net/ethernet/google/gve/gve_adminq.c | 70
-rw-r--r-- drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c | 45
-rw-r--r-- drivers/net/ethernet/google/gve/gve_ethtool.c | 90
-rw-r--r-- drivers/net/ethernet/google/gve/gve_main.c | 384
-rw-r--r-- drivers/net/ethernet/google/gve/gve_rx.c | 30
-rw-r--r-- drivers/net/ethernet/google/gve/gve_rx_dqo.c | 110
-rw-r--r-- drivers/net/ethernet/google/gve/gve_tx.c | 41
-rw-r--r-- drivers/net/ethernet/google/gve/gve_tx_dqo.c | 31
-rw-r--r-- drivers/net/ethernet/google/gve/gve_utils.c | 6
-rw-r--r-- drivers/net/ethernet/hisilicon/hibmcge/Makefile | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h | 122
-rw-r--r-- drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c | 7
-rw-r--r-- drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c | 348
-rw-r--r-- drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.h | 11
-rw-r--r-- drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c | 58
-rw-r--r-- drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h | 1
-rw-r--r-- drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c | 298
-rw-r--r-- drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h | 5
-rw-r--r-- drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c | 10
-rw-r--r-- drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c | 55
-rw-r--r-- drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c | 103
-rw-r--r-- drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c | 22
-rw-r--r-- drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h | 105
-rw-r--r-- drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c | 181
-rw-r--r-- drivers/net/ethernet/hisilicon/hip04_eth.c | 4
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 4
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 24
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 3
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 63
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 14
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c | 3
-rw-r--r-- drivers/net/ethernet/hisilicon/hns_mdio.c | 2
-rw-r--r-- drivers/net/ethernet/ibm/emac/core.c | 7
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.c | 48
-rw-r--r-- drivers/net/ethernet/intel/Kconfig | 3
-rw-r--r-- drivers/net/ethernet/intel/e1000e/mac.c | 15
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_xsk.c | 4
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_xsk.h | 10
-rw-r--r-- drivers/net/ethernet/intel/iavf/Makefile | 2
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf.h | 35
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_main.c | 245
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_ptp.c | 485
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_ptp.h | 47
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_trace.h | 6
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_txrx.c | 433
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_txrx.h | 24
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_type.h | 239
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_types.h | 34
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_virtchnl.c | 203
-rw-r--r-- drivers/net/ethernet/intel/ice/devlink/devlink.c | 102
-rw-r--r-- drivers/net/ethernet/intel/ice/devlink/health.c | 6
-rw-r--r-- drivers/net/ethernet/intel/ice/ice.h | 30
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_arfs.c | 33
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_arfs.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_base.c | 20
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_common.c | 211
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_common.h | 7
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ddp.c | 4
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_dpll.c | 14
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ethtool.c | 11
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c | 21
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_gnss.c | 29
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_gnss.h | 4
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 12
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_idc.c | 64
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_irq.c | 275
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_irq.h | 13
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h | 9
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lib.c | 66
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_main.c | 98
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ptp.c | 515
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ptp.h | 17
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ptp_consts.h | 75
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ptp_hw.c | 430
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ptp_hw.h | 63
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_sriov.c | 154
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_txrx.c | 27
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_txrx.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_txrx_lib.c | 26
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_type.h | 9
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_vf_lib.h | 3
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_virtchnl.c | 119
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_virtchnl.h | 6
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c | 7
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c | 24
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_xsk.c | 4
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_xsk.h | 8
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_lib.c | 32
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c | 51
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_txrx.c | 38
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_txrx.h | 25
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_ptp.c | 6
-rw-r--r-- drivers/net/ethernet/intel/igc/igc.h | 1
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_main.c | 150
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_xdp.c | 19
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 1
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c | 21
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 4
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 13
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h | 3
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ipsec.c | 21
-rw-r--r-- drivers/net/ethernet/marvell/mvneta.c | 6
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 15
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 14
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c | 1
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/ptp.c | 3
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 2
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/Makefile | 2
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c | 7
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c | 15
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 122
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 17
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c | 6
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 34
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c | 201
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h | 9
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c | 14
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c | 225
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.h | 24
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c | 2
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_main.c | 3
-rw-r--r-- drivers/net/ethernet/mediatek/Kconfig | 8
-rw-r--r-- drivers/net/ethernet/mediatek/Makefile | 1
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 81
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 11
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_ppe_offload.c | 22
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_star_emac.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/alloc.c | 28
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 119
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 19
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4.h | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 15
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/port.c | 20
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 120
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/dev.c | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c | 46
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/dpll.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 14
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/fs.h | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/params.c | 16
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/params.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/port.c | 73
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/port.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c | 119
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/rss.c | 28
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/rss.h | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c | 20
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c | 11
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h | 13
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 29
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tir.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tir.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/trap.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 17
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 56
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 97
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h | 40
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c | 715
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 154
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 56
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 22
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 133
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c | 15
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c | 146
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 36
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/events.c | 36
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 178
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 20
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fw.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c | 15
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/health.c | 15
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/hwmon.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/hwmon.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c | 45
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c | 582
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h | 39
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c | 19
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/main.c | 31
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 94
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/port.c | 165
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c | 231
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h | 24
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c | 41
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c | 24
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c | 33
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_types.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/vport.c | 25
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/pci.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/pci_hw.h | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 30
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c | 27
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 48
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 66
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/trap.h | 5
-rw-r--r-- drivers/net/ethernet/meta/fbnic/Makefile | 3
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic.h | 9
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_csr.c | 1
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_csr.h | 84
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c | 174
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c | 882
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_fw.c | 109
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_fw.h | 8
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_netdev.c | 50
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_netdev.h | 9
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_phylink.c | 1
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_rpc.c | 356
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_rpc.h | 35
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_tlv.c | 55
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_tlv.h | 39
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_txrx.c | 269
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_txrx.h | 33
-rw-r--r-- drivers/net/ethernet/micrel/ks8851_spi.c | 2
-rw-r--r-- drivers/net/ethernet/microchip/lan743x_ptp.c | 6
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_main.c | 1
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_main.c | 1
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_packet.c | 4
-rw-r--r-- drivers/net/ethernet/microsoft/mana/gdma_main.c | 71
-rw-r--r-- drivers/net/ethernet/microsoft/mana/hw_channel.c | 6
-rw-r--r-- drivers/net/ethernet/microsoft/mana/mana_bpf.c | 2
-rw-r--r-- drivers/net/ethernet/microsoft/mana/mana_en.c | 90
-rw-r--r-- drivers/net/ethernet/netronome/nfp/crypto/ipsec.c | 11
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_hwmon.c | 40
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_main.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 8
-rw-r--r-- drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c | 9
-rw-r--r-- drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c | 4
-rw-r--r-- drivers/net/ethernet/realtek/Kconfig | 3
-rw-r--r-- drivers/net/ethernet/realtek/r8169_main.c | 82
-rw-r--r-- drivers/net/ethernet/renesas/ravb_ptp.c | 3
-rw-r--r-- drivers/net/ethernet/renesas/rcar_gen4_ptp.c | 2
-rw-r--r-- drivers/net/ethernet/renesas/rswitch.c | 7
-rw-r--r-- drivers/net/ethernet/rocker/rocker_main.c | 2
-rw-r--r-- drivers/net/ethernet/sfc/Kconfig | 5
-rw-r--r-- drivers/net/ethernet/sfc/Makefile | 2
-rw-r--r-- drivers/net/ethernet/sfc/ef10.c | 8
-rw-r--r-- drivers/net/ethernet/sfc/ef100_netdev.c | 1
-rw-r--r-- drivers/net/ethernet/sfc/efx.c | 24
-rw-r--r-- drivers/net/ethernet/sfc/efx_common.c | 1
-rw-r--r-- drivers/net/ethernet/sfc/efx_devlink.c | 13
-rw-r--r-- drivers/net/ethernet/sfc/efx_reflash.c | 522
-rw-r--r-- drivers/net/ethernet/sfc/efx_reflash.h | 20
-rw-r--r-- drivers/net/ethernet/sfc/fw_formats.h | 114
-rw-r--r-- drivers/net/ethernet/sfc/mae.c | 2
-rw-r--r-- drivers/net/ethernet/sfc/mcdi.c | 115
-rw-r--r-- drivers/net/ethernet/sfc/mcdi.h | 22
-rw-r--r-- drivers/net/ethernet/sfc/mcdi_pcol.h | 13842
-rw-r--r-- drivers/net/ethernet/sfc/mcdi_port.c | 59
-rw-r--r-- drivers/net/ethernet/sfc/mcdi_port_common.c | 11
-rw-r--r-- drivers/net/ethernet/sfc/net_driver.h | 11
-rw-r--r-- drivers/net/ethernet/sfc/tc.c | 6
-rw-r--r-- drivers/net/ethernet/smsc/smsc911x.c | 1
-rw-r--r-- drivers/net/ethernet/socionext/netsec.c | 7
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/Kconfig | 12
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/Makefile | 1
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/common.h | 18
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c | 21
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c | 178
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c | 29
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c | 24
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c | 233
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h | 29
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c | 27
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c | 33
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c | 8
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c | 9
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c | 6
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c | 33
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 564
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c | 22
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c | 20
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c | 75
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c | 27
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c | 20
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c | 1
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 8
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c | 8
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c | 46
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac1000.h | 13
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 33
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 12
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 96
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h | 9
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c | 49
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/hwif.h | 21
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 16
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 344
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 3
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 24
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 22
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c | 8
-rw-r--r-- drivers/net/ethernet/tehuti/tn40.c | 9
-rw-r--r-- drivers/net/ethernet/tehuti/tn40.h | 33
-rw-r--r-- drivers/net/ethernet/tehuti/tn40_mdio.c | 84
-rw-r--r-- drivers/net/ethernet/ti/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/ti/am65-cpsw-nuss.c | 252
-rw-r--r-- drivers/net/ethernet/ti/am65-cpsw-nuss.h | 8
-rw-r--r-- drivers/net/ethernet/ti/cpsw.c | 6
-rw-r--r-- drivers/net/ethernet/ti/cpsw_new.c | 9
-rw-r--r-- drivers/net/ethernet/ti/icssg/icss_iep.c | 63
-rw-r--r-- drivers/net/ethernet/ti/icssg/icssg_common.c | 422
-rw-r--r-- drivers/net/ethernet/ti/icssg/icssg_prueth.c | 137
-rw-r--r-- drivers/net/ethernet/ti/icssg/icssg_prueth.h | 49
-rw-r--r-- drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c | 58
-rw-r--r-- drivers/net/ethernet/ti/icssg/icssg_stats.c | 4
-rw-r--r-- drivers/net/ethernet/toshiba/Kconfig | 11
-rw-r--r-- drivers/net/ethernet/toshiba/Makefile | 2
-rw-r--r-- drivers/net/ethernet/toshiba/spider_net.c | 2556
-rw-r--r-- drivers/net/ethernet/toshiba/spider_net.h | 475
-rw-r--r-- drivers/net/ethernet/toshiba/spider_net_ethtool.c | 174
-rw-r--r-- drivers/net/ethernet/wangxun/Kconfig | 3
-rw-r--r-- drivers/net/ethernet/wangxun/libwx/Makefile | 2
-rw-r--r-- drivers/net/ethernet/wangxun/libwx/wx_ethtool.c | 105
-rw-r--r-- drivers/net/ethernet/wangxun/libwx/wx_ethtool.h | 4
-rw-r--r-- drivers/net/ethernet/wangxun/libwx/wx_hw.c | 236
-rw-r--r-- drivers/net/ethernet/wangxun/libwx/wx_hw.h | 1
-rw-r--r-- drivers/net/ethernet/wangxun/libwx/wx_lib.c | 142
-rw-r--r-- drivers/net/ethernet/wangxun/libwx/wx_ptp.c | 883
-rw-r--r-- drivers/net/ethernet/wangxun/libwx/wx_ptp.h | 20
-rw-r--r-- drivers/net/ethernet/wangxun/libwx/wx_type.h | 135
-rw-r--r-- drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/wangxun/ngbe/ngbe_main.c | 20
-rw-r--r-- drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c | 11
-rw-r--r-- drivers/net/ethernet/wangxun/ngbe/ngbe_type.h | 5
-rw-r--r-- drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c | 6
-rw-r--r-- drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c | 7
-rw-r--r-- drivers/net/ethernet/wangxun/txgbe/txgbe_main.c | 56
-rw-r--r-- drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c | 16
-rw-r--r-- drivers/net/ethernet/wangxun/txgbe/txgbe_type.h | 14
-rw-r--r-- drivers/net/ethernet/xilinx/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_axienet.h | 29
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 315
458 files changed, 26504 insertions(+), 20324 deletions(-)
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 977b42bc1e8c..f86d4557d8d7 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -20,6 +20,7 @@ source "drivers/net/ethernet/actions/Kconfig"
source "drivers/net/ethernet/adaptec/Kconfig"
source "drivers/net/ethernet/aeroflex/Kconfig"
source "drivers/net/ethernet/agere/Kconfig"
+source "drivers/net/ethernet/airoha/Kconfig"
source "drivers/net/ethernet/alacritech/Kconfig"
source "drivers/net/ethernet/allwinner/Kconfig"
source "drivers/net/ethernet/alteon/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 99fa180dedb8..67182339469a 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_NET_VENDOR_ADAPTEC) += adaptec/
obj-$(CONFIG_GRETH) += aeroflex/
obj-$(CONFIG_NET_VENDOR_ADI) += adi/
obj-$(CONFIG_NET_VENDOR_AGERE) += agere/
+obj-$(CONFIG_NET_VENDOR_AIROHA) += airoha/
obj-$(CONFIG_NET_VENDOR_ALACRITECH) += alacritech/
obj-$(CONFIG_NET_VENDOR_ALLWINNER) += allwinner/
obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/
diff --git a/drivers/net/ethernet/actions/owl-emac.c b/drivers/net/ethernet/actions/owl-emac.c
index 115f48b3342c..0a08da799255 100644
--- a/drivers/net/ethernet/actions/owl-emac.c
+++ b/drivers/net/ethernet/actions/owl-emac.c
@@ -1325,15 +1325,10 @@ static int owl_emac_mdio_init(struct net_device *netdev)
struct device_node *mdio_node;
int ret;
- mdio_node = of_get_child_by_name(dev->of_node, "mdio");
+ mdio_node = of_get_available_child_by_name(dev->of_node, "mdio");
if (!mdio_node)
return -ENODEV;
- if (!of_device_is_available(mdio_node)) {
- ret = -ENODEV;
- goto err_put_node;
- }
-
priv->mii = devm_mdiobus_alloc(dev);
if (!priv->mii) {
ret = -ENOMEM;
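The owl-emac hunk above folds the open-coded "get child node, then check its status" sequence into the newer of_get_available_child_by_name() helper, which returns the child only if its status is "okay". A minimal sketch of the pattern outside any particular driver (example_mdio_init and its surroundings are illustrative, not taken from the patch):

#include <linux/of.h>

/* of_get_available_child_by_name() combines of_get_child_by_name()
 * with the of_device_is_available() check, so the separate check and
 * its err_put_node unwind path can be dropped entirely.
 */
static int example_mdio_init(struct device *dev)
{
	struct device_node *mdio_node;

	mdio_node = of_get_available_child_by_name(dev->of_node, "mdio");
	if (!mdio_node)
		return -ENODEV;

	/* ... allocate and register the MDIO bus here ... */

	of_node_put(mdio_node);	/* helper returns the node with a reference held */
	return 0;
}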
diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
index 68fad5575fd4..30f9d271e595 100644
--- a/drivers/net/ethernet/adi/adin1110.c
+++ b/drivers/net/ethernet/adi/adin1110.c
@@ -1599,7 +1599,7 @@ static int adin1110_probe_netdevs(struct adin1110_priv *priv)
netdev->netdev_ops = &adin1110_netdev_ops;
netdev->ethtool_ops = &adin1110_ethtool_ops;
netdev->priv_flags |= IFF_UNICAST_FLT;
- netdev->netns_local = true;
+ netdev->netns_immutable = true;
port_priv->phydev = get_phy_device(priv->mii_bus, i + 1, false);
if (IS_ERR(port_priv->phydev)) {
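The adin1110 change is a mechanical rename of the flag that pins a netdev to its initial network namespace; the semantics are unchanged. A minimal sketch of where the flag sits during netdev setup (example_netdev_setup is illustrative):

#include <linux/netdevice.h>

static void example_netdev_setup(struct net_device *netdev)
{
	netdev->priv_flags |= IFF_UNICAST_FLT;
	/* was: netdev->netns_local = true; */
	netdev->netns_immutable = true;	/* device cannot move between namespaces */
}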
diff --git a/drivers/net/ethernet/airoha/Kconfig b/drivers/net/ethernet/airoha/Kconfig
new file mode 100644
index 000000000000..1a4cf6a259f6
--- /dev/null
+++ b/drivers/net/ethernet/airoha/Kconfig
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config NET_VENDOR_AIROHA
+ bool "Airoha devices"
+ depends on ARCH_AIROHA || COMPILE_TEST
+ help
+ If you have an Airoha SoC with ethernet, say Y.
+
+if NET_VENDOR_AIROHA
+
+config NET_AIROHA_NPU
+ tristate "Airoha NPU support"
+ select WANT_DEV_COREDUMP
+ select REGMAP_MMIO
+ help
+ This driver supports the Airoha Network Processor (NPU) available
+ on the Airoha SoC family.
+
+config NET_AIROHA
+ tristate "Airoha SoC Gigabit Ethernet support"
+ depends on NET_DSA || !NET_DSA
+ select NET_AIROHA_NPU
+ select PAGE_POOL
+ help
+ This driver supports the gigabit ethernet MACs in the
+ Airoha SoC family.
+
+endif #NET_VENDOR_AIROHA
diff --git a/drivers/net/ethernet/airoha/Makefile b/drivers/net/ethernet/airoha/Makefile
new file mode 100644
index 000000000000..94468053e34b
--- /dev/null
+++ b/drivers/net/ethernet/airoha/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Airoha SoCs built-in ethernet MACs
+#
+
+obj-$(CONFIG_NET_AIROHA) += airoha-eth.o
+airoha-eth-y := airoha_eth.o airoha_ppe.o
+airoha-eth-$(CONFIG_DEBUG_FS) += airoha_ppe_debugfs.o
+obj-$(CONFIG_NET_AIROHA_NPU) += airoha_npu.o
diff --git a/drivers/net/ethernet/mediatek/airoha_eth.c b/drivers/net/ethernet/airoha/airoha_eth.c
index 09f448f29124..c0a642568ac1 100644
--- a/drivers/net/ethernet/mediatek/airoha_eth.c
+++ b/drivers/net/ethernet/airoha/airoha_eth.c
@@ -3,925 +3,30 @@
* Copyright (c) 2024 AIROHA Inc
* Author: Lorenzo Bianconi <lorenzo@kernel.org>
*/
-#include <linux/etherdevice.h>
-#include <linux/iopoll.h>
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
-#include <linux/reset.h>
#include <linux/tcp.h>
#include <linux/u64_stats_sync.h>
-#include <net/dsa.h>
+#include <net/dst_metadata.h>
#include <net/page_pool/helpers.h>
#include <net/pkt_cls.h>
#include <uapi/linux/ppp_defs.h>
-#define AIROHA_MAX_NUM_GDM_PORTS 1
-#define AIROHA_MAX_NUM_QDMA 2
-#define AIROHA_MAX_NUM_RSTS 3
-#define AIROHA_MAX_NUM_XSI_RSTS 5
-#define AIROHA_MAX_MTU 2000
-#define AIROHA_MAX_PACKET_SIZE 2048
-#define AIROHA_NUM_QOS_CHANNELS 4
-#define AIROHA_NUM_QOS_QUEUES 8
-#define AIROHA_NUM_TX_RING 32
-#define AIROHA_NUM_RX_RING 32
-#define AIROHA_NUM_NETDEV_TX_RINGS (AIROHA_NUM_TX_RING + \
- AIROHA_NUM_QOS_CHANNELS)
-#define AIROHA_FE_MC_MAX_VLAN_TABLE 64
-#define AIROHA_FE_MC_MAX_VLAN_PORT 16
-#define AIROHA_NUM_TX_IRQ 2
-#define HW_DSCP_NUM 2048
-#define IRQ_QUEUE_LEN(_n) ((_n) ? 1024 : 2048)
-#define TX_DSCP_NUM 1024
-#define RX_DSCP_NUM(_n) \
- ((_n) == 2 ? 128 : \
- (_n) == 11 ? 128 : \
- (_n) == 15 ? 128 : \
- (_n) == 0 ? 1024 : 16)
-
-#define PSE_RSV_PAGES 128
-#define PSE_QUEUE_RSV_PAGES 64
-
-#define QDMA_METER_IDX(_n) ((_n) & 0xff)
-#define QDMA_METER_GROUP(_n) (((_n) >> 8) & 0x3)
-
-/* FE */
-#define PSE_BASE 0x0100
-#define CSR_IFC_BASE 0x0200
-#define CDM1_BASE 0x0400
-#define GDM1_BASE 0x0500
-#define PPE1_BASE 0x0c00
-
-#define CDM2_BASE 0x1400
-#define GDM2_BASE 0x1500
-
-#define GDM3_BASE 0x1100
-#define GDM4_BASE 0x2500
-
-#define GDM_BASE(_n) \
- ((_n) == 4 ? GDM4_BASE : \
- (_n) == 3 ? GDM3_BASE : \
- (_n) == 2 ? GDM2_BASE : GDM1_BASE)
-
-#define REG_FE_DMA_GLO_CFG 0x0000
-#define FE_DMA_GLO_L2_SPACE_MASK GENMASK(7, 4)
-#define FE_DMA_GLO_PG_SZ_MASK BIT(3)
-
-#define REG_FE_RST_GLO_CFG 0x0004
-#define FE_RST_GDM4_MBI_ARB_MASK BIT(3)
-#define FE_RST_GDM3_MBI_ARB_MASK BIT(2)
-#define FE_RST_CORE_MASK BIT(0)
-
-#define REG_FE_WAN_MAC_H 0x0030
-#define REG_FE_LAN_MAC_H 0x0040
-
-#define REG_FE_MAC_LMIN(_n) ((_n) + 0x04)
-#define REG_FE_MAC_LMAX(_n) ((_n) + 0x08)
-
-#define REG_FE_CDM1_OQ_MAP0 0x0050
-#define REG_FE_CDM1_OQ_MAP1 0x0054
-#define REG_FE_CDM1_OQ_MAP2 0x0058
-#define REG_FE_CDM1_OQ_MAP3 0x005c
-
-#define REG_FE_PCE_CFG 0x0070
-#define PCE_DPI_EN_MASK BIT(2)
-#define PCE_KA_EN_MASK BIT(1)
-#define PCE_MC_EN_MASK BIT(0)
-
-#define REG_FE_PSE_QUEUE_CFG_WR 0x0080
-#define PSE_CFG_PORT_ID_MASK GENMASK(27, 24)
-#define PSE_CFG_QUEUE_ID_MASK GENMASK(20, 16)
-#define PSE_CFG_WR_EN_MASK BIT(8)
-#define PSE_CFG_OQRSV_SEL_MASK BIT(0)
-
-#define REG_FE_PSE_QUEUE_CFG_VAL 0x0084
-#define PSE_CFG_OQ_RSV_MASK GENMASK(13, 0)
-
-#define PSE_FQ_CFG 0x008c
-#define PSE_FQ_LIMIT_MASK GENMASK(14, 0)
-
-#define REG_FE_PSE_BUF_SET 0x0090
-#define PSE_SHARE_USED_LTHD_MASK GENMASK(31, 16)
-#define PSE_ALLRSV_MASK GENMASK(14, 0)
-
-#define REG_PSE_SHARE_USED_THD 0x0094
-#define PSE_SHARE_USED_MTHD_MASK GENMASK(31, 16)
-#define PSE_SHARE_USED_HTHD_MASK GENMASK(15, 0)
-
-#define REG_GDM_MISC_CFG 0x0148
-#define GDM2_RDM_ACK_WAIT_PREF_MASK BIT(9)
-#define GDM2_CHN_VLD_MODE_MASK BIT(5)
-
-#define REG_FE_CSR_IFC_CFG CSR_IFC_BASE
-#define FE_IFC_EN_MASK BIT(0)
-
-#define REG_FE_VIP_PORT_EN 0x01f0
-#define REG_FE_IFC_PORT_EN 0x01f4
-
-#define REG_PSE_IQ_REV1 (PSE_BASE + 0x08)
-#define PSE_IQ_RES1_P2_MASK GENMASK(23, 16)
-
-#define REG_PSE_IQ_REV2 (PSE_BASE + 0x0c)
-#define PSE_IQ_RES2_P5_MASK GENMASK(15, 8)
-#define PSE_IQ_RES2_P4_MASK GENMASK(7, 0)
-
-#define REG_FE_VIP_EN(_n) (0x0300 + ((_n) << 3))
-#define PATN_FCPU_EN_MASK BIT(7)
-#define PATN_SWP_EN_MASK BIT(6)
-#define PATN_DP_EN_MASK BIT(5)
-#define PATN_SP_EN_MASK BIT(4)
-#define PATN_TYPE_MASK GENMASK(3, 1)
-#define PATN_EN_MASK BIT(0)
-
-#define REG_FE_VIP_PATN(_n) (0x0304 + ((_n) << 3))
-#define PATN_DP_MASK GENMASK(31, 16)
-#define PATN_SP_MASK GENMASK(15, 0)
-
-#define REG_CDM1_VLAN_CTRL CDM1_BASE
-#define CDM1_VLAN_MASK GENMASK(31, 16)
-
-#define REG_CDM1_FWD_CFG (CDM1_BASE + 0x08)
-#define CDM1_VIP_QSEL_MASK GENMASK(24, 20)
-
-#define REG_CDM1_CRSN_QSEL(_n) (CDM1_BASE + 0x10 + ((_n) << 2))
-#define CDM1_CRSN_QSEL_REASON_MASK(_n) \
- GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3))
-
-#define REG_CDM2_FWD_CFG (CDM2_BASE + 0x08)
-#define CDM2_OAM_QSEL_MASK GENMASK(31, 27)
-#define CDM2_VIP_QSEL_MASK GENMASK(24, 20)
-
-#define REG_CDM2_CRSN_QSEL(_n) (CDM2_BASE + 0x10 + ((_n) << 2))
-#define CDM2_CRSN_QSEL_REASON_MASK(_n) \
- GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3))
-
-#define REG_GDM_FWD_CFG(_n) GDM_BASE(_n)
-#define GDM_DROP_CRC_ERR BIT(23)
-#define GDM_IP4_CKSUM BIT(22)
-#define GDM_TCP_CKSUM BIT(21)
-#define GDM_UDP_CKSUM BIT(20)
-#define GDM_UCFQ_MASK GENMASK(15, 12)
-#define GDM_BCFQ_MASK GENMASK(11, 8)
-#define GDM_MCFQ_MASK GENMASK(7, 4)
-#define GDM_OCFQ_MASK GENMASK(3, 0)
-
-#define REG_GDM_INGRESS_CFG(_n) (GDM_BASE(_n) + 0x10)
-#define GDM_INGRESS_FC_EN_MASK BIT(1)
-#define GDM_STAG_EN_MASK BIT(0)
-
-#define REG_GDM_LEN_CFG(_n) (GDM_BASE(_n) + 0x14)
-#define GDM_SHORT_LEN_MASK GENMASK(13, 0)
-#define GDM_LONG_LEN_MASK GENMASK(29, 16)
-
-#define REG_FE_CPORT_CFG (GDM1_BASE + 0x40)
-#define FE_CPORT_PAD BIT(26)
-#define FE_CPORT_PORT_XFC_MASK BIT(25)
-#define FE_CPORT_QUEUE_XFC_MASK BIT(24)
-
-#define REG_FE_GDM_MIB_CLEAR(_n) (GDM_BASE(_n) + 0xf0)
-#define FE_GDM_MIB_RX_CLEAR_MASK BIT(1)
-#define FE_GDM_MIB_TX_CLEAR_MASK BIT(0)
-
-#define REG_FE_GDM1_MIB_CFG (GDM1_BASE + 0xf4)
-#define FE_STRICT_RFC2819_MODE_MASK BIT(31)
-#define FE_GDM1_TX_MIB_SPLIT_EN_MASK BIT(17)
-#define FE_GDM1_RX_MIB_SPLIT_EN_MASK BIT(16)
-#define FE_TX_MIB_ID_MASK GENMASK(15, 8)
-#define FE_RX_MIB_ID_MASK GENMASK(7, 0)
-
-#define REG_FE_GDM_TX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x104)
-#define REG_FE_GDM_TX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x10c)
-#define REG_FE_GDM_TX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x110)
-#define REG_FE_GDM_TX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x114)
-#define REG_FE_GDM_TX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x118)
-#define REG_FE_GDM_TX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x11c)
-#define REG_FE_GDM_TX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x120)
-#define REG_FE_GDM_TX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x124)
-#define REG_FE_GDM_TX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x128)
-#define REG_FE_GDM_TX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x12c)
-#define REG_FE_GDM_TX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x130)
-#define REG_FE_GDM_TX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x134)
-#define REG_FE_GDM_TX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x138)
-#define REG_FE_GDM_TX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x13c)
-#define REG_FE_GDM_TX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x140)
-
-#define REG_FE_GDM_RX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x148)
-#define REG_FE_GDM_RX_FC_DROP_CNT(_n) (GDM_BASE(_n) + 0x14c)
-#define REG_FE_GDM_RX_RC_DROP_CNT(_n) (GDM_BASE(_n) + 0x150)
-#define REG_FE_GDM_RX_OVERFLOW_DROP_CNT(_n) (GDM_BASE(_n) + 0x154)
-#define REG_FE_GDM_RX_ERROR_DROP_CNT(_n) (GDM_BASE(_n) + 0x158)
-#define REG_FE_GDM_RX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x15c)
-#define REG_FE_GDM_RX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x160)
-#define REG_FE_GDM_RX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x164)
-#define REG_FE_GDM_RX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x168)
-#define REG_FE_GDM_RX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x16c)
-#define REG_FE_GDM_RX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x170)
-#define REG_FE_GDM_RX_ETH_CRC_ERR_CNT(_n) (GDM_BASE(_n) + 0x174)
-#define REG_FE_GDM_RX_ETH_FRAG_CNT(_n) (GDM_BASE(_n) + 0x178)
-#define REG_FE_GDM_RX_ETH_JABBER_CNT(_n) (GDM_BASE(_n) + 0x17c)
-#define REG_FE_GDM_RX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x180)
-#define REG_FE_GDM_RX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x184)
-#define REG_FE_GDM_RX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x188)
-#define REG_FE_GDM_RX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x18c)
-#define REG_FE_GDM_RX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x190)
-#define REG_FE_GDM_RX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x194)
-#define REG_FE_GDM_RX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x198)
-#define REG_FE_GDM_RX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x19c)
-
-#define REG_PPE1_TB_HASH_CFG (PPE1_BASE + 0x250)
-#define PPE1_SRAM_TABLE_EN_MASK BIT(0)
-#define PPE1_SRAM_HASH1_EN_MASK BIT(8)
-#define PPE1_DRAM_TABLE_EN_MASK BIT(16)
-#define PPE1_DRAM_HASH1_EN_MASK BIT(24)
-
-#define REG_FE_GDM_TX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x280)
-#define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x284)
-#define REG_FE_GDM_TX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x288)
-#define REG_FE_GDM_TX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x28c)
-
-#define REG_FE_GDM_RX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x290)
-#define REG_FE_GDM_RX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x294)
-#define REG_FE_GDM_RX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x298)
-#define REG_FE_GDM_RX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x29c)
-#define REG_FE_GDM_TX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2b8)
-#define REG_FE_GDM_TX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2bc)
-#define REG_FE_GDM_TX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2c0)
-#define REG_FE_GDM_TX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2c4)
-#define REG_FE_GDM_TX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2c8)
-#define REG_FE_GDM_TX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2cc)
-#define REG_FE_GDM_RX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2e8)
-#define REG_FE_GDM_RX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2ec)
-#define REG_FE_GDM_RX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2f0)
-#define REG_FE_GDM_RX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2f4)
-#define REG_FE_GDM_RX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2f8)
-#define REG_FE_GDM_RX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2fc)
-
-#define REG_GDM2_CHN_RLS (GDM2_BASE + 0x20)
-#define MBI_RX_AGE_SEL_MASK GENMASK(26, 25)
-#define MBI_TX_AGE_SEL_MASK GENMASK(18, 17)
-
-#define REG_GDM3_FWD_CFG GDM3_BASE
-#define GDM3_PAD_EN_MASK BIT(28)
-
-#define REG_GDM4_FWD_CFG GDM4_BASE
-#define GDM4_PAD_EN_MASK BIT(28)
-#define GDM4_SPORT_OFFSET0_MASK GENMASK(11, 8)
-
-#define REG_GDM4_SRC_PORT_SET (GDM4_BASE + 0x23c)
-#define GDM4_SPORT_OFF2_MASK GENMASK(19, 16)
-#define GDM4_SPORT_OFF1_MASK GENMASK(15, 12)
-#define GDM4_SPORT_OFF0_MASK GENMASK(11, 8)
-
-#define REG_IP_FRAG_FP 0x2010
-#define IP_ASSEMBLE_PORT_MASK GENMASK(24, 21)
-#define IP_ASSEMBLE_NBQ_MASK GENMASK(20, 16)
-#define IP_FRAGMENT_PORT_MASK GENMASK(8, 5)
-#define IP_FRAGMENT_NBQ_MASK GENMASK(4, 0)
-
-#define REG_MC_VLAN_EN 0x2100
-#define MC_VLAN_EN_MASK BIT(0)
-
-#define REG_MC_VLAN_CFG 0x2104
-#define MC_VLAN_CFG_CMD_DONE_MASK BIT(31)
-#define MC_VLAN_CFG_TABLE_ID_MASK GENMASK(21, 16)
-#define MC_VLAN_CFG_PORT_ID_MASK GENMASK(11, 8)
-#define MC_VLAN_CFG_TABLE_SEL_MASK BIT(4)
-#define MC_VLAN_CFG_RW_MASK BIT(0)
-
-#define REG_MC_VLAN_DATA 0x2108
-
-#define REG_CDM5_RX_OQ1_DROP_CNT 0x29d4
-
-/* QDMA */
-#define REG_QDMA_GLOBAL_CFG 0x0004
-#define GLOBAL_CFG_RX_2B_OFFSET_MASK BIT(31)
-#define GLOBAL_CFG_DMA_PREFERENCE_MASK GENMASK(30, 29)
-#define GLOBAL_CFG_CPU_TXR_RR_MASK BIT(28)
-#define GLOBAL_CFG_DSCP_BYTE_SWAP_MASK BIT(27)
-#define GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK BIT(26)
-#define GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK BIT(25)
-#define GLOBAL_CFG_OAM_MODIFY_MASK BIT(24)
-#define GLOBAL_CFG_RESET_MASK BIT(23)
-#define GLOBAL_CFG_RESET_DONE_MASK BIT(22)
-#define GLOBAL_CFG_MULTICAST_EN_MASK BIT(21)
-#define GLOBAL_CFG_IRQ1_EN_MASK BIT(20)
-#define GLOBAL_CFG_IRQ0_EN_MASK BIT(19)
-#define GLOBAL_CFG_LOOPCNT_EN_MASK BIT(18)
-#define GLOBAL_CFG_RD_BYPASS_WR_MASK BIT(17)
-#define GLOBAL_CFG_QDMA_LOOPBACK_MASK BIT(16)
-#define GLOBAL_CFG_LPBK_RXQ_SEL_MASK GENMASK(13, 8)
-#define GLOBAL_CFG_CHECK_DONE_MASK BIT(7)
-#define GLOBAL_CFG_TX_WB_DONE_MASK BIT(6)
-#define GLOBAL_CFG_MAX_ISSUE_NUM_MASK GENMASK(5, 4)
-#define GLOBAL_CFG_RX_DMA_BUSY_MASK BIT(3)
-#define GLOBAL_CFG_RX_DMA_EN_MASK BIT(2)
-#define GLOBAL_CFG_TX_DMA_BUSY_MASK BIT(1)
-#define GLOBAL_CFG_TX_DMA_EN_MASK BIT(0)
-
-#define REG_FWD_DSCP_BASE 0x0010
-#define REG_FWD_BUF_BASE 0x0014
-
-#define REG_HW_FWD_DSCP_CFG 0x0018
-#define HW_FWD_DSCP_PAYLOAD_SIZE_MASK GENMASK(29, 28)
-#define HW_FWD_DSCP_SCATTER_LEN_MASK GENMASK(17, 16)
-#define HW_FWD_DSCP_MIN_SCATTER_LEN_MASK GENMASK(15, 0)
-
-#define REG_INT_STATUS(_n) \
- (((_n) == 4) ? 0x0730 : \
- ((_n) == 3) ? 0x0724 : \
- ((_n) == 2) ? 0x0720 : \
- ((_n) == 1) ? 0x0024 : 0x0020)
-
-#define REG_INT_ENABLE(_n) \
- (((_n) == 4) ? 0x0750 : \
- ((_n) == 3) ? 0x0744 : \
- ((_n) == 2) ? 0x0740 : \
- ((_n) == 1) ? 0x002c : 0x0028)
-
-/* QDMA_CSR_INT_ENABLE1 */
-#define RX15_COHERENT_INT_MASK BIT(31)
-#define RX14_COHERENT_INT_MASK BIT(30)
-#define RX13_COHERENT_INT_MASK BIT(29)
-#define RX12_COHERENT_INT_MASK BIT(28)
-#define RX11_COHERENT_INT_MASK BIT(27)
-#define RX10_COHERENT_INT_MASK BIT(26)
-#define RX9_COHERENT_INT_MASK BIT(25)
-#define RX8_COHERENT_INT_MASK BIT(24)
-#define RX7_COHERENT_INT_MASK BIT(23)
-#define RX6_COHERENT_INT_MASK BIT(22)
-#define RX5_COHERENT_INT_MASK BIT(21)
-#define RX4_COHERENT_INT_MASK BIT(20)
-#define RX3_COHERENT_INT_MASK BIT(19)
-#define RX2_COHERENT_INT_MASK BIT(18)
-#define RX1_COHERENT_INT_MASK BIT(17)
-#define RX0_COHERENT_INT_MASK BIT(16)
-#define TX7_COHERENT_INT_MASK BIT(15)
-#define TX6_COHERENT_INT_MASK BIT(14)
-#define TX5_COHERENT_INT_MASK BIT(13)
-#define TX4_COHERENT_INT_MASK BIT(12)
-#define TX3_COHERENT_INT_MASK BIT(11)
-#define TX2_COHERENT_INT_MASK BIT(10)
-#define TX1_COHERENT_INT_MASK BIT(9)
-#define TX0_COHERENT_INT_MASK BIT(8)
-#define CNT_OVER_FLOW_INT_MASK BIT(7)
-#define IRQ1_FULL_INT_MASK BIT(5)
-#define IRQ1_INT_MASK BIT(4)
-#define HWFWD_DSCP_LOW_INT_MASK BIT(3)
-#define HWFWD_DSCP_EMPTY_INT_MASK BIT(2)
-#define IRQ0_FULL_INT_MASK BIT(1)
-#define IRQ0_INT_MASK BIT(0)
-
-#define TX_DONE_INT_MASK(_n) \
- ((_n) ? IRQ1_INT_MASK | IRQ1_FULL_INT_MASK \
- : IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
-
-#define INT_TX_MASK \
- (IRQ1_INT_MASK | IRQ1_FULL_INT_MASK | \
- IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
-
-#define INT_IDX0_MASK \
- (TX0_COHERENT_INT_MASK | TX1_COHERENT_INT_MASK | \
- TX2_COHERENT_INT_MASK | TX3_COHERENT_INT_MASK | \
- TX4_COHERENT_INT_MASK | TX5_COHERENT_INT_MASK | \
- TX6_COHERENT_INT_MASK | TX7_COHERENT_INT_MASK | \
- RX0_COHERENT_INT_MASK | RX1_COHERENT_INT_MASK | \
- RX2_COHERENT_INT_MASK | RX3_COHERENT_INT_MASK | \
- RX4_COHERENT_INT_MASK | RX7_COHERENT_INT_MASK | \
- RX8_COHERENT_INT_MASK | RX9_COHERENT_INT_MASK | \
- RX15_COHERENT_INT_MASK | INT_TX_MASK)
-
-/* QDMA_CSR_INT_ENABLE2 */
-#define RX15_NO_CPU_DSCP_INT_MASK BIT(31)
-#define RX14_NO_CPU_DSCP_INT_MASK BIT(30)
-#define RX13_NO_CPU_DSCP_INT_MASK BIT(29)
-#define RX12_NO_CPU_DSCP_INT_MASK BIT(28)
-#define RX11_NO_CPU_DSCP_INT_MASK BIT(27)
-#define RX10_NO_CPU_DSCP_INT_MASK BIT(26)
-#define RX9_NO_CPU_DSCP_INT_MASK BIT(25)
-#define RX8_NO_CPU_DSCP_INT_MASK BIT(24)
-#define RX7_NO_CPU_DSCP_INT_MASK BIT(23)
-#define RX6_NO_CPU_DSCP_INT_MASK BIT(22)
-#define RX5_NO_CPU_DSCP_INT_MASK BIT(21)
-#define RX4_NO_CPU_DSCP_INT_MASK BIT(20)
-#define RX3_NO_CPU_DSCP_INT_MASK BIT(19)
-#define RX2_NO_CPU_DSCP_INT_MASK BIT(18)
-#define RX1_NO_CPU_DSCP_INT_MASK BIT(17)
-#define RX0_NO_CPU_DSCP_INT_MASK BIT(16)
-#define RX15_DONE_INT_MASK BIT(15)
-#define RX14_DONE_INT_MASK BIT(14)
-#define RX13_DONE_INT_MASK BIT(13)
-#define RX12_DONE_INT_MASK BIT(12)
-#define RX11_DONE_INT_MASK BIT(11)
-#define RX10_DONE_INT_MASK BIT(10)
-#define RX9_DONE_INT_MASK BIT(9)
-#define RX8_DONE_INT_MASK BIT(8)
-#define RX7_DONE_INT_MASK BIT(7)
-#define RX6_DONE_INT_MASK BIT(6)
-#define RX5_DONE_INT_MASK BIT(5)
-#define RX4_DONE_INT_MASK BIT(4)
-#define RX3_DONE_INT_MASK BIT(3)
-#define RX2_DONE_INT_MASK BIT(2)
-#define RX1_DONE_INT_MASK BIT(1)
-#define RX0_DONE_INT_MASK BIT(0)
-
-#define RX_DONE_INT_MASK \
- (RX0_DONE_INT_MASK | RX1_DONE_INT_MASK | \
- RX2_DONE_INT_MASK | RX3_DONE_INT_MASK | \
- RX4_DONE_INT_MASK | RX7_DONE_INT_MASK | \
- RX8_DONE_INT_MASK | RX9_DONE_INT_MASK | \
- RX15_DONE_INT_MASK)
-#define INT_IDX1_MASK \
- (RX_DONE_INT_MASK | \
- RX0_NO_CPU_DSCP_INT_MASK | RX1_NO_CPU_DSCP_INT_MASK | \
- RX2_NO_CPU_DSCP_INT_MASK | RX3_NO_CPU_DSCP_INT_MASK | \
- RX4_NO_CPU_DSCP_INT_MASK | RX7_NO_CPU_DSCP_INT_MASK | \
- RX8_NO_CPU_DSCP_INT_MASK | RX9_NO_CPU_DSCP_INT_MASK | \
- RX15_NO_CPU_DSCP_INT_MASK)
-
-/* QDMA_CSR_INT_ENABLE5 */
-#define TX31_COHERENT_INT_MASK BIT(31)
-#define TX30_COHERENT_INT_MASK BIT(30)
-#define TX29_COHERENT_INT_MASK BIT(29)
-#define TX28_COHERENT_INT_MASK BIT(28)
-#define TX27_COHERENT_INT_MASK BIT(27)
-#define TX26_COHERENT_INT_MASK BIT(26)
-#define TX25_COHERENT_INT_MASK BIT(25)
-#define TX24_COHERENT_INT_MASK BIT(24)
-#define TX23_COHERENT_INT_MASK BIT(23)
-#define TX22_COHERENT_INT_MASK BIT(22)
-#define TX21_COHERENT_INT_MASK BIT(21)
-#define TX20_COHERENT_INT_MASK BIT(20)
-#define TX19_COHERENT_INT_MASK BIT(19)
-#define TX18_COHERENT_INT_MASK BIT(18)
-#define TX17_COHERENT_INT_MASK BIT(17)
-#define TX16_COHERENT_INT_MASK BIT(16)
-#define TX15_COHERENT_INT_MASK BIT(15)
-#define TX14_COHERENT_INT_MASK BIT(14)
-#define TX13_COHERENT_INT_MASK BIT(13)
-#define TX12_COHERENT_INT_MASK BIT(12)
-#define TX11_COHERENT_INT_MASK BIT(11)
-#define TX10_COHERENT_INT_MASK BIT(10)
-#define TX9_COHERENT_INT_MASK BIT(9)
-#define TX8_COHERENT_INT_MASK BIT(8)
-
-#define INT_IDX4_MASK \
- (TX8_COHERENT_INT_MASK | TX9_COHERENT_INT_MASK | \
- TX10_COHERENT_INT_MASK | TX11_COHERENT_INT_MASK | \
- TX12_COHERENT_INT_MASK | TX13_COHERENT_INT_MASK | \
- TX14_COHERENT_INT_MASK | TX15_COHERENT_INT_MASK | \
- TX16_COHERENT_INT_MASK | TX17_COHERENT_INT_MASK | \
- TX18_COHERENT_INT_MASK | TX19_COHERENT_INT_MASK | \
- TX20_COHERENT_INT_MASK | TX21_COHERENT_INT_MASK | \
- TX22_COHERENT_INT_MASK | TX23_COHERENT_INT_MASK | \
- TX24_COHERENT_INT_MASK | TX25_COHERENT_INT_MASK | \
- TX26_COHERENT_INT_MASK | TX27_COHERENT_INT_MASK | \
- TX28_COHERENT_INT_MASK | TX29_COHERENT_INT_MASK | \
- TX30_COHERENT_INT_MASK | TX31_COHERENT_INT_MASK)
-
-#define REG_TX_IRQ_BASE(_n) ((_n) ? 0x0048 : 0x0050)
-
-#define REG_TX_IRQ_CFG(_n) ((_n) ? 0x004c : 0x0054)
-#define TX_IRQ_THR_MASK GENMASK(27, 16)
-#define TX_IRQ_DEPTH_MASK GENMASK(11, 0)
-
-#define REG_IRQ_CLEAR_LEN(_n) ((_n) ? 0x0064 : 0x0058)
-#define IRQ_CLEAR_LEN_MASK GENMASK(7, 0)
-
-#define REG_IRQ_STATUS(_n) ((_n) ? 0x0068 : 0x005c)
-#define IRQ_ENTRY_LEN_MASK GENMASK(27, 16)
-#define IRQ_HEAD_IDX_MASK GENMASK(11, 0)
-
-#define REG_TX_RING_BASE(_n) \
- (((_n) < 8) ? 0x0100 + ((_n) << 5) : 0x0b00 + (((_n) - 8) << 5))
-
-#define REG_TX_RING_BLOCKING(_n) \
- (((_n) < 8) ? 0x0104 + ((_n) << 5) : 0x0b04 + (((_n) - 8) << 5))
-
-#define TX_RING_IRQ_BLOCKING_MAP_MASK BIT(6)
-#define TX_RING_IRQ_BLOCKING_CFG_MASK BIT(4)
-#define TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK BIT(2)
-#define TX_RING_IRQ_BLOCKING_MAX_TH_TXRING_EN_MASK BIT(1)
-#define TX_RING_IRQ_BLOCKING_MIN_TH_TXRING_EN_MASK BIT(0)
-
-#define REG_TX_CPU_IDX(_n) \
- (((_n) < 8) ? 0x0108 + ((_n) << 5) : 0x0b08 + (((_n) - 8) << 5))
-
-#define TX_RING_CPU_IDX_MASK GENMASK(15, 0)
-
-#define REG_TX_DMA_IDX(_n) \
- (((_n) < 8) ? 0x010c + ((_n) << 5) : 0x0b0c + (((_n) - 8) << 5))
-
-#define TX_RING_DMA_IDX_MASK GENMASK(15, 0)
-
-#define IRQ_RING_IDX_MASK GENMASK(20, 16)
-#define IRQ_DESC_IDX_MASK GENMASK(15, 0)
-
-#define REG_RX_RING_BASE(_n) \
- (((_n) < 16) ? 0x0200 + ((_n) << 5) : 0x0e00 + (((_n) - 16) << 5))
-
-#define REG_RX_RING_SIZE(_n) \
- (((_n) < 16) ? 0x0204 + ((_n) << 5) : 0x0e04 + (((_n) - 16) << 5))
-
-#define RX_RING_THR_MASK GENMASK(31, 16)
-#define RX_RING_SIZE_MASK GENMASK(15, 0)
-
-#define REG_RX_CPU_IDX(_n) \
- (((_n) < 16) ? 0x0208 + ((_n) << 5) : 0x0e08 + (((_n) - 16) << 5))
-
-#define RX_RING_CPU_IDX_MASK GENMASK(15, 0)
-
-#define REG_RX_DMA_IDX(_n) \
- (((_n) < 16) ? 0x020c + ((_n) << 5) : 0x0e0c + (((_n) - 16) << 5))
-
-#define REG_RX_DELAY_INT_IDX(_n) \
- (((_n) < 16) ? 0x0210 + ((_n) << 5) : 0x0e10 + (((_n) - 16) << 5))
-
-#define RX_DELAY_INT_MASK GENMASK(15, 0)
-
-#define RX_RING_DMA_IDX_MASK GENMASK(15, 0)
-
-#define REG_INGRESS_TRTCM_CFG 0x0070
-#define INGRESS_TRTCM_EN_MASK BIT(31)
-#define INGRESS_TRTCM_MODE_MASK BIT(30)
-#define INGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
-#define INGRESS_FAST_TICK_MASK GENMASK(15, 0)
-
-#define REG_QUEUE_CLOSE_CFG(_n) (0x00a0 + ((_n) & 0xfc))
-#define TXQ_DISABLE_CHAN_QUEUE_MASK(_n, _m) BIT((_m) + (((_n) & 0x3) << 3))
-
-#define REG_TXQ_DIS_CFG_BASE(_n) ((_n) ? 0x20a0 : 0x00a0)
-#define REG_TXQ_DIS_CFG(_n, _m) (REG_TXQ_DIS_CFG_BASE((_n)) + (_m) << 2)
-
-#define REG_CNTR_CFG(_n) (0x0400 + ((_n) << 3))
-#define CNTR_EN_MASK BIT(31)
-#define CNTR_ALL_CHAN_EN_MASK BIT(30)
-#define CNTR_ALL_QUEUE_EN_MASK BIT(29)
-#define CNTR_ALL_DSCP_RING_EN_MASK BIT(28)
-#define CNTR_SRC_MASK GENMASK(27, 24)
-#define CNTR_DSCP_RING_MASK GENMASK(20, 16)
-#define CNTR_CHAN_MASK GENMASK(7, 3)
-#define CNTR_QUEUE_MASK GENMASK(2, 0)
-
-#define REG_CNTR_VAL(_n) (0x0404 + ((_n) << 3))
-
-#define REG_LMGR_INIT_CFG 0x1000
-#define LMGR_INIT_START BIT(31)
-#define LMGR_SRAM_MODE_MASK BIT(30)
-#define HW_FWD_PKTSIZE_OVERHEAD_MASK GENMASK(27, 20)
-#define HW_FWD_DESC_NUM_MASK GENMASK(16, 0)
-
-#define REG_FWD_DSCP_LOW_THR 0x1004
-#define FWD_DSCP_LOW_THR_MASK GENMASK(17, 0)
-
-#define REG_EGRESS_RATE_METER_CFG 0x100c
-#define EGRESS_RATE_METER_EN_MASK BIT(31)
-#define EGRESS_RATE_METER_EQ_RATE_EN_MASK BIT(17)
-#define EGRESS_RATE_METER_WINDOW_SZ_MASK GENMASK(16, 12)
-#define EGRESS_RATE_METER_TIMESLICE_MASK GENMASK(10, 0)
-
-#define REG_EGRESS_TRTCM_CFG 0x1010
-#define EGRESS_TRTCM_EN_MASK BIT(31)
-#define EGRESS_TRTCM_MODE_MASK BIT(30)
-#define EGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
-#define EGRESS_FAST_TICK_MASK GENMASK(15, 0)
-
-#define TRTCM_PARAM_RW_MASK BIT(31)
-#define TRTCM_PARAM_RW_DONE_MASK BIT(30)
-#define TRTCM_PARAM_TYPE_MASK GENMASK(29, 28)
-#define TRTCM_METER_GROUP_MASK GENMASK(27, 26)
-#define TRTCM_PARAM_INDEX_MASK GENMASK(23, 17)
-#define TRTCM_PARAM_RATE_TYPE_MASK BIT(16)
-
-#define REG_TRTCM_CFG_PARAM(_n) ((_n) + 0x4)
-#define REG_TRTCM_DATA_LOW(_n) ((_n) + 0x8)
-#define REG_TRTCM_DATA_HIGH(_n) ((_n) + 0xc)
-
-#define REG_TXWRR_MODE_CFG 0x1020
-#define TWRR_WEIGHT_SCALE_MASK BIT(31)
-#define TWRR_WEIGHT_BASE_MASK BIT(3)
-
-#define REG_TXWRR_WEIGHT_CFG 0x1024
-#define TWRR_RW_CMD_MASK BIT(31)
-#define TWRR_RW_CMD_DONE BIT(30)
-#define TWRR_CHAN_IDX_MASK GENMASK(23, 19)
-#define TWRR_QUEUE_IDX_MASK GENMASK(18, 16)
-#define TWRR_VALUE_MASK GENMASK(15, 0)
-
-#define REG_PSE_BUF_USAGE_CFG 0x1028
-#define PSE_BUF_ESTIMATE_EN_MASK BIT(29)
-
-#define REG_CHAN_QOS_MODE(_n) (0x1040 + ((_n) << 2))
-#define CHAN_QOS_MODE_MASK(_n) GENMASK(2 + ((_n) << 2), (_n) << 2)
-
-#define REG_GLB_TRTCM_CFG 0x1080
-#define GLB_TRTCM_EN_MASK BIT(31)
-#define GLB_TRTCM_MODE_MASK BIT(30)
-#define GLB_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
-#define GLB_FAST_TICK_MASK GENMASK(15, 0)
-
-#define REG_TXQ_CNGST_CFG 0x10a0
-#define TXQ_CNGST_DROP_EN BIT(31)
-#define TXQ_CNGST_DEI_DROP_EN BIT(30)
-
-#define REG_SLA_TRTCM_CFG 0x1150
-#define SLA_TRTCM_EN_MASK BIT(31)
-#define SLA_TRTCM_MODE_MASK BIT(30)
-#define SLA_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
-#define SLA_FAST_TICK_MASK GENMASK(15, 0)
-
-/* CTRL */
-#define QDMA_DESC_DONE_MASK BIT(31)
-#define QDMA_DESC_DROP_MASK BIT(30) /* tx: drop - rx: overflow */
-#define QDMA_DESC_MORE_MASK BIT(29) /* more SG elements */
-#define QDMA_DESC_DEI_MASK BIT(25)
-#define QDMA_DESC_NO_DROP_MASK BIT(24)
-#define QDMA_DESC_LEN_MASK GENMASK(15, 0)
-/* DATA */
-#define QDMA_DESC_NEXT_ID_MASK GENMASK(15, 0)
-/* TX MSG0 */
-#define QDMA_ETH_TXMSG_MIC_IDX_MASK BIT(30)
-#define QDMA_ETH_TXMSG_SP_TAG_MASK GENMASK(29, 14)
-#define QDMA_ETH_TXMSG_ICO_MASK BIT(13)
-#define QDMA_ETH_TXMSG_UCO_MASK BIT(12)
-#define QDMA_ETH_TXMSG_TCO_MASK BIT(11)
-#define QDMA_ETH_TXMSG_TSO_MASK BIT(10)
-#define QDMA_ETH_TXMSG_FAST_MASK BIT(9)
-#define QDMA_ETH_TXMSG_OAM_MASK BIT(8)
-#define QDMA_ETH_TXMSG_CHAN_MASK GENMASK(7, 3)
-#define QDMA_ETH_TXMSG_QUEUE_MASK GENMASK(2, 0)
-/* TX MSG1 */
-#define QDMA_ETH_TXMSG_NO_DROP BIT(31)
-#define QDMA_ETH_TXMSG_METER_MASK GENMASK(30, 24) /* 0x7f no meters */
-#define QDMA_ETH_TXMSG_FPORT_MASK GENMASK(23, 20)
-#define QDMA_ETH_TXMSG_NBOQ_MASK GENMASK(19, 15)
-#define QDMA_ETH_TXMSG_HWF_MASK BIT(14)
-#define QDMA_ETH_TXMSG_HOP_MASK BIT(13)
-#define QDMA_ETH_TXMSG_PTP_MASK BIT(12)
-#define QDMA_ETH_TXMSG_ACNT_G1_MASK GENMASK(10, 6) /* 0x1f do not count */
-#define QDMA_ETH_TXMSG_ACNT_G0_MASK GENMASK(5, 0) /* 0x3f do not count */
-
-/* RX MSG1 */
-#define QDMA_ETH_RXMSG_DEI_MASK BIT(31)
-#define QDMA_ETH_RXMSG_IP6_MASK BIT(30)
-#define QDMA_ETH_RXMSG_IP4_MASK BIT(29)
-#define QDMA_ETH_RXMSG_IP4F_MASK BIT(28)
-#define QDMA_ETH_RXMSG_L4_VALID_MASK BIT(27)
-#define QDMA_ETH_RXMSG_L4F_MASK BIT(26)
-#define QDMA_ETH_RXMSG_SPORT_MASK GENMASK(25, 21)
-#define QDMA_ETH_RXMSG_CRSN_MASK GENMASK(20, 16)
-#define QDMA_ETH_RXMSG_PPE_ENTRY_MASK GENMASK(15, 0)
-
-struct airoha_qdma_desc {
- __le32 rsv;
- __le32 ctrl;
- __le32 addr;
- __le32 data;
- __le32 msg0;
- __le32 msg1;
- __le32 msg2;
- __le32 msg3;
-};
-
-/* CTRL0 */
-#define QDMA_FWD_DESC_CTX_MASK BIT(31)
-#define QDMA_FWD_DESC_RING_MASK GENMASK(30, 28)
-#define QDMA_FWD_DESC_IDX_MASK GENMASK(27, 16)
-#define QDMA_FWD_DESC_LEN_MASK GENMASK(15, 0)
-/* CTRL1 */
-#define QDMA_FWD_DESC_FIRST_IDX_MASK GENMASK(15, 0)
-/* CTRL2 */
-#define QDMA_FWD_DESC_MORE_PKT_NUM_MASK GENMASK(2, 0)
-
-struct airoha_qdma_fwd_desc {
- __le32 addr;
- __le32 ctrl0;
- __le32 ctrl1;
- __le32 ctrl2;
- __le32 msg0;
- __le32 msg1;
- __le32 rsv0;
- __le32 rsv1;
-};
-
-enum {
- QDMA_INT_REG_IDX0,
- QDMA_INT_REG_IDX1,
- QDMA_INT_REG_IDX2,
- QDMA_INT_REG_IDX3,
- QDMA_INT_REG_IDX4,
- QDMA_INT_REG_MAX
-};
-
-enum {
- XSI_PCIE0_PORT,
- XSI_PCIE1_PORT,
- XSI_USB_PORT,
- XSI_AE_PORT,
- XSI_ETH_PORT,
-};
-
-enum {
- XSI_PCIE0_VIP_PORT_MASK = BIT(22),
- XSI_PCIE1_VIP_PORT_MASK = BIT(23),
- XSI_USB_VIP_PORT_MASK = BIT(25),
- XSI_ETH_VIP_PORT_MASK = BIT(24),
-};
-
-enum {
- DEV_STATE_INITIALIZED,
-};
-
-enum {
- CDM_CRSN_QSEL_Q1 = 1,
- CDM_CRSN_QSEL_Q5 = 5,
- CDM_CRSN_QSEL_Q6 = 6,
- CDM_CRSN_QSEL_Q15 = 15,
-};
-
-enum {
- CRSN_08 = 0x8,
- CRSN_21 = 0x15, /* KA */
- CRSN_22 = 0x16, /* hit bind and force route to CPU */
- CRSN_24 = 0x18,
- CRSN_25 = 0x19,
-};
-
-enum {
- FE_PSE_PORT_CDM1,
- FE_PSE_PORT_GDM1,
- FE_PSE_PORT_GDM2,
- FE_PSE_PORT_GDM3,
- FE_PSE_PORT_PPE1,
- FE_PSE_PORT_CDM2,
- FE_PSE_PORT_CDM3,
- FE_PSE_PORT_CDM4,
- FE_PSE_PORT_PPE2,
- FE_PSE_PORT_GDM4,
- FE_PSE_PORT_CDM5,
- FE_PSE_PORT_DROP = 0xf,
-};
-
-enum tx_sched_mode {
- TC_SCH_WRR8,
- TC_SCH_SP,
- TC_SCH_WRR7,
- TC_SCH_WRR6,
- TC_SCH_WRR5,
- TC_SCH_WRR4,
- TC_SCH_WRR3,
- TC_SCH_WRR2,
-};
-
-enum trtcm_param_type {
- TRTCM_MISC_MODE, /* meter_en, pps_mode, tick_sel */
- TRTCM_TOKEN_RATE_MODE,
- TRTCM_BUCKETSIZE_SHIFT_MODE,
- TRTCM_BUCKET_COUNTER_MODE,
-};
-
-enum trtcm_mode_type {
- TRTCM_COMMIT_MODE,
- TRTCM_PEAK_MODE,
-};
-
-enum trtcm_param {
- TRTCM_TICK_SEL = BIT(0),
- TRTCM_PKT_MODE = BIT(1),
- TRTCM_METER_MODE = BIT(2),
-};
-
-#define MIN_TOKEN_SIZE 4096
-#define MAX_TOKEN_SIZE_OFFSET 17
-#define TRTCM_TOKEN_RATE_MASK GENMASK(23, 6)
-#define TRTCM_TOKEN_RATE_FRACTION_MASK GENMASK(5, 0)
-
-struct airoha_queue_entry {
- union {
- void *buf;
- struct sk_buff *skb;
- };
- dma_addr_t dma_addr;
- u16 dma_len;
-};
-
-struct airoha_queue {
- struct airoha_qdma *qdma;
-
- /* protect concurrent queue accesses */
- spinlock_t lock;
- struct airoha_queue_entry *entry;
- struct airoha_qdma_desc *desc;
- u16 head;
- u16 tail;
-
- int queued;
- int ndesc;
- int free_thr;
- int buf_size;
-
- struct napi_struct napi;
- struct page_pool *page_pool;
-};
-
-struct airoha_tx_irq_queue {
- struct airoha_qdma *qdma;
-
- struct napi_struct napi;
-
- int size;
- u32 *q;
-};
-
-struct airoha_hw_stats {
- /* protect concurrent hw_stats accesses */
- spinlock_t lock;
- struct u64_stats_sync syncp;
-
- /* get_stats64 */
- u64 rx_ok_pkts;
- u64 tx_ok_pkts;
- u64 rx_ok_bytes;
- u64 tx_ok_bytes;
- u64 rx_multicast;
- u64 rx_errors;
- u64 rx_drops;
- u64 tx_drops;
- u64 rx_crc_error;
- u64 rx_over_errors;
- /* ethtool stats */
- u64 tx_broadcast;
- u64 tx_multicast;
- u64 tx_len[7];
- u64 rx_broadcast;
- u64 rx_fragment;
- u64 rx_jabber;
- u64 rx_len[7];
-};
-
-struct airoha_qdma {
- struct airoha_eth *eth;
- void __iomem *regs;
-
- /* protect concurrent irqmask accesses */
- spinlock_t irq_lock;
- u32 irqmask[QDMA_INT_REG_MAX];
- int irq;
+#include "airoha_regs.h"
+#include "airoha_eth.h"
- struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
-
- struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
- struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
-
- /* descriptor and packet buffers for qdma hw forward */
- struct {
- void *desc;
- void *q;
- } hfwd;
-};
-
-struct airoha_gdm_port {
- struct airoha_qdma *qdma;
- struct net_device *dev;
- int id;
-
- struct airoha_hw_stats stats;
-
- DECLARE_BITMAP(qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS);
-
- /* qos stats counters */
- u64 cpu_tx_packets;
- u64 fwd_tx_packets;
-};
-
-struct airoha_eth {
- struct device *dev;
-
- unsigned long state;
- void __iomem *fe_regs;
-
- struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
- struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];
-
- struct net_device *napi_dev;
-
- struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA];
- struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
-};
-
-static u32 airoha_rr(void __iomem *base, u32 offset)
+u32 airoha_rr(void __iomem *base, u32 offset)
{
return readl(base + offset);
}
-static void airoha_wr(void __iomem *base, u32 offset, u32 val)
+void airoha_wr(void __iomem *base, u32 offset, u32 val)
{
writel(val, base + offset);
}
-static u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
+u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
{
val |= (airoha_rr(base, offset) & ~mask);
airoha_wr(base, offset, val);
@@ -929,28 +34,6 @@ static u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
return val;
}
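As a side note, here is a minimal usage sketch (editorial, not part of the patch; the sketch_* name is hypothetical) of how this read-modify-write helper pairs with FIELD_PREP() to update a single register field. REG_GDM_LEN_CFG and GDM_LONG_LEN_MASK are existing driver definitions used elsewhere in this series:

	/* Sketch only: rewrite the long-packet length field of a GDM port
	 * while leaving the remaining bits of the register untouched.
	 */
	static inline void sketch_set_gdm_long_len(struct airoha_eth *eth,
						   int port, u32 len)
	{
		airoha_fe_rmw(eth, REG_GDM_LEN_CFG(port), GDM_LONG_LEN_MASK,
			      FIELD_PREP(GDM_LONG_LEN_MASK, len));
	}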
-#define airoha_fe_rr(eth, offset) \
- airoha_rr((eth)->fe_regs, (offset))
-#define airoha_fe_wr(eth, offset, val) \
- airoha_wr((eth)->fe_regs, (offset), (val))
-#define airoha_fe_rmw(eth, offset, mask, val) \
- airoha_rmw((eth)->fe_regs, (offset), (mask), (val))
-#define airoha_fe_set(eth, offset, val) \
- airoha_rmw((eth)->fe_regs, (offset), 0, (val))
-#define airoha_fe_clear(eth, offset, val) \
- airoha_rmw((eth)->fe_regs, (offset), (val), 0)
-
-#define airoha_qdma_rr(qdma, offset) \
- airoha_rr((qdma)->regs, (offset))
-#define airoha_qdma_wr(qdma, offset, val) \
- airoha_wr((qdma)->regs, (offset), (val))
-#define airoha_qdma_rmw(qdma, offset, mask, val) \
- airoha_rmw((qdma)->regs, (offset), (mask), (val))
-#define airoha_qdma_set(qdma, offset, val) \
- airoha_rmw((qdma)->regs, (offset), 0, (val))
-#define airoha_qdma_clear(qdma, offset, val) \
- airoha_rmw((qdma)->regs, (offset), (val), 0)
-
static void airoha_qdma_set_irqmask(struct airoha_qdma *qdma, int index,
u32 clear, u32 set)
{
@@ -1021,30 +104,23 @@ static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr,
FIELD_PREP(GDM_UCFQ_MASK, val));
}
-static int airoha_set_gdm_port(struct airoha_eth *eth, int port, bool enable)
+static int airoha_set_vip_for_gdm_port(struct airoha_gdm_port *port,
+ bool enable)
{
- u32 val = enable ? FE_PSE_PORT_PPE1 : FE_PSE_PORT_DROP;
- u32 vip_port, cfg_addr;
+ struct airoha_eth *eth = port->qdma->eth;
+ u32 vip_port;
- switch (port) {
- case XSI_PCIE0_PORT:
+ switch (port->id) {
+ case 3:
+ /* FIXME: handle XSI_PCIE1_PORT */
vip_port = XSI_PCIE0_VIP_PORT_MASK;
- cfg_addr = REG_GDM_FWD_CFG(3);
- break;
- case XSI_PCIE1_PORT:
- vip_port = XSI_PCIE1_VIP_PORT_MASK;
- cfg_addr = REG_GDM_FWD_CFG(3);
- break;
- case XSI_USB_PORT:
- vip_port = XSI_USB_VIP_PORT_MASK;
- cfg_addr = REG_GDM_FWD_CFG(4);
break;
- case XSI_ETH_PORT:
+ case 4:
+ /* FIXME: handle XSI_USB_PORT */
vip_port = XSI_ETH_VIP_PORT_MASK;
- cfg_addr = REG_GDM_FWD_CFG(4);
break;
default:
- return -EINVAL;
+ return 0;
}
if (enable) {
@@ -1055,51 +131,17 @@ static int airoha_set_gdm_port(struct airoha_eth *eth, int port, bool enable)
airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, vip_port);
}
- airoha_set_gdm_port_fwd_cfg(eth, cfg_addr, val);
-
return 0;
}
-static int airoha_set_gdm_ports(struct airoha_eth *eth, bool enable)
-{
- const int port_list[] = {
- XSI_PCIE0_PORT,
- XSI_PCIE1_PORT,
- XSI_USB_PORT,
- XSI_ETH_PORT
- };
- int i, err;
-
- for (i = 0; i < ARRAY_SIZE(port_list); i++) {
- err = airoha_set_gdm_port(eth, port_list[i], enable);
- if (err)
- goto error;
- }
-
- return 0;
-
-error:
- for (i--; i >= 0; i--)
- airoha_set_gdm_port(eth, port_list[i], false);
-
- return err;
-}
-
static void airoha_fe_maccr_init(struct airoha_eth *eth)
{
int p;
- for (p = 1; p <= ARRAY_SIZE(eth->ports); p++) {
+ for (p = 1; p <= ARRAY_SIZE(eth->ports); p++)
airoha_fe_set(eth, REG_GDM_FWD_CFG(p),
GDM_TCP_CKSUM | GDM_UDP_CKSUM | GDM_IP4_CKSUM |
GDM_DROP_CRC_ERR);
- airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(p),
- FE_PSE_PORT_CDM1);
- airoha_fe_rmw(eth, REG_GDM_LEN_CFG(p),
- GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
- FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
- FIELD_PREP(GDM_LONG_LEN_MASK, 4004));
- }
airoha_fe_rmw(eth, REG_CDM1_VLAN_CTRL, CDM1_VLAN_MASK,
FIELD_PREP(CDM1_VLAN_MASK, 0x8100));
@@ -1547,7 +589,7 @@ static int airoha_qdma_get_gdm_port(struct airoha_eth *eth,
sport = FIELD_GET(QDMA_ETH_RXMSG_SPORT_MASK, msg1);
switch (sport) {
- case 0x10 ... 0x13:
+ case 0x10 ... 0x14:
port = 0;
break;
case 0x2 ... 0x4:
@@ -1571,10 +613,12 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
while (done < budget) {
struct airoha_queue_entry *e = &q->entry[q->tail];
struct airoha_qdma_desc *desc = &q->desc[q->tail];
+ u32 hash, reason, msg1 = le32_to_cpu(desc->msg1);
dma_addr_t dma_addr = le32_to_cpu(desc->addr);
+ struct page *page = virt_to_head_page(e->buf);
u32 desc_ctrl = le32_to_cpu(desc->ctrl);
- struct sk_buff *skb;
- int len, p;
+ struct airoha_gdm_port *port;
+ int data_len, len, p;
if (!(desc_ctrl & QDMA_DESC_DONE_MASK))
break;
@@ -1592,32 +636,74 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
dma_sync_single_for_cpu(eth->dev, dma_addr,
SKB_WITH_OVERHEAD(q->buf_size), dir);
+ data_len = q->skb ? q->buf_size
+ : SKB_WITH_OVERHEAD(q->buf_size);
+ if (data_len < len)
+ goto free_frag;
+
p = airoha_qdma_get_gdm_port(eth, desc);
- if (p < 0 || !eth->ports[p]) {
- page_pool_put_full_page(q->page_pool,
- virt_to_head_page(e->buf),
- true);
- continue;
+ if (p < 0 || !eth->ports[p])
+ goto free_frag;
+
+ port = eth->ports[p];
+ if (!q->skb) { /* first buffer */
+ q->skb = napi_build_skb(e->buf, q->buf_size);
+ if (!q->skb)
+ goto free_frag;
+
+ __skb_put(q->skb, len);
+ skb_mark_for_recycle(q->skb);
+ q->skb->dev = port->dev;
+ q->skb->protocol = eth_type_trans(q->skb, port->dev);
+ q->skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb_record_rx_queue(q->skb, qid);
+ } else { /* scattered frame */
+ struct skb_shared_info *shinfo = skb_shinfo(q->skb);
+ int nr_frags = shinfo->nr_frags;
+
+ if (nr_frags >= ARRAY_SIZE(shinfo->frags))
+ goto free_frag;
+
+ skb_add_rx_frag(q->skb, nr_frags, page,
+ e->buf - page_address(page), len,
+ q->buf_size);
}
- skb = napi_build_skb(e->buf, q->buf_size);
- if (!skb) {
- page_pool_put_full_page(q->page_pool,
- virt_to_head_page(e->buf),
- true);
- break;
+ if (FIELD_GET(QDMA_DESC_MORE_MASK, desc_ctrl))
+ continue;
+
+ if (netdev_uses_dsa(port->dev)) {
+			/* The PPE module requires untagged packets to work
+			 * properly and provides the DSA port index via the
+			 * DMA descriptor. Report the DSA tag to the DSA
+			 * stack via skb dst info.
+			 */
+ u32 sptag = FIELD_GET(QDMA_ETH_RXMSG_SPTAG,
+ le32_to_cpu(desc->msg0));
+
+ if (sptag < ARRAY_SIZE(port->dsa_meta) &&
+ port->dsa_meta[sptag])
+ skb_dst_set_noref(q->skb,
+ &port->dsa_meta[sptag]->dst);
}
- skb_reserve(skb, 2);
- __skb_put(skb, len);
- skb_mark_for_recycle(skb);
- skb->dev = eth->ports[p]->dev;
- skb->protocol = eth_type_trans(skb, skb->dev);
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb_record_rx_queue(skb, qid);
- napi_gro_receive(&q->napi, skb);
+ hash = FIELD_GET(AIROHA_RXD4_FOE_ENTRY, msg1);
+ if (hash != AIROHA_RXD4_FOE_ENTRY)
+ skb_set_hash(q->skb, jhash_1word(hash, 0),
+ PKT_HASH_TYPE_L4);
+
+ reason = FIELD_GET(AIROHA_RXD4_PPE_CPU_REASON, msg1);
+ if (reason == PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
+ airoha_ppe_check_skb(eth->ppe, hash);
done++;
+ napi_gro_receive(&q->napi, q->skb);
+ q->skb = NULL;
+ continue;
+free_frag:
+ page_pool_put_full_page(q->page_pool, page, true);
+ dev_kfree_skb(q->skb);
+ q->skb = NULL;
}
airoha_qdma_fill_rx_queue(q);
@@ -1692,6 +778,7 @@ static int airoha_qdma_init_rx_queue(struct airoha_queue *q,
FIELD_PREP(RX_RING_THR_MASK, thr));
airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head));
+ airoha_qdma_set(qdma, REG_RX_SCATTER_CFG(qid), RX_RING_SG_EN_MASK);
airoha_qdma_fill_rx_queue(q);
@@ -2091,7 +1178,6 @@ static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
}
airoha_qdma_wr(qdma, REG_QDMA_GLOBAL_CFG,
- GLOBAL_CFG_RX_2B_OFFSET_MASK |
FIELD_PREP(GLOBAL_CFG_DMA_PREFERENCE_MASK, 3) |
GLOBAL_CFG_CPU_TXR_RR_MASK |
GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK |
@@ -2235,6 +1321,10 @@ static int airoha_hw_init(struct platform_device *pdev,
return err;
}
+ err = airoha_ppe_init(eth);
+ if (err)
+ return err;
+
set_bit(DEV_STATE_INITIALIZED, &eth->state);
return 0;
@@ -2441,12 +1531,12 @@ static void airoha_update_hw_stats(struct airoha_gdm_port *port)
static int airoha_dev_open(struct net_device *dev)
{
+ int err, len = ETH_HLEN + dev->mtu + ETH_FCS_LEN;
struct airoha_gdm_port *port = netdev_priv(dev);
struct airoha_qdma *qdma = port->qdma;
- int err;
netif_tx_start_all_queues(dev);
- err = airoha_set_gdm_ports(qdma->eth, true);
+ err = airoha_set_vip_for_gdm_port(port, true);
if (err)
return err;
@@ -2457,9 +1547,15 @@ static int airoha_dev_open(struct net_device *dev)
airoha_fe_clear(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
GDM_STAG_EN_MASK);
+ airoha_fe_rmw(qdma->eth, REG_GDM_LEN_CFG(port->id),
+ GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
+ FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
+ FIELD_PREP(GDM_LONG_LEN_MASK, len));
+
airoha_qdma_set(qdma, REG_QDMA_GLOBAL_CFG,
GLOBAL_CFG_TX_DMA_EN_MASK |
GLOBAL_CFG_RX_DMA_EN_MASK);
+ atomic_inc(&qdma->users);
return 0;
}
@@ -2471,20 +1567,24 @@ static int airoha_dev_stop(struct net_device *dev)
int i, err;
netif_tx_disable(dev);
- err = airoha_set_gdm_ports(qdma->eth, false);
+ err = airoha_set_vip_for_gdm_port(port, false);
if (err)
return err;
- airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG,
- GLOBAL_CFG_TX_DMA_EN_MASK |
- GLOBAL_CFG_RX_DMA_EN_MASK);
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++)
+ netdev_tx_reset_subqueue(dev, i);
- for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
- if (!qdma->q_tx[i].ndesc)
- continue;
+ if (atomic_dec_and_test(&qdma->users)) {
+ airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG,
+ GLOBAL_CFG_TX_DMA_EN_MASK |
+ GLOBAL_CFG_RX_DMA_EN_MASK);
- airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
- netdev_tx_reset_subqueue(dev, i);
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
+ if (!qdma->q_tx[i].ndesc)
+ continue;
+
+ airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
+ }
}
return 0;
@@ -2504,12 +1604,82 @@ static int airoha_dev_set_macaddr(struct net_device *dev, void *p)
return 0;
}
+static void airoha_set_gdm2_loopback(struct airoha_gdm_port *port)
+{
+ u32 pse_port = port->id == 3 ? FE_PSE_PORT_GDM3 : FE_PSE_PORT_GDM4;
+ struct airoha_eth *eth = port->qdma->eth;
+ u32 chan = port->id == 3 ? 4 : 0;
+
+ /* Forward the traffic to the proper GDM port */
+ airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(2), pse_port);
+ airoha_fe_clear(eth, REG_GDM_FWD_CFG(2), GDM_STRIP_CRC);
+
+ /* Enable GDM2 loopback */
+ airoha_fe_wr(eth, REG_GDM_TXCHN_EN(2), 0xffffffff);
+ airoha_fe_wr(eth, REG_GDM_RXCHN_EN(2), 0xffff);
+ airoha_fe_rmw(eth, REG_GDM_LPBK_CFG(2),
+ LPBK_CHAN_MASK | LPBK_MODE_MASK | LPBK_EN_MASK,
+ FIELD_PREP(LPBK_CHAN_MASK, chan) | LPBK_EN_MASK);
+ airoha_fe_rmw(eth, REG_GDM_LEN_CFG(2),
+ GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
+ FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
+ FIELD_PREP(GDM_LONG_LEN_MASK, AIROHA_MAX_MTU));
+
+ /* Disable VIP and IFC for GDM2 */
+ airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, BIT(2));
+ airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, BIT(2));
+
+ if (port->id == 3) {
+		/* FIXME: handle XSI_PCIE1_PORT */
+ airoha_fe_wr(eth, REG_PPE_DFT_CPORT0(0), 0x5500);
+ airoha_fe_rmw(eth, REG_FE_WAN_PORT,
+ WAN1_EN_MASK | WAN1_MASK | WAN0_MASK,
+ FIELD_PREP(WAN0_MASK, HSGMII_LAN_PCIE0_SRCPORT));
+ airoha_fe_rmw(eth,
+ REG_SP_DFT_CPORT(HSGMII_LAN_PCIE0_SRCPORT >> 3),
+ SP_CPORT_PCIE0_MASK,
+ FIELD_PREP(SP_CPORT_PCIE0_MASK,
+ FE_PSE_PORT_CDM2));
+ } else {
+ /* FIXME: handle XSI_USB_PORT */
+ airoha_fe_rmw(eth, REG_SRC_PORT_FC_MAP6,
+ FC_ID_OF_SRC_PORT24_MASK,
+ FIELD_PREP(FC_ID_OF_SRC_PORT24_MASK, 2));
+ airoha_fe_rmw(eth, REG_FE_WAN_PORT,
+ WAN1_EN_MASK | WAN1_MASK | WAN0_MASK,
+ FIELD_PREP(WAN0_MASK, HSGMII_LAN_ETH_SRCPORT));
+ airoha_fe_rmw(eth,
+ REG_SP_DFT_CPORT(HSGMII_LAN_ETH_SRCPORT >> 3),
+ SP_CPORT_ETH_MASK,
+ FIELD_PREP(SP_CPORT_ETH_MASK, FE_PSE_PORT_CDM2));
+ }
+}
+
static int airoha_dev_init(struct net_device *dev)
{
struct airoha_gdm_port *port = netdev_priv(dev);
+ struct airoha_eth *eth = port->qdma->eth;
+ u32 pse_port;
airoha_set_macaddr(port, dev->dev_addr);
+ switch (port->id) {
+ case 3:
+ case 4:
+ /* If GDM2 is active we can't enable loopback */
+ if (!eth->ports[1])
+			airoha_set_gdm2_loopback(port);
+ fallthrough;
+ case 2:
+ pse_port = FE_PSE_PORT_PPE2;
+ break;
+ default:
+ pse_port = FE_PSE_PORT_PPE1;
+ break;
+ }
+
+ airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(port->id), pse_port);
+
return 0;
}
@@ -2535,6 +1705,20 @@ static void airoha_dev_get_stats64(struct net_device *dev,
} while (u64_stats_fetch_retry(&port->stats.syncp, start));
}
+static int airoha_dev_change_mtu(struct net_device *dev, int mtu)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ struct airoha_eth *eth = port->qdma->eth;
+ u32 len = ETH_HLEN + mtu + ETH_FCS_LEN;
+
+ airoha_fe_rmw(eth, REG_GDM_LEN_CFG(port->id),
+ GDM_LONG_LEN_MASK,
+ FIELD_PREP(GDM_LONG_LEN_MASK, len));
+ WRITE_ONCE(dev->mtu, mtu);
+
+ return 0;
+}
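For reference: with the default MTU of 1500, len works out to 14 (ETH_HLEN) + 1500 + 4 (ETH_FCS_LEN) = 1518 bytes, matching the threshold programmed by airoha_dev_open() above.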
+
static u16 airoha_dev_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{
@@ -2553,26 +1737,71 @@ static u16 airoha_dev_select_queue(struct net_device *dev, struct sk_buff *skb,
return queue < dev->num_tx_queues ? queue : 0;
}
+static u32 airoha_get_dsa_tag(struct sk_buff *skb, struct net_device *dev)
+{
+#if IS_ENABLED(CONFIG_NET_DSA)
+ struct ethhdr *ehdr;
+ u8 xmit_tpid;
+ u16 tag;
+
+ if (!netdev_uses_dsa(dev))
+ return 0;
+
+ if (dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
+ return 0;
+
+ if (skb_cow_head(skb, 0))
+ return 0;
+
+ ehdr = (struct ethhdr *)skb->data;
+ tag = be16_to_cpu(ehdr->h_proto);
+ xmit_tpid = tag >> 8;
+
+ switch (xmit_tpid) {
+ case MTK_HDR_XMIT_TAGGED_TPID_8100:
+ ehdr->h_proto = cpu_to_be16(ETH_P_8021Q);
+ tag &= ~(MTK_HDR_XMIT_TAGGED_TPID_8100 << 8);
+ break;
+ case MTK_HDR_XMIT_TAGGED_TPID_88A8:
+ ehdr->h_proto = cpu_to_be16(ETH_P_8021AD);
+ tag &= ~(MTK_HDR_XMIT_TAGGED_TPID_88A8 << 8);
+ break;
+ default:
+		/* The PPE module requires untagged DSA packets to work
+		 * properly, so move the DSA tag to the DMA descriptor.
+		 */
+ memmove(skb->data + MTK_HDR_LEN, skb->data, 2 * ETH_ALEN);
+ __skb_pull(skb, MTK_HDR_LEN);
+ break;
+ }
+
+ return tag;
+#else
+ return 0;
+#endif
+}
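The tag returned here is carried out-of-band to the hardware through the QDMA_ETH_TXMSG_SP_TAG_MASK field of TX MSG0, filled in by airoha_dev_xmit() below.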
+
static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- struct skb_shared_info *sinfo = skb_shinfo(skb);
struct airoha_gdm_port *port = netdev_priv(dev);
- u32 msg0, msg1, len = skb_headlen(skb);
struct airoha_qdma *qdma = port->qdma;
- u32 nr_frags = 1 + sinfo->nr_frags;
+ u32 nr_frags, tag, msg0, msg1, len;
struct netdev_queue *txq;
struct airoha_queue *q;
- void *data = skb->data;
+ void *data;
int i, qid;
u16 index;
u8 fport;
qid = skb_get_queue_mapping(skb) % ARRAY_SIZE(qdma->q_tx);
+ tag = airoha_get_dsa_tag(skb, dev);
+
msg0 = FIELD_PREP(QDMA_ETH_TXMSG_CHAN_MASK,
qid / AIROHA_NUM_QOS_QUEUES) |
FIELD_PREP(QDMA_ETH_TXMSG_QUEUE_MASK,
- qid % AIROHA_NUM_QOS_QUEUES);
+ qid % AIROHA_NUM_QOS_QUEUES) |
+ FIELD_PREP(QDMA_ETH_TXMSG_SP_TAG_MASK, tag);
if (skb->ip_summed == CHECKSUM_PARTIAL)
msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TCO_MASK, 1) |
FIELD_PREP(QDMA_ETH_TXMSG_UCO_MASK, 1) |
@@ -2583,8 +1812,9 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
if (skb_cow_head(skb, 0))
goto error;
- if (sinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
- __be16 csum = cpu_to_be16(sinfo->gso_size);
+ if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 |
+ SKB_GSO_TCPV6)) {
+ __be16 csum = cpu_to_be16(skb_shinfo(skb)->gso_size);
tcp_hdr(skb)->check = (__force __sum16)csum;
msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TSO_MASK, 1);
@@ -2602,6 +1832,8 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
spin_lock_bh(&q->lock);
txq = netdev_get_tx_queue(dev, qid);
+ nr_frags = 1 + skb_shinfo(skb)->nr_frags;
+
if (q->queued + nr_frags > q->ndesc) {
/* not enough space in the queue */
netif_tx_stop_queue(txq);
@@ -2609,11 +1841,14 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
+ len = skb_headlen(skb);
+ data = skb->data;
index = q->head;
+
for (i = 0; i < nr_frags; i++) {
struct airoha_qdma_desc *desc = &q->desc[index];
struct airoha_queue_entry *e = &q->entry[index];
- skb_frag_t *frag = &sinfo->frags[i];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
dma_addr_t addr;
u32 val;
@@ -3038,6 +2273,47 @@ static int airoha_tc_htb_alloc_leaf_queue(struct airoha_gdm_port *port,
return 0;
}
+static int airoha_dev_setup_tc_block(struct airoha_gdm_port *port,
+ struct flow_block_offload *f)
+{
+ flow_setup_cb_t *cb = airoha_ppe_setup_tc_block_cb;
+ static LIST_HEAD(block_cb_list);
+ struct flow_block_cb *block_cb;
+
+ if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ f->driver_block_list = &block_cb_list;
+ switch (f->command) {
+ case FLOW_BLOCK_BIND:
+ block_cb = flow_block_cb_lookup(f->block, cb, port->dev);
+ if (block_cb) {
+ flow_block_cb_incref(block_cb);
+ return 0;
+ }
+ block_cb = flow_block_cb_alloc(cb, port->dev, port->dev, NULL);
+ if (IS_ERR(block_cb))
+ return PTR_ERR(block_cb);
+
+ flow_block_cb_incref(block_cb);
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list, &block_cb_list);
+ return 0;
+ case FLOW_BLOCK_UNBIND:
+ block_cb = flow_block_cb_lookup(f->block, cb, port->dev);
+ if (!block_cb)
+ return -ENOENT;
+
+ if (!flow_block_cb_decref(block_cb)) {
+ flow_block_cb_remove(block_cb, f);
+ list_del(&block_cb->driver_list);
+ }
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static void airoha_tc_remove_htb_queue(struct airoha_gdm_port *port, int queue)
{
struct net_device *dev = port->dev;
@@ -3121,6 +2397,9 @@ static int airoha_dev_tc_setup(struct net_device *dev, enum tc_setup_type type,
return airoha_tc_setup_qdisc_ets(port, type_data);
case TC_SETUP_QDISC_HTB:
return airoha_tc_setup_qdisc_htb(port, type_data);
+ case TC_SETUP_BLOCK:
+ case TC_SETUP_FT:
+ return airoha_dev_setup_tc_block(port, type_data);
default:
return -EOPNOTSUPP;
}
@@ -3130,6 +2409,7 @@ static const struct net_device_ops airoha_netdev_ops = {
.ndo_init = airoha_dev_init,
.ndo_open = airoha_dev_open,
.ndo_stop = airoha_dev_stop,
+ .ndo_change_mtu = airoha_dev_change_mtu,
.ndo_select_queue = airoha_dev_select_queue,
.ndo_start_xmit = airoha_dev_xmit,
.ndo_get_stats64 = airoha_dev_get_stats64,
@@ -3143,13 +2423,45 @@ static const struct ethtool_ops airoha_ethtool_ops = {
.get_rmon_stats = airoha_ethtool_get_rmon_stats,
};
-static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
+static int airoha_metadata_dst_alloc(struct airoha_gdm_port *port)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(port->dsa_meta); i++) {
+ struct metadata_dst *md_dst;
+
+ md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
+ GFP_KERNEL);
+ if (!md_dst)
+ return -ENOMEM;
+
+ md_dst->u.port_info.port_id = i;
+ port->dsa_meta[i] = md_dst;
+ }
+
+ return 0;
+}
+
+static void airoha_metadata_dst_free(struct airoha_gdm_port *port)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(port->dsa_meta); i++) {
+ if (!port->dsa_meta[i])
+ continue;
+
+ metadata_dst_free(port->dsa_meta[i]);
+ }
+}
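These per-port metadata_dst objects back the RX fast path: airoha_qdma_rx_process() looks up dsa_meta[] by the source-port tag taken from the DMA descriptor and attaches the matching entry via skb_dst_set_noref().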
+
+static int airoha_alloc_gdm_port(struct airoha_eth *eth,
+ struct device_node *np, int index)
{
const __be32 *id_ptr = of_get_property(np, "reg", NULL);
struct airoha_gdm_port *port;
struct airoha_qdma *qdma;
struct net_device *dev;
- int err, index;
+ int err, p;
u32 id;
if (!id_ptr) {
@@ -3158,14 +2470,14 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
}
id = be32_to_cpup(id_ptr);
- index = id - 1;
+ p = id - 1;
if (!id || id > ARRAY_SIZE(eth->ports)) {
dev_err(eth->dev, "invalid gdm port id: %d\n", id);
return -EINVAL;
}
- if (eth->ports[index]) {
+ if (eth->ports[p]) {
dev_err(eth->dev, "duplicate gdm port id: %d\n", id);
return -EINVAL;
}
@@ -3188,6 +2500,7 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
NETIF_F_SG | NETIF_F_TSO |
NETIF_F_HW_TC;
dev->features |= dev->hw_features;
+ dev->vlan_features = dev->hw_features;
dev->dev.of_node = np;
dev->irq = qdma->irq;
SET_NETDEV_DEV(dev, eth->dev);
@@ -3213,7 +2526,11 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
port->qdma = qdma;
port->dev = dev;
port->id = id;
- eth->ports[index] = port;
+ eth->ports[p] = port;
+
+ err = airoha_metadata_dst_alloc(port);
+ if (err)
+ return err;
return register_netdev(dev);
}
@@ -3281,6 +2598,7 @@ static int airoha_probe(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
airoha_qdma_start_napi(&eth->qdma[i]);
+ i = 0;
for_each_child_of_node(pdev->dev.of_node, np) {
if (!of_device_is_compatible(np, "airoha,eth-mac"))
continue;
@@ -3288,7 +2606,7 @@ static int airoha_probe(struct platform_device *pdev)
if (!of_device_is_available(np))
continue;
- err = airoha_alloc_gdm_port(eth, np);
+ err = airoha_alloc_gdm_port(eth, np, i++);
if (err) {
of_node_put(np);
goto error_napi_stop;
@@ -3307,8 +2625,10 @@ error_hw_cleanup:
for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
struct airoha_gdm_port *port = eth->ports[i];
- if (port && port->dev->reg_state == NETREG_REGISTERED)
+ if (port && port->dev->reg_state == NETREG_REGISTERED) {
unregister_netdev(port->dev);
+ airoha_metadata_dst_free(port);
+ }
}
free_netdev(eth->napi_dev);
platform_set_drvdata(pdev, NULL);
@@ -3334,9 +2654,11 @@ static void airoha_remove(struct platform_device *pdev)
airoha_dev_stop(port->dev);
unregister_netdev(port->dev);
+ airoha_metadata_dst_free(port);
}
free_netdev(eth->napi_dev);
+ airoha_ppe_deinit(eth);
platform_set_drvdata(pdev, NULL);
}
diff --git a/drivers/net/ethernet/airoha/airoha_eth.h b/drivers/net/ethernet/airoha/airoha_eth.h
new file mode 100644
index 000000000000..60690b685710
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_eth.h
@@ -0,0 +1,552 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#ifndef AIROHA_ETH_H
+#define AIROHA_ETH_H
+
+#include <linux/debugfs.h>
+#include <linux/etherdevice.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/reset.h>
+#include <net/dsa.h>
+
+#define AIROHA_MAX_NUM_GDM_PORTS 4
+#define AIROHA_MAX_NUM_QDMA 2
+#define AIROHA_MAX_DSA_PORTS 7
+#define AIROHA_MAX_NUM_RSTS 3
+#define AIROHA_MAX_NUM_XSI_RSTS 5
+#define AIROHA_MAX_MTU 9216
+#define AIROHA_MAX_PACKET_SIZE 2048
+#define AIROHA_NUM_QOS_CHANNELS 4
+#define AIROHA_NUM_QOS_QUEUES 8
+#define AIROHA_NUM_TX_RING 32
+#define AIROHA_NUM_RX_RING 32
+#define AIROHA_NUM_NETDEV_TX_RINGS (AIROHA_NUM_TX_RING + \
+ AIROHA_NUM_QOS_CHANNELS)
+#define AIROHA_FE_MC_MAX_VLAN_TABLE 64
+#define AIROHA_FE_MC_MAX_VLAN_PORT 16
+#define AIROHA_NUM_TX_IRQ 2
+#define HW_DSCP_NUM 2048
+#define IRQ_QUEUE_LEN(_n) ((_n) ? 1024 : 2048)
+#define TX_DSCP_NUM 1024
+#define RX_DSCP_NUM(_n) \
+ ((_n) == 2 ? 128 : \
+ (_n) == 11 ? 128 : \
+ (_n) == 15 ? 128 : \
+ (_n) == 0 ? 1024 : 16)
+
+#define PSE_RSV_PAGES 128
+#define PSE_QUEUE_RSV_PAGES 64
+
+#define QDMA_METER_IDX(_n) ((_n) & 0xff)
+#define QDMA_METER_GROUP(_n) (((_n) >> 8) & 0x3)
+
+#define PPE_NUM 2
+#define PPE1_SRAM_NUM_ENTRIES (8 * 1024)
+#define PPE_SRAM_NUM_ENTRIES (2 * PPE1_SRAM_NUM_ENTRIES)
+#define PPE_DRAM_NUM_ENTRIES (16 * 1024)
+#define PPE_NUM_ENTRIES (PPE_SRAM_NUM_ENTRIES + PPE_DRAM_NUM_ENTRIES)
+#define PPE_HASH_MASK (PPE_NUM_ENTRIES - 1)
+#define PPE_ENTRY_SIZE 80
+#define PPE_RAM_NUM_ENTRIES_SHIFT(_n) (__ffs((_n) >> 10))
+
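+/* For example, PPE_RAM_NUM_ENTRIES_SHIFT(PPE_DRAM_NUM_ENTRIES) =
+ * __ffs(16384 >> 10) = __ffs(16) = 4, the encoded table-size value
+ * that airoha_ppe_hw_init() programs into PPE_DRAM_TB_NUM_ENTRY_MASK.
+ */
+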
+#define MTK_HDR_LEN 4
+#define MTK_HDR_XMIT_TAGGED_TPID_8100 1
+#define MTK_HDR_XMIT_TAGGED_TPID_88A8 2
+
+enum {
+ QDMA_INT_REG_IDX0,
+ QDMA_INT_REG_IDX1,
+ QDMA_INT_REG_IDX2,
+ QDMA_INT_REG_IDX3,
+ QDMA_INT_REG_IDX4,
+ QDMA_INT_REG_MAX
+};
+
+enum {
+ HSGMII_LAN_PCIE0_SRCPORT = 0x16,
+ HSGMII_LAN_PCIE1_SRCPORT,
+ HSGMII_LAN_ETH_SRCPORT,
+ HSGMII_LAN_USB_SRCPORT,
+};
+
+enum {
+ XSI_PCIE0_VIP_PORT_MASK = BIT(22),
+ XSI_PCIE1_VIP_PORT_MASK = BIT(23),
+ XSI_USB_VIP_PORT_MASK = BIT(25),
+ XSI_ETH_VIP_PORT_MASK = BIT(24),
+};
+
+enum {
+ DEV_STATE_INITIALIZED,
+};
+
+enum {
+ CDM_CRSN_QSEL_Q1 = 1,
+ CDM_CRSN_QSEL_Q5 = 5,
+ CDM_CRSN_QSEL_Q6 = 6,
+ CDM_CRSN_QSEL_Q15 = 15,
+};
+
+enum {
+ CRSN_08 = 0x8,
+ CRSN_21 = 0x15, /* KA */
+ CRSN_22 = 0x16, /* hit bind and force route to CPU */
+ CRSN_24 = 0x18,
+ CRSN_25 = 0x19,
+};
+
+enum {
+ FE_PSE_PORT_CDM1,
+ FE_PSE_PORT_GDM1,
+ FE_PSE_PORT_GDM2,
+ FE_PSE_PORT_GDM3,
+ FE_PSE_PORT_PPE1,
+ FE_PSE_PORT_CDM2,
+ FE_PSE_PORT_CDM3,
+ FE_PSE_PORT_CDM4,
+ FE_PSE_PORT_PPE2,
+ FE_PSE_PORT_GDM4,
+ FE_PSE_PORT_CDM5,
+ FE_PSE_PORT_DROP = 0xf,
+};
+
+enum tx_sched_mode {
+ TC_SCH_WRR8,
+ TC_SCH_SP,
+ TC_SCH_WRR7,
+ TC_SCH_WRR6,
+ TC_SCH_WRR5,
+ TC_SCH_WRR4,
+ TC_SCH_WRR3,
+ TC_SCH_WRR2,
+};
+
+enum trtcm_param_type {
+ TRTCM_MISC_MODE, /* meter_en, pps_mode, tick_sel */
+ TRTCM_TOKEN_RATE_MODE,
+ TRTCM_BUCKETSIZE_SHIFT_MODE,
+ TRTCM_BUCKET_COUNTER_MODE,
+};
+
+enum trtcm_mode_type {
+ TRTCM_COMMIT_MODE,
+ TRTCM_PEAK_MODE,
+};
+
+enum trtcm_param {
+ TRTCM_TICK_SEL = BIT(0),
+ TRTCM_PKT_MODE = BIT(1),
+ TRTCM_METER_MODE = BIT(2),
+};
+
+#define MIN_TOKEN_SIZE 4096
+#define MAX_TOKEN_SIZE_OFFSET 17
+#define TRTCM_TOKEN_RATE_MASK GENMASK(23, 6)
+#define TRTCM_TOKEN_RATE_FRACTION_MASK GENMASK(5, 0)
+
+struct airoha_queue_entry {
+ union {
+ void *buf;
+ struct sk_buff *skb;
+ };
+ dma_addr_t dma_addr;
+ u16 dma_len;
+};
+
+struct airoha_queue {
+ struct airoha_qdma *qdma;
+
+ /* protect concurrent queue accesses */
+ spinlock_t lock;
+ struct airoha_queue_entry *entry;
+ struct airoha_qdma_desc *desc;
+ u16 head;
+ u16 tail;
+
+ int queued;
+ int ndesc;
+ int free_thr;
+ int buf_size;
+
+ struct napi_struct napi;
+ struct page_pool *page_pool;
+ struct sk_buff *skb;
+};
+
+struct airoha_tx_irq_queue {
+ struct airoha_qdma *qdma;
+
+ struct napi_struct napi;
+
+ int size;
+ u32 *q;
+};
+
+struct airoha_hw_stats {
+ /* protect concurrent hw_stats accesses */
+ spinlock_t lock;
+ struct u64_stats_sync syncp;
+
+ /* get_stats64 */
+ u64 rx_ok_pkts;
+ u64 tx_ok_pkts;
+ u64 rx_ok_bytes;
+ u64 tx_ok_bytes;
+ u64 rx_multicast;
+ u64 rx_errors;
+ u64 rx_drops;
+ u64 tx_drops;
+ u64 rx_crc_error;
+ u64 rx_over_errors;
+ /* ethtool stats */
+ u64 tx_broadcast;
+ u64 tx_multicast;
+ u64 tx_len[7];
+ u64 rx_broadcast;
+ u64 rx_fragment;
+ u64 rx_jabber;
+ u64 rx_len[7];
+};
+
+enum {
+ PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED = 0x0f,
+};
+
+enum {
+ AIROHA_FOE_STATE_INVALID,
+ AIROHA_FOE_STATE_UNBIND,
+ AIROHA_FOE_STATE_BIND,
+ AIROHA_FOE_STATE_FIN
+};
+
+enum {
+ PPE_PKT_TYPE_IPV4_HNAPT = 0,
+ PPE_PKT_TYPE_IPV4_ROUTE = 1,
+ PPE_PKT_TYPE_BRIDGE = 2,
+ PPE_PKT_TYPE_IPV4_DSLITE = 3,
+ PPE_PKT_TYPE_IPV6_ROUTE_3T = 4,
+ PPE_PKT_TYPE_IPV6_ROUTE_5T = 5,
+ PPE_PKT_TYPE_IPV6_6RD = 7,
+};
+
+#define AIROHA_FOE_MAC_SMAC_ID GENMASK(20, 16)
+#define AIROHA_FOE_MAC_PPPOE_ID GENMASK(15, 0)
+
+struct airoha_foe_mac_info_common {
+ u16 vlan1;
+ u16 etype;
+
+ u32 dest_mac_hi;
+
+ u16 vlan2;
+ u16 dest_mac_lo;
+
+ u32 src_mac_hi;
+};
+
+struct airoha_foe_mac_info {
+ struct airoha_foe_mac_info_common common;
+
+ u16 pppoe_id;
+ u16 src_mac_lo;
+};
+
+#define AIROHA_FOE_IB1_UNBIND_PREBIND BIT(24)
+#define AIROHA_FOE_IB1_UNBIND_PACKETS GENMASK(23, 8)
+#define AIROHA_FOE_IB1_UNBIND_TIMESTAMP GENMASK(7, 0)
+
+#define AIROHA_FOE_IB1_BIND_STATIC BIT(31)
+#define AIROHA_FOE_IB1_BIND_UDP BIT(30)
+#define AIROHA_FOE_IB1_BIND_STATE GENMASK(29, 28)
+#define AIROHA_FOE_IB1_BIND_PACKET_TYPE GENMASK(27, 25)
+#define AIROHA_FOE_IB1_BIND_TTL BIT(24)
+#define AIROHA_FOE_IB1_BIND_TUNNEL_DECAP BIT(23)
+#define AIROHA_FOE_IB1_BIND_PPPOE BIT(22)
+#define AIROHA_FOE_IB1_BIND_VPM GENMASK(21, 20)
+#define AIROHA_FOE_IB1_BIND_VLAN_LAYER GENMASK(19, 16)
+#define AIROHA_FOE_IB1_BIND_KEEPALIVE BIT(15)
+#define AIROHA_FOE_IB1_BIND_TIMESTAMP GENMASK(14, 0)
+
+#define AIROHA_FOE_IB2_DSCP GENMASK(31, 24)
+#define AIROHA_FOE_IB2_PORT_AG GENMASK(23, 13)
+#define AIROHA_FOE_IB2_PCP BIT(12)
+#define AIROHA_FOE_IB2_MULTICAST BIT(11)
+#define AIROHA_FOE_IB2_FAST_PATH BIT(10)
+#define AIROHA_FOE_IB2_PSE_QOS BIT(9)
+#define AIROHA_FOE_IB2_PSE_PORT GENMASK(8, 5)
+#define AIROHA_FOE_IB2_NBQ GENMASK(4, 0)
+
+#define AIROHA_FOE_ACTDP GENMASK(31, 24)
+#define AIROHA_FOE_SHAPER_ID GENMASK(23, 16)
+#define AIROHA_FOE_CHANNEL GENMASK(15, 11)
+#define AIROHA_FOE_QID GENMASK(10, 8)
+#define AIROHA_FOE_DPI BIT(7)
+#define AIROHA_FOE_TUNNEL BIT(6)
+#define AIROHA_FOE_TUNNEL_ID GENMASK(5, 0)
+
+struct airoha_foe_bridge {
+ u32 dest_mac_hi;
+
+ u16 src_mac_hi;
+ u16 dest_mac_lo;
+
+ u32 src_mac_lo;
+
+ u32 ib2;
+
+ u32 rsv[5];
+
+ u32 data;
+
+ struct airoha_foe_mac_info l2;
+};
+
+struct airoha_foe_ipv4_tuple {
+ u32 src_ip;
+ u32 dest_ip;
+ union {
+ struct {
+ u16 dest_port;
+ u16 src_port;
+ };
+ struct {
+ u8 protocol;
+ u8 _pad[3]; /* fill with 0xa5a5a5 */
+ };
+ u32 ports;
+ };
+};
+
+struct airoha_foe_ipv4 {
+ struct airoha_foe_ipv4_tuple orig_tuple;
+
+ u32 ib2;
+
+ struct airoha_foe_ipv4_tuple new_tuple;
+
+ u32 rsv[2];
+
+ u32 data;
+
+ struct airoha_foe_mac_info l2;
+};
+
+struct airoha_foe_ipv4_dslite {
+ struct airoha_foe_ipv4_tuple ip4;
+
+ u32 ib2;
+
+ u8 flow_label[3];
+ u8 priority;
+
+ u32 rsv[4];
+
+ u32 data;
+
+ struct airoha_foe_mac_info l2;
+};
+
+struct airoha_foe_ipv6 {
+ u32 src_ip[4];
+ u32 dest_ip[4];
+
+ union {
+ struct {
+ u16 dest_port;
+ u16 src_port;
+ };
+ struct {
+ u8 protocol;
+ u8 pad[3];
+ };
+ u32 ports;
+ };
+
+ u32 data;
+
+ u32 ib2;
+
+ struct airoha_foe_mac_info_common l2;
+};
+
+struct airoha_foe_entry {
+ union {
+ struct {
+ u32 ib1;
+ union {
+ struct airoha_foe_bridge bridge;
+ struct airoha_foe_ipv4 ipv4;
+ struct airoha_foe_ipv4_dslite dslite;
+ struct airoha_foe_ipv6 ipv6;
+ DECLARE_FLEX_ARRAY(u32, d);
+ };
+ };
+ u8 data[PPE_ENTRY_SIZE];
+ };
+};
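A hedged sketch (editorial, not part of the patch; the sketch_* name is hypothetical) of how the ib1 masks above are meant to be decoded; airoha_ppe.c applies the same FIELD_GET() pattern to this word:

	/* Sketch only: a FOE entry is active once its ib1 state field
	 * reaches AIROHA_FOE_STATE_BIND.
	 */
	static inline bool
	sketch_foe_entry_is_bound(const struct airoha_foe_entry *hwe)
	{
		return FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1) ==
		       AIROHA_FOE_STATE_BIND;
	}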
+
+struct airoha_flow_data {
+ struct ethhdr eth;
+
+ union {
+ struct {
+ __be32 src_addr;
+ __be32 dst_addr;
+ } v4;
+
+ struct {
+ struct in6_addr src_addr;
+ struct in6_addr dst_addr;
+ } v6;
+ };
+
+ __be16 src_port;
+ __be16 dst_port;
+
+ struct {
+ struct {
+ u16 id;
+ __be16 proto;
+ } hdr[2];
+ u8 num;
+ } vlan;
+ struct {
+ u16 sid;
+ u8 num;
+ } pppoe;
+};
+
+struct airoha_flow_table_entry {
+ struct hlist_node list;
+
+ struct airoha_foe_entry data;
+ u32 hash;
+
+ struct rhash_head node;
+ unsigned long cookie;
+};
+
+struct airoha_qdma {
+ struct airoha_eth *eth;
+ void __iomem *regs;
+
+ /* protect concurrent irqmask accesses */
+ spinlock_t irq_lock;
+ u32 irqmask[QDMA_INT_REG_MAX];
+ int irq;
+
+ atomic_t users;
+
+ struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
+
+ struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
+ struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
+
+ /* descriptor and packet buffers for qdma hw forward */
+ struct {
+ void *desc;
+ void *q;
+ } hfwd;
+};
+
+struct airoha_gdm_port {
+ struct airoha_qdma *qdma;
+ struct net_device *dev;
+ int id;
+
+ struct airoha_hw_stats stats;
+
+ DECLARE_BITMAP(qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS);
+
+ /* qos stats counters */
+ u64 cpu_tx_packets;
+ u64 fwd_tx_packets;
+
+ struct metadata_dst *dsa_meta[AIROHA_MAX_DSA_PORTS];
+};
+
+#define AIROHA_RXD4_PPE_CPU_REASON GENMASK(20, 16)
+#define AIROHA_RXD4_FOE_ENTRY GENMASK(15, 0)
+
+struct airoha_ppe {
+ struct airoha_eth *eth;
+
+ void *foe;
+ dma_addr_t foe_dma;
+
+ struct hlist_head *foe_flow;
+ u16 foe_check_time[PPE_NUM_ENTRIES];
+
+ struct dentry *debugfs_dir;
+};
+
+struct airoha_eth {
+ struct device *dev;
+
+ unsigned long state;
+ void __iomem *fe_regs;
+
+ struct airoha_npu __rcu *npu;
+
+ struct airoha_ppe *ppe;
+ struct rhashtable flow_table;
+
+ struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
+ struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];
+
+ struct net_device *napi_dev;
+
+ struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA];
+ struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
+};
+
+u32 airoha_rr(void __iomem *base, u32 offset);
+void airoha_wr(void __iomem *base, u32 offset, u32 val);
+u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val);
+
+#define airoha_fe_rr(eth, offset) \
+ airoha_rr((eth)->fe_regs, (offset))
+#define airoha_fe_wr(eth, offset, val) \
+ airoha_wr((eth)->fe_regs, (offset), (val))
+#define airoha_fe_rmw(eth, offset, mask, val) \
+ airoha_rmw((eth)->fe_regs, (offset), (mask), (val))
+#define airoha_fe_set(eth, offset, val) \
+ airoha_rmw((eth)->fe_regs, (offset), 0, (val))
+#define airoha_fe_clear(eth, offset, val) \
+ airoha_rmw((eth)->fe_regs, (offset), (val), 0)
+
+#define airoha_qdma_rr(qdma, offset) \
+ airoha_rr((qdma)->regs, (offset))
+#define airoha_qdma_wr(qdma, offset, val) \
+ airoha_wr((qdma)->regs, (offset), (val))
+#define airoha_qdma_rmw(qdma, offset, mask, val) \
+ airoha_rmw((qdma)->regs, (offset), (mask), (val))
+#define airoha_qdma_set(qdma, offset, val) \
+ airoha_rmw((qdma)->regs, (offset), 0, (val))
+#define airoha_qdma_clear(qdma, offset, val) \
+ airoha_rmw((qdma)->regs, (offset), (val), 0)
+
+void airoha_ppe_check_skb(struct airoha_ppe *ppe, u16 hash);
+int airoha_ppe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv);
+int airoha_ppe_init(struct airoha_eth *eth);
+void airoha_ppe_deinit(struct airoha_eth *eth);
+struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
+ u32 hash);
+
+#ifdef CONFIG_DEBUG_FS
+int airoha_ppe_debugfs_init(struct airoha_ppe *ppe);
+#else
+static inline int airoha_ppe_debugfs_init(struct airoha_ppe *ppe)
+{
+ return 0;
+}
+#endif
+
+#endif /* AIROHA_ETH_H */
diff --git a/drivers/net/ethernet/airoha/airoha_npu.c b/drivers/net/ethernet/airoha/airoha_npu.c
new file mode 100644
index 000000000000..7a5710f9ccf6
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_npu.c
@@ -0,0 +1,520 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/devcoredump.h>
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/regmap.h>
+
+#include "airoha_npu.h"
+
+#define NPU_EN7581_FIRMWARE_DATA "airoha/en7581_npu_data.bin"
+#define NPU_EN7581_FIRMWARE_RV32 "airoha/en7581_npu_rv32.bin"
+#define NPU_EN7581_FIRMWARE_RV32_MAX_SIZE 0x200000
+#define NPU_EN7581_FIRMWARE_DATA_MAX_SIZE 0x10000
+#define NPU_DUMP_SIZE 512
+
+#define REG_NPU_LOCAL_SRAM 0x0
+
+#define NPU_PC_BASE_ADDR 0x305000
+#define REG_PC_DBG(_n) (0x305000 + ((_n) * 0x100))
+
+#define NPU_CLUSTER_BASE_ADDR 0x306000
+
+#define REG_CR_BOOT_TRIGGER (NPU_CLUSTER_BASE_ADDR + 0x000)
+#define REG_CR_BOOT_CONFIG (NPU_CLUSTER_BASE_ADDR + 0x004)
+#define REG_CR_BOOT_BASE(_n) (NPU_CLUSTER_BASE_ADDR + 0x020 + ((_n) << 2))
+
+#define NPU_MBOX_BASE_ADDR 0x30c000
+
+#define REG_CR_MBOX_INT_STATUS (NPU_MBOX_BASE_ADDR + 0x000)
+#define MBOX_INT_STATUS_MASK BIT(8)
+
+#define REG_CR_MBOX_INT_MASK(_n) (NPU_MBOX_BASE_ADDR + 0x004 + ((_n) << 2))
+#define REG_CR_MBQ0_CTRL(_n) (NPU_MBOX_BASE_ADDR + 0x030 + ((_n) << 2))
+#define REG_CR_MBQ8_CTRL(_n) (NPU_MBOX_BASE_ADDR + 0x0b0 + ((_n) << 2))
+#define REG_CR_NPU_MIB(_n) (NPU_MBOX_BASE_ADDR + 0x140 + ((_n) << 2))
+
+#define NPU_TIMER_BASE_ADDR 0x310100
+#define REG_WDT_TIMER_CTRL(_n) (NPU_TIMER_BASE_ADDR + ((_n) * 0x100))
+#define WDT_EN_MASK BIT(25)
+#define WDT_INTR_MASK BIT(21)
+
+enum {
+ NPU_OP_SET = 1,
+ NPU_OP_SET_NO_WAIT,
+ NPU_OP_GET,
+ NPU_OP_GET_NO_WAIT,
+};
+
+enum {
+ NPU_FUNC_WIFI,
+ NPU_FUNC_TUNNEL,
+ NPU_FUNC_NOTIFY,
+ NPU_FUNC_DBA,
+ NPU_FUNC_TR471,
+ NPU_FUNC_PPE,
+};
+
+enum {
+ NPU_MBOX_ERROR,
+ NPU_MBOX_SUCCESS,
+};
+
+enum {
+ PPE_FUNC_SET_WAIT,
+ PPE_FUNC_SET_WAIT_HWNAT_INIT,
+ PPE_FUNC_SET_WAIT_HWNAT_DEINIT,
+ PPE_FUNC_SET_WAIT_API,
+};
+
+enum {
+ PPE2_SRAM_SET_ENTRY,
+ PPE_SRAM_SET_ENTRY,
+ PPE_SRAM_SET_VAL,
+ PPE_SRAM_RESET_VAL,
+};
+
+enum {
+ QDMA_WAN_ETHER = 1,
+ QDMA_WAN_PON_XDSL,
+};
+
+#define MBOX_MSG_FUNC_ID GENMASK(14, 11)
+#define MBOX_MSG_STATIC_BUF BIT(5)
+#define MBOX_MSG_STATUS GENMASK(4, 2)
+#define MBOX_MSG_DONE BIT(1)
+#define MBOX_MSG_WAIT_RSP BIT(0)
+
+#define PPE_TYPE_L2B_IPV4 2
+#define PPE_TYPE_L2B_IPV4_IPV6 3
+
+struct ppe_mbox_data {
+ u32 func_type;
+ u32 func_id;
+ union {
+ struct {
+ u8 cds;
+ u8 xpon_hal_api;
+ u8 wan_xsi;
+ u8 ct_joyme4;
+ int ppe_type;
+ int wan_mode;
+ int wan_sel;
+ } init_info;
+ struct {
+ int func_id;
+ u32 size;
+ u32 data;
+ } set_info;
+ };
+};
+
+static int airoha_npu_send_msg(struct airoha_npu *npu, int func_id,
+ void *p, int size)
+{
+ u16 core = 0; /* FIXME */
+ u32 val, offset = core << 4;
+ dma_addr_t dma_addr;
+ void *addr;
+ int ret;
+
+ addr = kmemdup(p, size, GFP_ATOMIC);
+ if (!addr)
+ return -ENOMEM;
+
+ dma_addr = dma_map_single(npu->dev, addr, size, DMA_TO_DEVICE);
+ ret = dma_mapping_error(npu->dev, dma_addr);
+ if (ret)
+ goto out;
+
+ spin_lock_bh(&npu->cores[core].lock);
+
+ regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(0) + offset, dma_addr);
+ regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(1) + offset, size);
+ regmap_read(npu->regmap, REG_CR_MBQ0_CTRL(2) + offset, &val);
+ regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(2) + offset, val + 1);
+ val = FIELD_PREP(MBOX_MSG_FUNC_ID, func_id) | MBOX_MSG_WAIT_RSP;
+ regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(3) + offset, val);
+
+ ret = regmap_read_poll_timeout_atomic(npu->regmap,
+ REG_CR_MBQ0_CTRL(3) + offset,
+ val, (val & MBOX_MSG_DONE),
+ 100, 100 * MSEC_PER_SEC);
+ if (!ret && FIELD_GET(MBOX_MSG_STATUS, val) != NPU_MBOX_SUCCESS)
+ ret = -EINVAL;
+
+ spin_unlock_bh(&npu->cores[core].lock);
+
+ dma_unmap_single(npu->dev, dma_addr, size, DMA_TO_DEVICE);
+out:
+ kfree(addr);
+
+ return ret;
+}
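For illustration (editorial, not part of the patch; the sketch_* name is hypothetical), callers compose a ppe_mbox_data request on the stack and hand it to airoha_npu_send_msg(), which DMA-maps a copy and polls for the NPU acknowledgment. The PPE helpers below all follow this shape:

	/* Sketch only: issue a set-and-wait PPE command to the NPU. */
	static int sketch_ppe_cmd(struct airoha_npu *npu, u32 func_id)
	{
		struct ppe_mbox_data ppe_data = {
			.func_type = NPU_OP_SET,
			.func_id = func_id,
		};

		return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
					   sizeof(ppe_data));
	}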
+
+static int airoha_npu_run_firmware(struct device *dev, void __iomem *base,
+ struct reserved_mem *rmem)
+{
+ const struct firmware *fw;
+ void __iomem *addr;
+ int ret;
+
+ ret = request_firmware(&fw, NPU_EN7581_FIRMWARE_RV32, dev);
+ if (ret)
+ return ret == -ENOENT ? -EPROBE_DEFER : ret;
+
+ if (fw->size > NPU_EN7581_FIRMWARE_RV32_MAX_SIZE) {
+		dev_err(dev, "%s: fw size exceeds limit (%zu)\n",
+ NPU_EN7581_FIRMWARE_RV32, fw->size);
+ ret = -E2BIG;
+ goto out;
+ }
+
+ addr = devm_ioremap(dev, rmem->base, rmem->size);
+ if (!addr) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy_toio(addr, fw->data, fw->size);
+ release_firmware(fw);
+
+ ret = request_firmware(&fw, NPU_EN7581_FIRMWARE_DATA, dev);
+ if (ret)
+ return ret == -ENOENT ? -EPROBE_DEFER : ret;
+
+ if (fw->size > NPU_EN7581_FIRMWARE_DATA_MAX_SIZE) {
+		dev_err(dev, "%s: fw size exceeds limit (%zu)\n",
+ NPU_EN7581_FIRMWARE_DATA, fw->size);
+ ret = -E2BIG;
+ goto out;
+ }
+
+ memcpy_toio(base + REG_NPU_LOCAL_SRAM, fw->data, fw->size);
+out:
+ release_firmware(fw);
+
+ return ret;
+}
+
+static irqreturn_t airoha_npu_mbox_handler(int irq, void *npu_instance)
+{
+ struct airoha_npu *npu = npu_instance;
+
+ /* clear mbox interrupt status */
+ regmap_write(npu->regmap, REG_CR_MBOX_INT_STATUS,
+ MBOX_INT_STATUS_MASK);
+
+ /* acknowledge npu */
+ regmap_update_bits(npu->regmap, REG_CR_MBQ8_CTRL(3),
+ MBOX_MSG_STATUS | MBOX_MSG_DONE, MBOX_MSG_DONE);
+
+ return IRQ_HANDLED;
+}
+
+static void airoha_npu_wdt_work(struct work_struct *work)
+{
+ struct airoha_npu_core *core;
+ struct airoha_npu *npu;
+ void *dump;
+ u32 val[3];
+ int c;
+
+ core = container_of(work, struct airoha_npu_core, wdt_work);
+ npu = core->npu;
+
+ dump = vzalloc(NPU_DUMP_SIZE);
+ if (!dump)
+ return;
+
+ c = core - &npu->cores[0];
+ regmap_bulk_read(npu->regmap, REG_PC_DBG(c), val, ARRAY_SIZE(val));
+ snprintf(dump, NPU_DUMP_SIZE, "PC: %08x SP: %08x LR: %08x\n",
+ val[0], val[1], val[2]);
+
+ dev_coredumpv(npu->dev, dump, NPU_DUMP_SIZE, GFP_KERNEL);
+}
+
+static irqreturn_t airoha_npu_wdt_handler(int irq, void *core_instance)
+{
+ struct airoha_npu_core *core = core_instance;
+ struct airoha_npu *npu = core->npu;
+ int c = core - &npu->cores[0];
+ u32 val;
+
+ regmap_set_bits(npu->regmap, REG_WDT_TIMER_CTRL(c), WDT_INTR_MASK);
+ if (!regmap_read(npu->regmap, REG_WDT_TIMER_CTRL(c), &val) &&
+ FIELD_GET(WDT_EN_MASK, val))
+ schedule_work(&core->wdt_work);
+
+ return IRQ_HANDLED;
+}
+
+static int airoha_npu_ppe_init(struct airoha_npu *npu)
+{
+ struct ppe_mbox_data ppe_data = {
+ .func_type = NPU_OP_SET,
+ .func_id = PPE_FUNC_SET_WAIT_HWNAT_INIT,
+ .init_info = {
+ .ppe_type = PPE_TYPE_L2B_IPV4_IPV6,
+ .wan_mode = QDMA_WAN_ETHER,
+ },
+ };
+
+ return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
+ sizeof(struct ppe_mbox_data));
+}
+
+static int airoha_npu_ppe_deinit(struct airoha_npu *npu)
+{
+ struct ppe_mbox_data ppe_data = {
+ .func_type = NPU_OP_SET,
+ .func_id = PPE_FUNC_SET_WAIT_HWNAT_DEINIT,
+ };
+
+ return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
+ sizeof(struct ppe_mbox_data));
+}
+
+static int airoha_npu_ppe_flush_sram_entries(struct airoha_npu *npu,
+ dma_addr_t foe_addr,
+ int sram_num_entries)
+{
+ struct ppe_mbox_data ppe_data = {
+ .func_type = NPU_OP_SET,
+ .func_id = PPE_FUNC_SET_WAIT_API,
+ .set_info = {
+ .func_id = PPE_SRAM_RESET_VAL,
+ .data = foe_addr,
+ .size = sram_num_entries,
+ },
+ };
+
+ return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
+ sizeof(struct ppe_mbox_data));
+}
+
+static int airoha_npu_foe_commit_entry(struct airoha_npu *npu,
+ dma_addr_t foe_addr,
+ u32 entry_size, u32 hash, bool ppe2)
+{
+ struct ppe_mbox_data ppe_data = {
+ .func_type = NPU_OP_SET,
+ .func_id = PPE_FUNC_SET_WAIT_API,
+ .set_info = {
+ .data = foe_addr,
+ .size = entry_size,
+ },
+ };
+ int err;
+
+ ppe_data.set_info.func_id = ppe2 ? PPE2_SRAM_SET_ENTRY
+ : PPE_SRAM_SET_ENTRY;
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
+ sizeof(struct ppe_mbox_data));
+ if (err)
+ return err;
+
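+	/* Second mailbox step: pass the hash index so the NPU can apply
+	 * the staged entry to the matching SRAM slot.
+	 */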
+ ppe_data.set_info.func_id = PPE_SRAM_SET_VAL;
+ ppe_data.set_info.data = hash;
+ ppe_data.set_info.size = sizeof(u32);
+
+ return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
+ sizeof(struct ppe_mbox_data));
+}
+
+struct airoha_npu *airoha_npu_get(struct device *dev)
+{
+ struct platform_device *pdev;
+ struct device_node *np;
+ struct airoha_npu *npu;
+
+ np = of_parse_phandle(dev->of_node, "airoha,npu", 0);
+ if (!np)
+ return ERR_PTR(-ENODEV);
+
+	pdev = of_find_device_by_node(np);
+	if (!pdev) {
+		dev_err(dev, "cannot find device node %s\n", np->name);
+		of_node_put(np);
+		return ERR_PTR(-ENODEV);
+	}
+	of_node_put(np);
+
+ if (!try_module_get(THIS_MODULE)) {
+ dev_err(dev, "failed to get the device driver module\n");
+ npu = ERR_PTR(-ENODEV);
+ goto error_pdev_put;
+ }
+
+ npu = platform_get_drvdata(pdev);
+ if (!npu) {
+ npu = ERR_PTR(-ENODEV);
+ goto error_module_put;
+ }
+
+ if (!device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER)) {
+ dev_err(&pdev->dev,
+ "failed to create device link to consumer %s\n",
+ dev_name(dev));
+ npu = ERR_PTR(-EINVAL);
+ goto error_module_put;
+ }
+
+ return npu;
+
+error_module_put:
+ module_put(THIS_MODULE);
+error_pdev_put:
+ platform_device_put(pdev);
+
+ return npu;
+}
+EXPORT_SYMBOL_GPL(airoha_npu_get);
+
+void airoha_npu_put(struct airoha_npu *npu)
+{
+ module_put(THIS_MODULE);
+ put_device(npu->dev);
+}
+EXPORT_SYMBOL_GPL(airoha_npu_put);
+
+static const struct of_device_id of_airoha_npu_match[] = {
+ { .compatible = "airoha,en7581-npu" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_airoha_npu_match);
+
+static const struct regmap_config regmap_config = {
+ .name = "npu",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .disable_locking = true,
+};
+
+static int airoha_npu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct reserved_mem *rmem;
+ struct airoha_npu *npu;
+ struct device_node *np;
+ void __iomem *base;
+ int i, irq, err;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ npu = devm_kzalloc(dev, sizeof(*npu), GFP_KERNEL);
+ if (!npu)
+ return -ENOMEM;
+
+ npu->dev = dev;
+ npu->ops.ppe_init = airoha_npu_ppe_init;
+ npu->ops.ppe_deinit = airoha_npu_ppe_deinit;
+ npu->ops.ppe_flush_sram_entries = airoha_npu_ppe_flush_sram_entries;
+ npu->ops.ppe_foe_commit_entry = airoha_npu_foe_commit_entry;
+
+ npu->regmap = devm_regmap_init_mmio(dev, base, &regmap_config);
+ if (IS_ERR(npu->regmap))
+ return PTR_ERR(npu->regmap);
+
+ np = of_parse_phandle(dev->of_node, "memory-region", 0);
+ if (!np)
+ return -ENODEV;
+
+ rmem = of_reserved_mem_lookup(np);
+ of_node_put(np);
+
+ if (!rmem)
+ return -ENODEV;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ err = devm_request_irq(dev, irq, airoha_npu_mbox_handler,
+ IRQF_SHARED, "airoha-npu-mbox", npu);
+ if (err)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(npu->cores); i++) {
+ struct airoha_npu_core *core = &npu->cores[i];
+
+ spin_lock_init(&core->lock);
+ core->npu = npu;
+
+ irq = platform_get_irq(pdev, i + 1);
+ if (irq < 0)
+ return irq;
+
+ err = devm_request_irq(dev, irq, airoha_npu_wdt_handler,
+ IRQF_SHARED, "airoha-npu-wdt", core);
+ if (err)
+ return err;
+
+ INIT_WORK(&core->wdt_work, airoha_npu_wdt_work);
+ }
+
+ err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
+
+ err = airoha_npu_run_firmware(dev, base, rmem);
+ if (err)
+ return dev_err_probe(dev, err, "failed to run npu firmware\n");
+
+ regmap_write(npu->regmap, REG_CR_NPU_MIB(10),
+ rmem->base + NPU_EN7581_FIRMWARE_RV32_MAX_SIZE);
+ regmap_write(npu->regmap, REG_CR_NPU_MIB(11), 0x40000); /* SRAM 256K */
+ regmap_write(npu->regmap, REG_CR_NPU_MIB(12), 0);
+ regmap_write(npu->regmap, REG_CR_NPU_MIB(21), 1);
+ msleep(100);
+
+	/* set the boot address for each core */
+ for (i = 0; i < NPU_NUM_CORES; i++)
+ regmap_write(npu->regmap, REG_CR_BOOT_BASE(i), rmem->base);
+ usleep_range(1000, 2000);
+
+	/* enable the NPU cores; do not start core 3 since it is used
+	 * for WiFi offloading
+	 */
+ regmap_write(npu->regmap, REG_CR_BOOT_CONFIG, 0xf7);
+ regmap_write(npu->regmap, REG_CR_BOOT_TRIGGER, 0x1);
+ msleep(100);
+
+ platform_set_drvdata(pdev, npu);
+
+ return 0;
+}
+
+static void airoha_npu_remove(struct platform_device *pdev)
+{
+ struct airoha_npu *npu = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(npu->cores); i++)
+ cancel_work_sync(&npu->cores[i].wdt_work);
+}
+
+static struct platform_driver airoha_npu_driver = {
+ .probe = airoha_npu_probe,
+ .remove = airoha_npu_remove,
+ .driver = {
+ .name = "airoha-npu",
+ .of_match_table = of_airoha_npu_match,
+ },
+};
+module_platform_driver(airoha_npu_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("Airoha Network Processor Unit driver");
diff --git a/drivers/net/ethernet/airoha/airoha_npu.h b/drivers/net/ethernet/airoha/airoha_npu.h
new file mode 100644
index 000000000000..a2b8ae4d9473
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_npu.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2025 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#define NPU_NUM_CORES 8
+
+struct airoha_npu {
+ struct device *dev;
+ struct regmap *regmap;
+
+ struct airoha_npu_core {
+ struct airoha_npu *npu;
+ /* protect concurrent npu memory accesses */
+ spinlock_t lock;
+ struct work_struct wdt_work;
+ } cores[NPU_NUM_CORES];
+
+ struct {
+ int (*ppe_init)(struct airoha_npu *npu);
+ int (*ppe_deinit)(struct airoha_npu *npu);
+ int (*ppe_flush_sram_entries)(struct airoha_npu *npu,
+ dma_addr_t foe_addr,
+ int sram_num_entries);
+ int (*ppe_foe_commit_entry)(struct airoha_npu *npu,
+ dma_addr_t foe_addr,
+ u32 entry_size, u32 hash,
+ bool ppe2);
+ } ops;
+};
+
+struct airoha_npu *airoha_npu_get(struct device *dev);
+void airoha_npu_put(struct airoha_npu *npu);
diff --git a/drivers/net/ethernet/airoha/airoha_ppe.c b/drivers/net/ethernet/airoha/airoha_ppe.c
new file mode 100644
index 000000000000..8b55e871352d
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
@@ -0,0 +1,910 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/rhashtable.h>
+#include <net/ipv6.h>
+#include <net/pkt_cls.h>
+
+#include "airoha_npu.h"
+#include "airoha_regs.h"
+#include "airoha_eth.h"
+
+static DEFINE_MUTEX(flow_offload_mutex);
+static DEFINE_SPINLOCK(ppe_lock);
+
+static const struct rhashtable_params airoha_flow_table_params = {
+ .head_offset = offsetof(struct airoha_flow_table_entry, node),
+ .key_offset = offsetof(struct airoha_flow_table_entry, cookie),
+ .key_len = sizeof(unsigned long),
+ .automatic_shrinking = true,
+};
+
+static bool airoha_ppe2_is_enabled(struct airoha_eth *eth)
+{
+ return airoha_fe_rr(eth, REG_PPE_GLO_CFG(1)) & PPE_GLO_CFG_EN_MASK;
+}
+
+static u32 airoha_ppe_get_timestamp(struct airoha_ppe *ppe)
+{
+ u16 timestamp = airoha_fe_rr(ppe->eth, REG_FE_FOE_TS);
+
+ return FIELD_GET(AIROHA_FOE_IB1_BIND_TIMESTAMP, timestamp);
+}
+
+static void airoha_ppe_hw_init(struct airoha_ppe *ppe)
+{
+ u32 sram_tb_size, sram_num_entries, dram_num_entries;
+ struct airoha_eth *eth = ppe->eth;
+ int i;
+
+ sram_tb_size = PPE_SRAM_NUM_ENTRIES * sizeof(struct airoha_foe_entry);
+ dram_num_entries = PPE_RAM_NUM_ENTRIES_SHIFT(PPE_DRAM_NUM_ENTRIES);
+
+ for (i = 0; i < PPE_NUM; i++) {
+ int p;
+
+ airoha_fe_wr(eth, REG_PPE_TB_BASE(i),
+ ppe->foe_dma + sram_tb_size);
+
+ airoha_fe_rmw(eth, REG_PPE_BND_AGE0(i),
+ PPE_BIND_AGE0_DELTA_NON_L4 |
+ PPE_BIND_AGE0_DELTA_UDP,
+ FIELD_PREP(PPE_BIND_AGE0_DELTA_NON_L4, 1) |
+ FIELD_PREP(PPE_BIND_AGE0_DELTA_UDP, 12));
+ airoha_fe_rmw(eth, REG_PPE_BND_AGE1(i),
+ PPE_BIND_AGE1_DELTA_TCP_FIN |
+ PPE_BIND_AGE1_DELTA_TCP,
+ FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
+ FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP, 7));
+
+ airoha_fe_rmw(eth, REG_PPE_TB_HASH_CFG(i),
+ PPE_SRAM_TABLE_EN_MASK |
+ PPE_SRAM_HASH1_EN_MASK |
+ PPE_DRAM_TABLE_EN_MASK |
+ PPE_SRAM_HASH0_MODE_MASK |
+ PPE_SRAM_HASH1_MODE_MASK |
+ PPE_DRAM_HASH0_MODE_MASK |
+ PPE_DRAM_HASH1_MODE_MASK,
+ FIELD_PREP(PPE_SRAM_TABLE_EN_MASK, 1) |
+ FIELD_PREP(PPE_SRAM_HASH1_EN_MASK, 1) |
+ FIELD_PREP(PPE_SRAM_HASH1_MODE_MASK, 1) |
+ FIELD_PREP(PPE_DRAM_HASH1_MODE_MASK, 3));
+
+ airoha_fe_rmw(eth, REG_PPE_TB_CFG(i),
+ PPE_TB_CFG_SEARCH_MISS_MASK |
+ PPE_TB_ENTRY_SIZE_MASK,
+ FIELD_PREP(PPE_TB_CFG_SEARCH_MISS_MASK, 3) |
+ FIELD_PREP(PPE_TB_ENTRY_SIZE_MASK, 0));
+
+ airoha_fe_wr(eth, REG_PPE_HASH_SEED(i), PPE_HASH_SEED);
+
+ for (p = 0; p < ARRAY_SIZE(eth->ports); p++)
+ airoha_fe_rmw(eth, REG_PPE_MTU(i, p),
+ FP0_EGRESS_MTU_MASK |
+ FP1_EGRESS_MTU_MASK,
+ FIELD_PREP(FP0_EGRESS_MTU_MASK,
+ AIROHA_MAX_MTU) |
+ FIELD_PREP(FP1_EGRESS_MTU_MASK,
+ AIROHA_MAX_MTU));
+ }
+
+ if (airoha_ppe2_is_enabled(eth)) {
+ sram_num_entries =
+ PPE_RAM_NUM_ENTRIES_SHIFT(PPE1_SRAM_NUM_ENTRIES);
+ airoha_fe_rmw(eth, REG_PPE_TB_CFG(0),
+ PPE_SRAM_TB_NUM_ENTRY_MASK |
+ PPE_DRAM_TB_NUM_ENTRY_MASK,
+ FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
+ sram_num_entries) |
+ FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
+ dram_num_entries));
+ airoha_fe_rmw(eth, REG_PPE_TB_CFG(1),
+ PPE_SRAM_TB_NUM_ENTRY_MASK |
+ PPE_DRAM_TB_NUM_ENTRY_MASK,
+ FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
+ sram_num_entries) |
+ FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
+ dram_num_entries));
+ } else {
+ sram_num_entries =
+ PPE_RAM_NUM_ENTRIES_SHIFT(PPE_SRAM_NUM_ENTRIES);
+ airoha_fe_rmw(eth, REG_PPE_TB_CFG(0),
+ PPE_SRAM_TB_NUM_ENTRY_MASK |
+ PPE_DRAM_TB_NUM_ENTRY_MASK,
+ FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
+ sram_num_entries) |
+ FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
+ dram_num_entries));
+ }
+}
+
+static void airoha_ppe_flow_mangle_eth(const struct flow_action_entry *act, void *eth)
+{
+ void *dest = eth + act->mangle.offset;
+ const void *src = &act->mangle.val;
+
+ if (act->mangle.offset > 8)
+ return;
+
+ if (act->mangle.mask == 0xffff) {
+ src += 2;
+ dest += 2;
+ }
+
+ memcpy(dest, src, act->mangle.mask ? 2 : 4);
+}
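Following pedit's new = (old & mask) | val semantics, a 0xffff mask marks the first half of the 32-bit word as preserved, so the copy targets the trailing two bytes; other non-zero masks copy the leading two bytes, and a zero mask rewrites the whole word. Offsets 0, 4 and 8 cover the destination and source MAC fields of the Ethernet header, hence the offset guard above.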
+
+static int airoha_ppe_flow_mangle_ports(const struct flow_action_entry *act,
+ struct airoha_flow_data *data)
+{
+ u32 val = be32_to_cpu((__force __be32)act->mangle.val);
+
+ switch (act->mangle.offset) {
+ case 0:
+ if ((__force __be32)act->mangle.mask == ~cpu_to_be32(0xffff))
+ data->dst_port = cpu_to_be16(val);
+ else
+ data->src_port = cpu_to_be16(val >> 16);
+ break;
+ case 2:
+ data->dst_port = cpu_to_be16(val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int airoha_ppe_flow_mangle_ipv4(const struct flow_action_entry *act,
+ struct airoha_flow_data *data)
+{
+ __be32 *dest;
+
+ switch (act->mangle.offset) {
+ case offsetof(struct iphdr, saddr):
+ dest = &data->v4.src_addr;
+ break;
+ case offsetof(struct iphdr, daddr):
+ dest = &data->v4.dst_addr;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ memcpy(dest, &act->mangle.val, sizeof(u32));
+
+ return 0;
+}
+
+static int airoha_get_dsa_port(struct net_device **dev)
+{
+#if IS_ENABLED(CONFIG_NET_DSA)
+ struct dsa_port *dp = dsa_port_from_netdev(*dev);
+
+ if (IS_ERR(dp))
+ return -ENODEV;
+
+ *dev = dsa_port_to_conduit(dp);
+ return dp->index;
+#else
+ return -ENODEV;
+#endif
+}
+
+static int airoha_ppe_foe_entry_prepare(struct airoha_foe_entry *hwe,
+ struct net_device *dev, int type,
+ struct airoha_flow_data *data,
+ int l4proto)
+{
+ int dsa_port = airoha_get_dsa_port(&dev);
+ struct airoha_foe_mac_info_common *l2;
+ u32 qdata, ports_pad, val;
+
+ memset(hwe, 0, sizeof(*hwe));
+
+ val = FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE, AIROHA_FOE_STATE_BIND) |
+ FIELD_PREP(AIROHA_FOE_IB1_BIND_PACKET_TYPE, type) |
+ FIELD_PREP(AIROHA_FOE_IB1_BIND_UDP, l4proto == IPPROTO_UDP) |
+ FIELD_PREP(AIROHA_FOE_IB1_BIND_VLAN_LAYER, data->vlan.num) |
+ FIELD_PREP(AIROHA_FOE_IB1_BIND_VPM, data->vlan.num) |
+ AIROHA_FOE_IB1_BIND_TTL;
+ hwe->ib1 = val;
+
+ val = FIELD_PREP(AIROHA_FOE_IB2_PORT_AG, 0x1f) |
+ AIROHA_FOE_IB2_PSE_QOS;
+ if (dsa_port >= 0)
+ val |= FIELD_PREP(AIROHA_FOE_IB2_NBQ, dsa_port);
+
+ if (dev) {
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ u8 pse_port;
+
+ if (dsa_port >= 0)
+ pse_port = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id;
+ else
+ pse_port = 2; /* uplink relies on GDM2 loopback */
+ val |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, pse_port);
+ }
+
+ if (is_multicast_ether_addr(data->eth.h_dest))
+ val |= AIROHA_FOE_IB2_MULTICAST;
+
+ ports_pad = 0xa5a5a500 | (l4proto & 0xff);
+ if (type == PPE_PKT_TYPE_IPV4_ROUTE)
+ hwe->ipv4.orig_tuple.ports = ports_pad;
+ if (type == PPE_PKT_TYPE_IPV6_ROUTE_3T)
+ hwe->ipv6.ports = ports_pad;
+
+ qdata = FIELD_PREP(AIROHA_FOE_SHAPER_ID, 0x7f);
+ if (type == PPE_PKT_TYPE_BRIDGE) {
+ hwe->bridge.dest_mac_hi = get_unaligned_be32(data->eth.h_dest);
+ hwe->bridge.dest_mac_lo =
+ get_unaligned_be16(data->eth.h_dest + 4);
+ hwe->bridge.src_mac_hi =
+ get_unaligned_be16(data->eth.h_source);
+ hwe->bridge.src_mac_lo =
+ get_unaligned_be32(data->eth.h_source + 2);
+ hwe->bridge.data = qdata;
+ hwe->bridge.ib2 = val;
+ l2 = &hwe->bridge.l2.common;
+ } else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
+ hwe->ipv6.data = qdata;
+ hwe->ipv6.ib2 = val;
+ l2 = &hwe->ipv6.l2;
+ } else {
+ hwe->ipv4.data = qdata;
+ hwe->ipv4.ib2 = val;
+ l2 = &hwe->ipv4.l2.common;
+ }
+
+ l2->dest_mac_hi = get_unaligned_be32(data->eth.h_dest);
+ l2->dest_mac_lo = get_unaligned_be16(data->eth.h_dest + 4);
+ if (type <= PPE_PKT_TYPE_IPV4_DSLITE) {
+ l2->src_mac_hi = get_unaligned_be32(data->eth.h_source);
+ hwe->ipv4.l2.src_mac_lo =
+ get_unaligned_be16(data->eth.h_source + 4);
+ } else {
+ l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, 0xf);
+ }
+
+ if (data->vlan.num) {
+ l2->etype = dsa_port >= 0 ? BIT(dsa_port) : 0;
+ l2->vlan1 = data->vlan.hdr[0].id;
+ if (data->vlan.num == 2)
+ l2->vlan2 = data->vlan.hdr[1].id;
+ } else if (dsa_port >= 0) {
+ l2->etype = BIT(15) | BIT(dsa_port);
+ } else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
+ l2->etype = ETH_P_IPV6;
+ } else {
+ l2->etype = ETH_P_IP;
+ }
+
+ return 0;
+}
+
+static int airoha_ppe_foe_entry_set_ipv4_tuple(struct airoha_foe_entry *hwe,
+ struct airoha_flow_data *data,
+ bool egress)
+{
+ int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
+ struct airoha_foe_ipv4_tuple *t;
+
+ switch (type) {
+ case PPE_PKT_TYPE_IPV4_HNAPT:
+ if (egress) {
+ t = &hwe->ipv4.new_tuple;
+ break;
+ }
+ fallthrough;
+ case PPE_PKT_TYPE_IPV4_DSLITE:
+ case PPE_PKT_TYPE_IPV4_ROUTE:
+ t = &hwe->ipv4.orig_tuple;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ t->src_ip = be32_to_cpu(data->v4.src_addr);
+ t->dest_ip = be32_to_cpu(data->v4.dst_addr);
+
+ if (type != PPE_PKT_TYPE_IPV4_ROUTE) {
+ t->src_port = be16_to_cpu(data->src_port);
+ t->dest_port = be16_to_cpu(data->dst_port);
+ }
+
+ return 0;
+}
+
+static int airoha_ppe_foe_entry_set_ipv6_tuple(struct airoha_foe_entry *hwe,
+					       struct airoha_flow_data *data)
+{
+ int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
+ u32 *src, *dest;
+
+ switch (type) {
+ case PPE_PKT_TYPE_IPV6_ROUTE_5T:
+ case PPE_PKT_TYPE_IPV6_6RD:
+ hwe->ipv6.src_port = be16_to_cpu(data->src_port);
+ hwe->ipv6.dest_port = be16_to_cpu(data->dst_port);
+ fallthrough;
+ case PPE_PKT_TYPE_IPV6_ROUTE_3T:
+ src = hwe->ipv6.src_ip;
+ dest = hwe->ipv6.dest_ip;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ ipv6_addr_be32_to_cpu(src, data->v6.src_addr.s6_addr32);
+ ipv6_addr_be32_to_cpu(dest, data->v6.dst_addr.s6_addr32);
+
+ return 0;
+}
+
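+/* Fold the flow tuple into a table index the same way the hw does:
+ * select bits of hv2 where hv1 is set and hv3 elsewhere, rotate the
+ * result left by 8 bits, then xor-fold all three words in before
+ * masking to the table size.
+ */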
+static u32 airoha_ppe_foe_get_entry_hash(struct airoha_foe_entry *hwe)
+{
+ int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
+ u32 hash, hv1, hv2, hv3;
+
+ switch (type) {
+ case PPE_PKT_TYPE_IPV4_ROUTE:
+ case PPE_PKT_TYPE_IPV4_HNAPT:
+ hv1 = hwe->ipv4.orig_tuple.ports;
+ hv2 = hwe->ipv4.orig_tuple.dest_ip;
+ hv3 = hwe->ipv4.orig_tuple.src_ip;
+ break;
+ case PPE_PKT_TYPE_IPV6_ROUTE_3T:
+ case PPE_PKT_TYPE_IPV6_ROUTE_5T:
+ hv1 = hwe->ipv6.src_ip[3] ^ hwe->ipv6.dest_ip[3];
+ hv1 ^= hwe->ipv6.ports;
+
+ hv2 = hwe->ipv6.src_ip[2] ^ hwe->ipv6.dest_ip[2];
+ hv2 ^= hwe->ipv6.dest_ip[0];
+
+ hv3 = hwe->ipv6.src_ip[1] ^ hwe->ipv6.dest_ip[1];
+ hv3 ^= hwe->ipv6.src_ip[0];
+ break;
+ case PPE_PKT_TYPE_IPV4_DSLITE:
+ case PPE_PKT_TYPE_IPV6_6RD:
+ default:
+ WARN_ON_ONCE(1);
+ return PPE_HASH_MASK;
+ }
+
+ hash = (hv1 & hv2) | ((~hv1) & hv3);
+ hash = (hash >> 24) | ((hash & 0xffffff) << 8);
+ hash ^= hv1 ^ hv2 ^ hv3;
+ hash ^= hash >> 16;
+ hash &= PPE_NUM_ENTRIES - 1;
+
+ return hash;
+}
+
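+/* Return a pointer to the FOE entry for @hash. Entries below
+ * PPE_SRAM_NUM_ENTRIES live in the hw SRAM and are first copied into
+ * the coherent DRAM shadow through the RAM control window.
+ */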
+struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
+ u32 hash)
+{
+ if (hash < PPE_SRAM_NUM_ENTRIES) {
+ u32 *hwe = ppe->foe + hash * sizeof(struct airoha_foe_entry);
+ struct airoha_eth *eth = ppe->eth;
+ bool ppe2;
+ u32 val;
+ int i;
+
+		ppe2 = airoha_ppe2_is_enabled(eth) &&
+		       hash >= PPE1_SRAM_NUM_ENTRIES;
+		airoha_fe_wr(eth, REG_PPE_RAM_CTRL(ppe2),
+			     FIELD_PREP(PPE_SRAM_CTRL_ENTRY_MASK, hash) |
+			     PPE_SRAM_CTRL_REQ_MASK);
+ if (read_poll_timeout_atomic(airoha_fe_rr, val,
+ val & PPE_SRAM_CTRL_ACK_MASK,
+ 10, 100, false, eth,
+ REG_PPE_RAM_CTRL(ppe2)))
+ return NULL;
+
+ for (i = 0; i < sizeof(struct airoha_foe_entry) / 4; i++)
+ hwe[i] = airoha_fe_rr(eth,
+ REG_PPE_RAM_ENTRY(ppe2, i));
+ }
+
+ return ppe->foe + hash * sizeof(struct airoha_foe_entry);
+}
+
+static bool airoha_ppe_foe_compare_entry(struct airoha_flow_table_entry *e,
+ struct airoha_foe_entry *hwe)
+{
+ int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, e->data.ib1);
+ int len;
+
+ if ((hwe->ib1 ^ e->data.ib1) & AIROHA_FOE_IB1_BIND_UDP)
+ return false;
+
+ if (type > PPE_PKT_TYPE_IPV4_DSLITE)
+ len = offsetof(struct airoha_foe_entry, ipv6.data);
+ else
+ len = offsetof(struct airoha_foe_entry, ipv4.ib2);
+
+ return !memcmp(&e->data.d, &hwe->d, len - sizeof(hwe->ib1));
+}
+
+static int airoha_ppe_foe_commit_entry(struct airoha_ppe *ppe,
+ struct airoha_foe_entry *e,
+ u32 hash)
+{
+ struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe);
+ u32 ts = airoha_ppe_get_timestamp(ppe);
+ struct airoha_eth *eth = ppe->eth;
+
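+	/* Commit the entry body first: the barrier makes it visible in
+	 * memory before the ib1 update flips the entry to the bound state.
+	 */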
+ memcpy(&hwe->d, &e->d, sizeof(*hwe) - sizeof(hwe->ib1));
+ wmb();
+
+ e->ib1 &= ~AIROHA_FOE_IB1_BIND_TIMESTAMP;
+ e->ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_TIMESTAMP, ts);
+ hwe->ib1 = e->ib1;
+
+ if (hash < PPE_SRAM_NUM_ENTRIES) {
+ dma_addr_t addr = ppe->foe_dma + hash * sizeof(*hwe);
+ bool ppe2 = airoha_ppe2_is_enabled(eth) &&
+ hash >= PPE1_SRAM_NUM_ENTRIES;
+ struct airoha_npu *npu;
+ int err = -ENODEV;
+
+ rcu_read_lock();
+ npu = rcu_dereference(eth->npu);
+ if (npu)
+ err = npu->ops.ppe_foe_commit_entry(npu, addr,
+ sizeof(*hwe), hash,
+ ppe2);
+ rcu_read_unlock();
+
+ return err;
+ }
+
+ return 0;
+}
+
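+/* Bind a hw FOE slot reported by the PPE on the rx path: walk the sw
+ * flows hashed to the same index and commit the first one matching the
+ * hw entry.
+ */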
+static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe, u32 hash)
+{
+ struct airoha_flow_table_entry *e;
+ struct airoha_foe_entry *hwe;
+ struct hlist_node *n;
+ u32 index, state;
+
+ spin_lock_bh(&ppe_lock);
+
+ hwe = airoha_ppe_foe_get_entry(ppe, hash);
+ if (!hwe)
+ goto unlock;
+
+ state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
+ if (state == AIROHA_FOE_STATE_BIND)
+ goto unlock;
+
+ index = airoha_ppe_foe_get_entry_hash(hwe);
+ hlist_for_each_entry_safe(e, n, &ppe->foe_flow[index], list) {
+ if (airoha_ppe_foe_compare_entry(e, hwe)) {
+ airoha_ppe_foe_commit_entry(ppe, &e->data, hash);
+ e->hash = hash;
+ break;
+ }
+ }
+unlock:
+ spin_unlock_bh(&ppe_lock);
+}
+
+static int airoha_ppe_foe_flow_commit_entry(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ u32 hash = airoha_ppe_foe_get_entry_hash(&e->data);
+
+ e->hash = 0xffff;
+
+ spin_lock_bh(&ppe_lock);
+ hlist_add_head(&e->list, &ppe->foe_flow[hash]);
+ spin_unlock_bh(&ppe_lock);
+
+ return 0;
+}
+
+static void airoha_ppe_foe_flow_remove_entry(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ spin_lock_bh(&ppe_lock);
+
+ hlist_del_init(&e->list);
+ if (e->hash != 0xffff) {
+ e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_STATE;
+ e->data.ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE,
+ AIROHA_FOE_STATE_INVALID);
+ airoha_ppe_foe_commit_entry(ppe, &e->data, e->hash);
+ e->hash = 0xffff;
+ }
+
+ spin_unlock_bh(&ppe_lock);
+}
+
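+/* FLOW_CLS_REPLACE handler: parse the cls_flower rule, build the FOE
+ * entry and track it in the sw flow table until the hw reports a
+ * matching unbound slot to bind it to.
+ */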
+static int airoha_ppe_flow_offload_replace(struct airoha_gdm_port *port,
+ struct flow_cls_offload *f)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct airoha_eth *eth = port->qdma->eth;
+ struct airoha_flow_table_entry *e;
+ struct airoha_flow_data data = {};
+ struct net_device *odev = NULL;
+ struct flow_action_entry *act;
+ struct airoha_foe_entry hwe;
+ int err, i, offload_type;
+ u16 addr_type = 0;
+ u8 l4proto = 0;
+
+ if (rhashtable_lookup(&eth->flow_table, &f->cookie,
+ airoha_flow_table_params))
+ return -EEXIST;
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
+ return -EOPNOTSUPP;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
+
+ flow_rule_match_control(rule, &match);
+ addr_type = match.key->addr_type;
+ if (flow_rule_has_control_flags(match.mask->flags,
+ f->common.extack))
+ return -EOPNOTSUPP;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+ l4proto = match.key->ip_proto;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ switch (addr_type) {
+ case 0:
+ offload_type = PPE_PKT_TYPE_BRIDGE;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+ memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
+ memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
+ } else {
+ return -EOPNOTSUPP;
+ }
+ break;
+ case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+ offload_type = PPE_PKT_TYPE_IPV4_HNAPT;
+ break;
+ case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+ offload_type = PPE_PKT_TYPE_IPV6_ROUTE_5T;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ flow_action_for_each(i, act, &rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_MANGLE:
+ if (offload_type == PPE_PKT_TYPE_BRIDGE)
+ return -EOPNOTSUPP;
+
+ if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
+ airoha_ppe_flow_mangle_eth(act, &data.eth);
+ break;
+ case FLOW_ACTION_REDIRECT:
+ odev = act->dev;
+ break;
+ case FLOW_ACTION_CSUM:
+ break;
+ case FLOW_ACTION_VLAN_PUSH:
+ if (data.vlan.num == 2 ||
+ act->vlan.proto != htons(ETH_P_8021Q))
+ return -EOPNOTSUPP;
+
+ data.vlan.hdr[data.vlan.num].id = act->vlan.vid;
+ data.vlan.hdr[data.vlan.num].proto = act->vlan.proto;
+ data.vlan.num++;
+ break;
+ case FLOW_ACTION_VLAN_POP:
+ break;
+ case FLOW_ACTION_PPPOE_PUSH:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (!is_valid_ether_addr(data.eth.h_source) ||
+ !is_valid_ether_addr(data.eth.h_dest))
+ return -EINVAL;
+
+ err = airoha_ppe_foe_entry_prepare(&hwe, odev, offload_type,
+ &data, l4proto);
+ if (err)
+ return err;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports ports;
+
+ if (offload_type == PPE_PKT_TYPE_BRIDGE)
+ return -EOPNOTSUPP;
+
+ flow_rule_match_ports(rule, &ports);
+ data.src_port = ports.key->src;
+ data.dst_port = ports.key->dst;
+ } else if (offload_type != PPE_PKT_TYPE_BRIDGE) {
+ return -EOPNOTSUPP;
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ struct flow_match_ipv4_addrs addrs;
+
+ flow_rule_match_ipv4_addrs(rule, &addrs);
+ data.v4.src_addr = addrs.key->src;
+ data.v4.dst_addr = addrs.key->dst;
+ airoha_ppe_foe_entry_set_ipv4_tuple(&hwe, &data, false);
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ struct flow_match_ipv6_addrs addrs;
+
+ flow_rule_match_ipv6_addrs(rule, &addrs);
+
+ data.v6.src_addr = addrs.key->src;
+ data.v6.dst_addr = addrs.key->dst;
+ airoha_ppe_foe_entry_set_ipv6_tuple(&hwe, &data);
+ }
+
+ flow_action_for_each(i, act, &rule->action) {
+ if (act->id != FLOW_ACTION_MANGLE)
+ continue;
+
+ if (offload_type == PPE_PKT_TYPE_BRIDGE)
+ return -EOPNOTSUPP;
+
+ switch (act->mangle.htype) {
+ case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+ case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+ err = airoha_ppe_flow_mangle_ports(act, &data);
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+ err = airoha_ppe_flow_mangle_ipv4(act, &data);
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
+ /* handled earlier */
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (err)
+ return err;
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ err = airoha_ppe_foe_entry_set_ipv4_tuple(&hwe, &data, true);
+ if (err)
+ return err;
+ }
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+
+ e->cookie = f->cookie;
+ memcpy(&e->data, &hwe, sizeof(e->data));
+
+ err = airoha_ppe_foe_flow_commit_entry(eth->ppe, e);
+ if (err)
+ goto free_entry;
+
+ err = rhashtable_insert_fast(&eth->flow_table, &e->node,
+ airoha_flow_table_params);
+ if (err < 0)
+ goto remove_foe_entry;
+
+ return 0;
+
+remove_foe_entry:
+ airoha_ppe_foe_flow_remove_entry(eth->ppe, e);
+free_entry:
+ kfree(e);
+
+ return err;
+}
+
+static int airoha_ppe_flow_offload_destroy(struct airoha_gdm_port *port,
+ struct flow_cls_offload *f)
+{
+ struct airoha_eth *eth = port->qdma->eth;
+ struct airoha_flow_table_entry *e;
+
+ e = rhashtable_lookup(&eth->flow_table, &f->cookie,
+ airoha_flow_table_params);
+ if (!e)
+ return -ENOENT;
+
+ airoha_ppe_foe_flow_remove_entry(eth->ppe, e);
+ rhashtable_remove_fast(&eth->flow_table, &e->node,
+ airoha_flow_table_params);
+ kfree(e);
+
+ return 0;
+}
+
+static int airoha_ppe_flow_offload_cmd(struct airoha_gdm_port *port,
+ struct flow_cls_offload *f)
+{
+ switch (f->command) {
+ case FLOW_CLS_REPLACE:
+ return airoha_ppe_flow_offload_replace(port, f);
+ case FLOW_CLS_DESTROY:
+ return airoha_ppe_flow_offload_destroy(port, f);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
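+/* Zero the SRAM-backed part of the FOE shadow table (halved when both
+ * PPEs are enabled) and ask the NPU to flush the hw SRAM entries.
+ */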
+static int airoha_ppe_flush_sram_entries(struct airoha_ppe *ppe,
+ struct airoha_npu *npu)
+{
+ int i, sram_num_entries = PPE_SRAM_NUM_ENTRIES;
+ struct airoha_foe_entry *hwe = ppe->foe;
+
+	if (airoha_ppe2_is_enabled(ppe->eth))
+		sram_num_entries /= 2;
+
+ for (i = 0; i < sram_num_entries; i++)
+ memset(&hwe[i], 0, sizeof(*hwe));
+
+ return npu->ops.ppe_flush_sram_entries(npu, ppe->foe_dma,
+ PPE_SRAM_NUM_ENTRIES);
+}
+
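+/* The NPU is handled by a separate module: try to load it on demand if
+ * the first lookup fails.
+ */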
+static struct airoha_npu *airoha_ppe_npu_get(struct airoha_eth *eth)
+{
+ struct airoha_npu *npu = airoha_npu_get(eth->dev);
+
+ if (IS_ERR(npu)) {
+ request_module("airoha-npu");
+ npu = airoha_npu_get(eth->dev);
+ }
+
+ return npu;
+}
+
+static int airoha_ppe_offload_setup(struct airoha_eth *eth)
+{
+ struct airoha_npu *npu = airoha_ppe_npu_get(eth);
+ int err;
+
+ if (IS_ERR(npu))
+ return PTR_ERR(npu);
+
+ err = npu->ops.ppe_init(npu);
+ if (err)
+ goto error_npu_put;
+
+ airoha_ppe_hw_init(eth->ppe);
+ err = airoha_ppe_flush_sram_entries(eth->ppe, npu);
+ if (err)
+ goto error_npu_put;
+
+ rcu_assign_pointer(eth->npu, npu);
+ synchronize_rcu();
+
+ return 0;
+
+error_npu_put:
+ airoha_npu_put(npu);
+
+ return err;
+}
+
+int airoha_ppe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct flow_cls_offload *cls = type_data;
+ struct net_device *dev = cb_priv;
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ struct airoha_eth *eth = port->qdma->eth;
+ int err = 0;
+
+ if (!tc_can_offload(dev) || type != TC_SETUP_CLSFLOWER)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&flow_offload_mutex);
+
+ if (!eth->npu)
+ err = airoha_ppe_offload_setup(eth);
+ if (!err)
+ err = airoha_ppe_flow_offload_cmd(port, cls);
+
+ mutex_unlock(&flow_offload_mutex);
+
+ return err;
+}
+
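+/* Rx fast-path hook: rate-limit hw lookups to one every HZ / 10 per
+ * hash bucket before trying to bind the reported FOE slot.
+ */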
+void airoha_ppe_check_skb(struct airoha_ppe *ppe, u16 hash)
+{
+ u16 now, diff;
+
+ if (hash > PPE_HASH_MASK)
+ return;
+
+ now = (u16)jiffies;
+ diff = now - ppe->foe_check_time[hash];
+ if (diff < HZ / 10)
+ return;
+
+ ppe->foe_check_time[hash] = now;
+ airoha_ppe_foe_insert_entry(ppe, hash);
+}
+
+int airoha_ppe_init(struct airoha_eth *eth)
+{
+ struct airoha_ppe *ppe;
+ int foe_size, err;
+
+ ppe = devm_kzalloc(eth->dev, sizeof(*ppe), GFP_KERNEL);
+ if (!ppe)
+ return -ENOMEM;
+
+ foe_size = PPE_NUM_ENTRIES * sizeof(struct airoha_foe_entry);
+ ppe->foe = dmam_alloc_coherent(eth->dev, foe_size, &ppe->foe_dma,
+ GFP_KERNEL);
+ if (!ppe->foe)
+ return -ENOMEM;
+
+ ppe->eth = eth;
+ eth->ppe = ppe;
+
+ ppe->foe_flow = devm_kzalloc(eth->dev,
+ PPE_NUM_ENTRIES * sizeof(*ppe->foe_flow),
+ GFP_KERNEL);
+ if (!ppe->foe_flow)
+ return -ENOMEM;
+
+ err = rhashtable_init(&eth->flow_table, &airoha_flow_table_params);
+ if (err)
+ return err;
+
+ err = airoha_ppe_debugfs_init(ppe);
+ if (err)
+ rhashtable_destroy(&eth->flow_table);
+
+ return err;
+}
+
+void airoha_ppe_deinit(struct airoha_eth *eth)
+{
+ struct airoha_npu *npu;
+
+ rcu_read_lock();
+ npu = rcu_dereference(eth->npu);
+ if (npu) {
+ npu->ops.ppe_deinit(npu);
+ airoha_npu_put(npu);
+ }
+ rcu_read_unlock();
+
+ rhashtable_destroy(&eth->flow_table);
+ debugfs_remove(eth->ppe->debugfs_dir);
+}
diff --git a/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c b/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c
new file mode 100644
index 000000000000..3cdc6fd53fc7
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include "airoha_eth.h"
+
+static void airoha_debugfs_ppe_print_tuple(struct seq_file *m,
+ void *src_addr, void *dest_addr,
+ u16 *src_port, u16 *dest_port,
+ bool ipv6)
+{
+ __be32 n_addr[IPV6_ADDR_WORDS];
+
+ if (ipv6) {
+ ipv6_addr_cpu_to_be32(n_addr, src_addr);
+ seq_printf(m, "%pI6", n_addr);
+ } else {
+ seq_printf(m, "%pI4h", src_addr);
+ }
+ if (src_port)
+ seq_printf(m, ":%d", *src_port);
+
+ seq_puts(m, "->");
+
+ if (ipv6) {
+ ipv6_addr_cpu_to_be32(n_addr, dest_addr);
+ seq_printf(m, "%pI6", n_addr);
+ } else {
+ seq_printf(m, "%pI4h", dest_addr);
+ }
+ if (dest_port)
+ seq_printf(m, ":%d", *dest_port);
+}
+
+static int airoha_ppe_debugfs_foe_show(struct seq_file *m, void *private,
+ bool bind)
+{
+ static const char *const ppe_type_str[] = {
+ [PPE_PKT_TYPE_IPV4_HNAPT] = "IPv4 5T",
+ [PPE_PKT_TYPE_IPV4_ROUTE] = "IPv4 3T",
+ [PPE_PKT_TYPE_BRIDGE] = "L2B",
+ [PPE_PKT_TYPE_IPV4_DSLITE] = "DS-LITE",
+ [PPE_PKT_TYPE_IPV6_ROUTE_3T] = "IPv6 3T",
+ [PPE_PKT_TYPE_IPV6_ROUTE_5T] = "IPv6 5T",
+ [PPE_PKT_TYPE_IPV6_6RD] = "6RD",
+ };
+ static const char *const ppe_state_str[] = {
+ [AIROHA_FOE_STATE_INVALID] = "INV",
+ [AIROHA_FOE_STATE_UNBIND] = "UNB",
+ [AIROHA_FOE_STATE_BIND] = "BND",
+ [AIROHA_FOE_STATE_FIN] = "FIN",
+ };
+ struct airoha_ppe *ppe = m->private;
+ int i;
+
+ for (i = 0; i < PPE_NUM_ENTRIES; i++) {
+ const char *state_str, *type_str = "UNKNOWN";
+ void *src_addr = NULL, *dest_addr = NULL;
+ u16 *src_port = NULL, *dest_port = NULL;
+ struct airoha_foe_mac_info_common *l2;
+ unsigned char h_source[ETH_ALEN] = {};
+ unsigned char h_dest[ETH_ALEN];
+ struct airoha_foe_entry *hwe;
+ u32 type, state, ib2, data;
+ bool ipv6 = false;
+
+ hwe = airoha_ppe_foe_get_entry(ppe, i);
+ if (!hwe)
+ continue;
+
+ state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
+ if (!state)
+ continue;
+
+ if (bind && state != AIROHA_FOE_STATE_BIND)
+ continue;
+
+ state_str = ppe_state_str[state % ARRAY_SIZE(ppe_state_str)];
+ type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
+ if (type < ARRAY_SIZE(ppe_type_str) && ppe_type_str[type])
+ type_str = ppe_type_str[type];
+
+ seq_printf(m, "%05x %s %7s", i, state_str, type_str);
+
+ switch (type) {
+ case PPE_PKT_TYPE_IPV4_HNAPT:
+ case PPE_PKT_TYPE_IPV4_DSLITE:
+ src_port = &hwe->ipv4.orig_tuple.src_port;
+ dest_port = &hwe->ipv4.orig_tuple.dest_port;
+ fallthrough;
+ case PPE_PKT_TYPE_IPV4_ROUTE:
+ src_addr = &hwe->ipv4.orig_tuple.src_ip;
+ dest_addr = &hwe->ipv4.orig_tuple.dest_ip;
+ break;
+ case PPE_PKT_TYPE_IPV6_ROUTE_5T:
+ src_port = &hwe->ipv6.src_port;
+ dest_port = &hwe->ipv6.dest_port;
+ fallthrough;
+ case PPE_PKT_TYPE_IPV6_ROUTE_3T:
+ case PPE_PKT_TYPE_IPV6_6RD:
+ src_addr = &hwe->ipv6.src_ip;
+ dest_addr = &hwe->ipv6.dest_ip;
+ ipv6 = true;
+ break;
+ default:
+ break;
+ }
+
+ if (src_addr && dest_addr) {
+ seq_puts(m, " orig=");
+ airoha_debugfs_ppe_print_tuple(m, src_addr, dest_addr,
+ src_port, dest_port, ipv6);
+ }
+
+ switch (type) {
+ case PPE_PKT_TYPE_IPV4_HNAPT:
+ case PPE_PKT_TYPE_IPV4_DSLITE:
+ src_port = &hwe->ipv4.new_tuple.src_port;
+ dest_port = &hwe->ipv4.new_tuple.dest_port;
+ fallthrough;
+ case PPE_PKT_TYPE_IPV4_ROUTE:
+ src_addr = &hwe->ipv4.new_tuple.src_ip;
+ dest_addr = &hwe->ipv4.new_tuple.dest_ip;
+ seq_puts(m, " new=");
+ airoha_debugfs_ppe_print_tuple(m, src_addr, dest_addr,
+ src_port, dest_port,
+ ipv6);
+ break;
+ default:
+ break;
+ }
+
+ if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
+ data = hwe->ipv6.data;
+ ib2 = hwe->ipv6.ib2;
+ l2 = &hwe->ipv6.l2;
+ } else {
+ data = hwe->ipv4.data;
+ ib2 = hwe->ipv4.ib2;
+ l2 = &hwe->ipv4.l2.common;
+ *((__be16 *)&h_source[4]) =
+ cpu_to_be16(hwe->ipv4.l2.src_mac_lo);
+ }
+
+ *((__be32 *)h_dest) = cpu_to_be32(l2->dest_mac_hi);
+ *((__be16 *)&h_dest[4]) = cpu_to_be16(l2->dest_mac_lo);
+ *((__be32 *)h_source) = cpu_to_be32(l2->src_mac_hi);
+
+		seq_printf(m,
+			   " eth=%pM->%pM etype=%04x data=%08x vlan=%d,%d ib1=%08x ib2=%08x\n",
+			   h_source, h_dest, l2->etype, data,
+			   l2->vlan1, l2->vlan2, hwe->ib1, ib2);
+ }
+
+ return 0;
+}
+
+static int airoha_ppe_debugfs_foe_all_show(struct seq_file *m, void *private)
+{
+ return airoha_ppe_debugfs_foe_show(m, private, false);
+}
+DEFINE_SHOW_ATTRIBUTE(airoha_ppe_debugfs_foe_all);
+
+static int airoha_ppe_debugfs_foe_bind_show(struct seq_file *m, void *private)
+{
+ return airoha_ppe_debugfs_foe_show(m, private, true);
+}
+DEFINE_SHOW_ATTRIBUTE(airoha_ppe_debugfs_foe_bind);
+
+int airoha_ppe_debugfs_init(struct airoha_ppe *ppe)
+{
+ ppe->debugfs_dir = debugfs_create_dir("ppe", NULL);
+ debugfs_create_file("entries", 0444, ppe->debugfs_dir, ppe,
+ &airoha_ppe_debugfs_foe_all_fops);
+ debugfs_create_file("bind", 0444, ppe->debugfs_dir, ppe,
+ &airoha_ppe_debugfs_foe_bind_fops);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/airoha/airoha_regs.h b/drivers/net/ethernet/airoha/airoha_regs.h
new file mode 100644
index 000000000000..8146cde4e8ba
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_regs.h
@@ -0,0 +1,803 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#ifndef AIROHA_REGS_H
+#define AIROHA_REGS_H
+
+#include <linux/types.h>
+
+/* FE */
+#define PSE_BASE 0x0100
+#define CSR_IFC_BASE 0x0200
+#define CDM1_BASE 0x0400
+#define GDM1_BASE 0x0500
+#define PPE1_BASE 0x0c00
+#define PPE2_BASE 0x1c00
+
+#define CDM2_BASE 0x1400
+#define GDM2_BASE 0x1500
+
+#define GDM3_BASE 0x1100
+#define GDM4_BASE 0x2500
+
+#define GDM_BASE(_n) \
+ ((_n) == 4 ? GDM4_BASE : \
+ (_n) == 3 ? GDM3_BASE : \
+ (_n) == 2 ? GDM2_BASE : GDM1_BASE)
+
+#define REG_FE_DMA_GLO_CFG 0x0000
+#define FE_DMA_GLO_L2_SPACE_MASK GENMASK(7, 4)
+#define FE_DMA_GLO_PG_SZ_MASK BIT(3)
+
+#define REG_FE_RST_GLO_CFG 0x0004
+#define FE_RST_GDM4_MBI_ARB_MASK BIT(3)
+#define FE_RST_GDM3_MBI_ARB_MASK BIT(2)
+#define FE_RST_CORE_MASK BIT(0)
+
+#define REG_FE_FOE_TS 0x0010
+
+#define REG_FE_WAN_PORT 0x0024
+#define WAN1_EN_MASK BIT(16)
+#define WAN1_MASK GENMASK(12, 8)
+#define WAN0_MASK GENMASK(4, 0)
+
+#define REG_FE_WAN_MAC_H 0x0030
+#define REG_FE_LAN_MAC_H 0x0040
+
+#define REG_FE_MAC_LMIN(_n) ((_n) + 0x04)
+#define REG_FE_MAC_LMAX(_n) ((_n) + 0x08)
+
+#define REG_FE_CDM1_OQ_MAP0 0x0050
+#define REG_FE_CDM1_OQ_MAP1 0x0054
+#define REG_FE_CDM1_OQ_MAP2 0x0058
+#define REG_FE_CDM1_OQ_MAP3 0x005c
+
+#define REG_FE_PCE_CFG 0x0070
+#define PCE_DPI_EN_MASK BIT(2)
+#define PCE_KA_EN_MASK BIT(1)
+#define PCE_MC_EN_MASK BIT(0)
+
+#define REG_FE_PSE_QUEUE_CFG_WR 0x0080
+#define PSE_CFG_PORT_ID_MASK GENMASK(27, 24)
+#define PSE_CFG_QUEUE_ID_MASK GENMASK(20, 16)
+#define PSE_CFG_WR_EN_MASK BIT(8)
+#define PSE_CFG_OQRSV_SEL_MASK BIT(0)
+
+#define REG_FE_PSE_QUEUE_CFG_VAL 0x0084
+#define PSE_CFG_OQ_RSV_MASK GENMASK(13, 0)
+
+#define PSE_FQ_CFG 0x008c
+#define PSE_FQ_LIMIT_MASK GENMASK(14, 0)
+
+#define REG_FE_PSE_BUF_SET 0x0090
+#define PSE_SHARE_USED_LTHD_MASK GENMASK(31, 16)
+#define PSE_ALLRSV_MASK GENMASK(14, 0)
+
+#define REG_PSE_SHARE_USED_THD 0x0094
+#define PSE_SHARE_USED_MTHD_MASK GENMASK(31, 16)
+#define PSE_SHARE_USED_HTHD_MASK GENMASK(15, 0)
+
+#define REG_GDM_MISC_CFG 0x0148
+#define GDM2_RDM_ACK_WAIT_PREF_MASK BIT(9)
+#define GDM2_CHN_VLD_MODE_MASK BIT(5)
+
+#define REG_FE_CSR_IFC_CFG CSR_IFC_BASE
+#define FE_IFC_EN_MASK BIT(0)
+
+#define REG_FE_VIP_PORT_EN 0x01f0
+#define REG_FE_IFC_PORT_EN 0x01f4
+
+#define REG_PSE_IQ_REV1 (PSE_BASE + 0x08)
+#define PSE_IQ_RES1_P2_MASK GENMASK(23, 16)
+
+#define REG_PSE_IQ_REV2 (PSE_BASE + 0x0c)
+#define PSE_IQ_RES2_P5_MASK GENMASK(15, 8)
+#define PSE_IQ_RES2_P4_MASK GENMASK(7, 0)
+
+#define REG_FE_VIP_EN(_n) (0x0300 + ((_n) << 3))
+#define PATN_FCPU_EN_MASK BIT(7)
+#define PATN_SWP_EN_MASK BIT(6)
+#define PATN_DP_EN_MASK BIT(5)
+#define PATN_SP_EN_MASK BIT(4)
+#define PATN_TYPE_MASK GENMASK(3, 1)
+#define PATN_EN_MASK BIT(0)
+
+#define REG_FE_VIP_PATN(_n) (0x0304 + ((_n) << 3))
+#define PATN_DP_MASK GENMASK(31, 16)
+#define PATN_SP_MASK GENMASK(15, 0)
+
+#define REG_CDM1_VLAN_CTRL CDM1_BASE
+#define CDM1_VLAN_MASK GENMASK(31, 16)
+
+#define REG_CDM1_FWD_CFG (CDM1_BASE + 0x08)
+#define CDM1_VIP_QSEL_MASK GENMASK(24, 20)
+
+#define REG_CDM1_CRSN_QSEL(_n) (CDM1_BASE + 0x10 + ((_n) << 2))
+#define CDM1_CRSN_QSEL_REASON_MASK(_n) \
+ GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3))
+
+#define REG_CDM2_FWD_CFG (CDM2_BASE + 0x08)
+#define CDM2_OAM_QSEL_MASK GENMASK(31, 27)
+#define CDM2_VIP_QSEL_MASK GENMASK(24, 20)
+
+#define REG_CDM2_CRSN_QSEL(_n) (CDM2_BASE + 0x10 + ((_n) << 2))
+#define CDM2_CRSN_QSEL_REASON_MASK(_n) \
+ GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3))
+
+#define REG_GDM_FWD_CFG(_n) GDM_BASE(_n)
+#define GDM_DROP_CRC_ERR BIT(23)
+#define GDM_IP4_CKSUM BIT(22)
+#define GDM_TCP_CKSUM BIT(21)
+#define GDM_UDP_CKSUM BIT(20)
+#define GDM_STRIP_CRC BIT(16)
+#define GDM_UCFQ_MASK GENMASK(15, 12)
+#define GDM_BCFQ_MASK GENMASK(11, 8)
+#define GDM_MCFQ_MASK GENMASK(7, 4)
+#define GDM_OCFQ_MASK GENMASK(3, 0)
+
+#define REG_GDM_INGRESS_CFG(_n) (GDM_BASE(_n) + 0x10)
+#define GDM_INGRESS_FC_EN_MASK BIT(1)
+#define GDM_STAG_EN_MASK BIT(0)
+
+#define REG_GDM_LEN_CFG(_n) (GDM_BASE(_n) + 0x14)
+#define GDM_SHORT_LEN_MASK GENMASK(13, 0)
+#define GDM_LONG_LEN_MASK GENMASK(29, 16)
+
+#define REG_GDM_LPBK_CFG(_n) (GDM_BASE(_n) + 0x1c)
+#define LPBK_GAP_MASK GENMASK(31, 24)
+#define LPBK_LEN_MASK GENMASK(23, 10)
+#define LPBK_CHAN_MASK GENMASK(8, 4)
+#define LPBK_MODE_MASK GENMASK(3, 1)
+#define LPBK_EN_MASK BIT(0)
+
+#define REG_GDM_TXCHN_EN(_n) (GDM_BASE(_n) + 0x24)
+#define REG_GDM_RXCHN_EN(_n) (GDM_BASE(_n) + 0x28)
+
+#define REG_FE_CPORT_CFG (GDM1_BASE + 0x40)
+#define FE_CPORT_PAD BIT(26)
+#define FE_CPORT_PORT_XFC_MASK BIT(25)
+#define FE_CPORT_QUEUE_XFC_MASK BIT(24)
+
+#define REG_FE_GDM_MIB_CLEAR(_n) (GDM_BASE(_n) + 0xf0)
+#define FE_GDM_MIB_RX_CLEAR_MASK BIT(1)
+#define FE_GDM_MIB_TX_CLEAR_MASK BIT(0)
+
+#define REG_FE_GDM1_MIB_CFG (GDM1_BASE + 0xf4)
+#define FE_STRICT_RFC2819_MODE_MASK BIT(31)
+#define FE_GDM1_TX_MIB_SPLIT_EN_MASK BIT(17)
+#define FE_GDM1_RX_MIB_SPLIT_EN_MASK BIT(16)
+#define FE_TX_MIB_ID_MASK GENMASK(15, 8)
+#define FE_RX_MIB_ID_MASK GENMASK(7, 0)
+
+#define REG_FE_GDM_TX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x104)
+#define REG_FE_GDM_TX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x10c)
+#define REG_FE_GDM_TX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x110)
+#define REG_FE_GDM_TX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x114)
+#define REG_FE_GDM_TX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x118)
+#define REG_FE_GDM_TX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x11c)
+#define REG_FE_GDM_TX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x120)
+#define REG_FE_GDM_TX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x124)
+#define REG_FE_GDM_TX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x128)
+#define REG_FE_GDM_TX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x12c)
+#define REG_FE_GDM_TX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x130)
+#define REG_FE_GDM_TX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x134)
+#define REG_FE_GDM_TX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x138)
+#define REG_FE_GDM_TX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x13c)
+#define REG_FE_GDM_TX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x140)
+
+#define REG_FE_GDM_RX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x148)
+#define REG_FE_GDM_RX_FC_DROP_CNT(_n) (GDM_BASE(_n) + 0x14c)
+#define REG_FE_GDM_RX_RC_DROP_CNT(_n) (GDM_BASE(_n) + 0x150)
+#define REG_FE_GDM_RX_OVERFLOW_DROP_CNT(_n) (GDM_BASE(_n) + 0x154)
+#define REG_FE_GDM_RX_ERROR_DROP_CNT(_n) (GDM_BASE(_n) + 0x158)
+#define REG_FE_GDM_RX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x15c)
+#define REG_FE_GDM_RX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x160)
+#define REG_FE_GDM_RX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x164)
+#define REG_FE_GDM_RX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x168)
+#define REG_FE_GDM_RX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x16c)
+#define REG_FE_GDM_RX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x170)
+#define REG_FE_GDM_RX_ETH_CRC_ERR_CNT(_n) (GDM_BASE(_n) + 0x174)
+#define REG_FE_GDM_RX_ETH_FRAG_CNT(_n) (GDM_BASE(_n) + 0x178)
+#define REG_FE_GDM_RX_ETH_JABBER_CNT(_n) (GDM_BASE(_n) + 0x17c)
+#define REG_FE_GDM_RX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x180)
+#define REG_FE_GDM_RX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x184)
+#define REG_FE_GDM_RX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x188)
+#define REG_FE_GDM_RX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x18c)
+#define REG_FE_GDM_RX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x190)
+#define REG_FE_GDM_RX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x194)
+#define REG_FE_GDM_RX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x198)
+#define REG_FE_GDM_RX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x19c)
+
+#define REG_PPE_GLO_CFG(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x200)
+#define PPE_GLO_CFG_BUSY_MASK BIT(31)
+#define PPE_GLO_CFG_FLOW_DROP_UPDATE_MASK BIT(9)
+#define PPE_GLO_CFG_PSE_HASH_OFS_MASK BIT(6)
+#define PPE_GLO_CFG_PPE_BSWAP_MASK BIT(5)
+#define PPE_GLO_CFG_TTL_DROP_MASK BIT(4)
+#define PPE_GLO_CFG_IP4_CS_DROP_MASK BIT(3)
+#define PPE_GLO_CFG_IP4_L4_CS_DROP_MASK BIT(2)
+#define PPE_GLO_CFG_EN_MASK BIT(0)
+
+#define REG_PPE_PPE_FLOW_CFG(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x204)
+#define PPE_FLOW_CFG_IP6_HASH_GRE_KEY_MASK BIT(20)
+#define PPE_FLOW_CFG_IP4_HASH_GRE_KEY_MASK BIT(19)
+#define PPE_FLOW_CFG_IP4_HASH_FLOW_LABEL_MASK BIT(18)
+#define PPE_FLOW_CFG_IP4_NAT_FRAG_MASK BIT(17)
+#define PPE_FLOW_CFG_IP_PROTO_BLACKLIST_MASK BIT(16)
+#define PPE_FLOW_CFG_IP4_DSLITE_MASK BIT(14)
+#define PPE_FLOW_CFG_IP4_NAPT_MASK BIT(13)
+#define PPE_FLOW_CFG_IP4_NAT_MASK BIT(12)
+#define PPE_FLOW_CFG_IP6_6RD_MASK BIT(10)
+#define PPE_FLOW_CFG_IP6_5T_ROUTE_MASK BIT(9)
+#define PPE_FLOW_CFG_IP6_3T_ROUTE_MASK BIT(8)
+#define PPE_FLOW_CFG_IP4_UDP_FRAG_MASK BIT(7)
+#define PPE_FLOW_CFG_IP4_TCP_FRAG_MASK BIT(6)
+
+#define REG_PPE_IP_PROTO_CHK(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x208)
+#define PPE_IP_PROTO_CHK_IPV4_MASK GENMASK(15, 0)
+#define PPE_IP_PROTO_CHK_IPV6_MASK GENMASK(31, 16)
+
+#define REG_PPE_TB_CFG(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x21c)
+#define PPE_SRAM_TB_NUM_ENTRY_MASK GENMASK(26, 24)
+#define PPE_TB_CFG_KEEPALIVE_MASK GENMASK(13, 12)
+#define PPE_TB_CFG_AGE_TCP_FIN_MASK BIT(11)
+#define PPE_TB_CFG_AGE_UDP_MASK BIT(10)
+#define PPE_TB_CFG_AGE_TCP_MASK BIT(9)
+#define PPE_TB_CFG_AGE_UNBIND_MASK BIT(8)
+#define PPE_TB_CFG_AGE_NON_L4_MASK BIT(7)
+#define PPE_TB_CFG_AGE_PREBIND_MASK BIT(6)
+#define PPE_TB_CFG_SEARCH_MISS_MASK GENMASK(5, 4)
+#define PPE_TB_ENTRY_SIZE_MASK BIT(3)
+#define PPE_DRAM_TB_NUM_ENTRY_MASK GENMASK(2, 0)
+
+#define REG_PPE_TB_BASE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x220)
+
+#define REG_PPE_BIND_RATE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x228)
+#define PPE_BIND_RATE_L2B_BIND_MASK GENMASK(31, 16)
+#define PPE_BIND_RATE_BIND_MASK GENMASK(15, 0)
+
+#define REG_PPE_BIND_LIMIT0(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x22c)
+#define PPE_BIND_LIMIT0_HALF_MASK GENMASK(29, 16)
+#define PPE_BIND_LIMIT0_QUARTER_MASK GENMASK(13, 0)
+
+#define REG_PPE_BIND_LIMIT1(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x230)
+#define PPE_BIND_LIMIT1_NON_L4_MASK GENMASK(23, 16)
+#define PPE_BIND_LIMIT1_FULL_MASK GENMASK(13, 0)
+
+#define REG_PPE_BND_AGE0(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x23c)
+#define PPE_BIND_AGE0_DELTA_NON_L4 GENMASK(30, 16)
+#define PPE_BIND_AGE0_DELTA_UDP GENMASK(14, 0)
+
+#define REG_PPE_UNBIND_AGE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x238)
+#define PPE_UNBIND_AGE_MIN_PACKETS_MASK GENMASK(31, 16)
+#define PPE_UNBIND_AGE_DELTA_MASK GENMASK(7, 0)
+
+#define REG_PPE_BND_AGE1(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x240)
+#define PPE_BIND_AGE1_DELTA_TCP_FIN GENMASK(30, 16)
+#define PPE_BIND_AGE1_DELTA_TCP GENMASK(14, 0)
+
+#define REG_PPE_HASH_SEED(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x244)
+#define PPE_HASH_SEED 0x12345678
+
+#define REG_PPE_DFT_CPORT0(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x248)
+
+#define REG_PPE_DFT_CPORT1(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x24c)
+
+#define REG_PPE_TB_HASH_CFG(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x250)
+#define PPE_DRAM_HASH1_MODE_MASK GENMASK(31, 28)
+#define PPE_DRAM_HASH1_EN_MASK BIT(24)
+#define PPE_DRAM_HASH0_MODE_MASK GENMASK(23, 20)
+#define PPE_DRAM_TABLE_EN_MASK BIT(16)
+#define PPE_SRAM_HASH1_MODE_MASK GENMASK(15, 12)
+#define PPE_SRAM_HASH1_EN_MASK BIT(8)
+#define PPE_SRAM_HASH0_MODE_MASK GENMASK(7, 4)
+#define PPE_SRAM_TABLE_EN_MASK BIT(0)
+
+#define REG_PPE_MTU_BASE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x304)
+#define REG_PPE_MTU(_m, _n) (REG_PPE_MTU_BASE(_m) + ((_n) << 2))
+#define FP1_EGRESS_MTU_MASK GENMASK(29, 16)
+#define FP0_EGRESS_MTU_MASK GENMASK(13, 0)
+
+#define REG_PPE_RAM_CTRL(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x31c)
+#define PPE_SRAM_CTRL_ACK_MASK BIT(31)
+#define PPE_SRAM_CTRL_DUAL_SUCCESS_MASK	BIT(30)
+#define PPE_SRAM_CTRL_ENTRY_MASK GENMASK(23, 8)
+#define PPE_SRAM_WR_DUAL_DIRECTION_MASK BIT(2)
+#define PPE_SRAM_CTRL_WR_MASK BIT(1)
+#define PPE_SRAM_CTRL_REQ_MASK BIT(0)
+
+#define REG_PPE_RAM_BASE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x320)
+#define REG_PPE_RAM_ENTRY(_m, _n) (REG_PPE_RAM_BASE(_m) + ((_n) << 2))
+
+#define REG_FE_GDM_TX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x280)
+#define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x284)
+#define REG_FE_GDM_TX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x288)
+#define REG_FE_GDM_TX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x28c)
+
+#define REG_FE_GDM_RX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x290)
+#define REG_FE_GDM_RX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x294)
+#define REG_FE_GDM_RX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x298)
+#define REG_FE_GDM_RX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x29c)
+#define REG_FE_GDM_TX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2b8)
+#define REG_FE_GDM_TX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2bc)
+#define REG_FE_GDM_TX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2c0)
+#define REG_FE_GDM_TX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2c4)
+#define REG_FE_GDM_TX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2c8)
+#define REG_FE_GDM_TX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2cc)
+#define REG_FE_GDM_RX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2e8)
+#define REG_FE_GDM_RX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2ec)
+#define REG_FE_GDM_RX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2f0)
+#define REG_FE_GDM_RX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2f4)
+#define REG_FE_GDM_RX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2f8)
+#define REG_FE_GDM_RX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2fc)
+
+#define REG_GDM2_CHN_RLS (GDM2_BASE + 0x20)
+#define MBI_RX_AGE_SEL_MASK GENMASK(26, 25)
+#define MBI_TX_AGE_SEL_MASK GENMASK(18, 17)
+
+#define REG_GDM3_FWD_CFG GDM3_BASE
+#define GDM3_PAD_EN_MASK BIT(28)
+
+#define REG_GDM4_FWD_CFG GDM4_BASE
+#define GDM4_PAD_EN_MASK BIT(28)
+#define GDM4_SPORT_OFFSET0_MASK GENMASK(11, 8)
+
+#define REG_GDM4_SRC_PORT_SET (GDM4_BASE + 0x23c)
+#define GDM4_SPORT_OFF2_MASK GENMASK(19, 16)
+#define GDM4_SPORT_OFF1_MASK GENMASK(15, 12)
+#define GDM4_SPORT_OFF0_MASK GENMASK(11, 8)
+
+#define REG_IP_FRAG_FP 0x2010
+#define IP_ASSEMBLE_PORT_MASK GENMASK(24, 21)
+#define IP_ASSEMBLE_NBQ_MASK GENMASK(20, 16)
+#define IP_FRAGMENT_PORT_MASK GENMASK(8, 5)
+#define IP_FRAGMENT_NBQ_MASK GENMASK(4, 0)
+
+#define REG_MC_VLAN_EN 0x2100
+#define MC_VLAN_EN_MASK BIT(0)
+
+#define REG_MC_VLAN_CFG 0x2104
+#define MC_VLAN_CFG_CMD_DONE_MASK BIT(31)
+#define MC_VLAN_CFG_TABLE_ID_MASK GENMASK(21, 16)
+#define MC_VLAN_CFG_PORT_ID_MASK GENMASK(11, 8)
+#define MC_VLAN_CFG_TABLE_SEL_MASK BIT(4)
+#define MC_VLAN_CFG_RW_MASK BIT(0)
+
+#define REG_MC_VLAN_DATA 0x2108
+
+#define REG_SP_DFT_CPORT(_n) (0x20e0 + ((_n) << 2))
+#define SP_CPORT_PCIE1_MASK GENMASK(31, 28)
+#define SP_CPORT_PCIE0_MASK GENMASK(27, 24)
+#define SP_CPORT_USB_MASK GENMASK(7, 4)
+#define SP_CPORT_ETH_MASK GENMASK(7, 4)
+
+#define REG_SRC_PORT_FC_MAP6 0x2298
+#define FC_ID_OF_SRC_PORT27_MASK GENMASK(28, 24)
+#define FC_ID_OF_SRC_PORT26_MASK GENMASK(20, 16)
+#define FC_ID_OF_SRC_PORT25_MASK GENMASK(12, 8)
+#define FC_ID_OF_SRC_PORT24_MASK GENMASK(4, 0)
+
+#define REG_CDM5_RX_OQ1_DROP_CNT 0x29d4
+
+/* QDMA */
+#define REG_QDMA_GLOBAL_CFG 0x0004
+#define GLOBAL_CFG_RX_2B_OFFSET_MASK BIT(31)
+#define GLOBAL_CFG_DMA_PREFERENCE_MASK GENMASK(30, 29)
+#define GLOBAL_CFG_CPU_TXR_RR_MASK BIT(28)
+#define GLOBAL_CFG_DSCP_BYTE_SWAP_MASK BIT(27)
+#define GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK BIT(26)
+#define GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK BIT(25)
+#define GLOBAL_CFG_OAM_MODIFY_MASK BIT(24)
+#define GLOBAL_CFG_RESET_MASK BIT(23)
+#define GLOBAL_CFG_RESET_DONE_MASK BIT(22)
+#define GLOBAL_CFG_MULTICAST_EN_MASK BIT(21)
+#define GLOBAL_CFG_IRQ1_EN_MASK BIT(20)
+#define GLOBAL_CFG_IRQ0_EN_MASK BIT(19)
+#define GLOBAL_CFG_LOOPCNT_EN_MASK BIT(18)
+#define GLOBAL_CFG_RD_BYPASS_WR_MASK BIT(17)
+#define GLOBAL_CFG_QDMA_LOOPBACK_MASK BIT(16)
+#define GLOBAL_CFG_LPBK_RXQ_SEL_MASK GENMASK(13, 8)
+#define GLOBAL_CFG_CHECK_DONE_MASK BIT(7)
+#define GLOBAL_CFG_TX_WB_DONE_MASK BIT(6)
+#define GLOBAL_CFG_MAX_ISSUE_NUM_MASK GENMASK(5, 4)
+#define GLOBAL_CFG_RX_DMA_BUSY_MASK BIT(3)
+#define GLOBAL_CFG_RX_DMA_EN_MASK BIT(2)
+#define GLOBAL_CFG_TX_DMA_BUSY_MASK BIT(1)
+#define GLOBAL_CFG_TX_DMA_EN_MASK BIT(0)
+
+#define REG_FWD_DSCP_BASE 0x0010
+#define REG_FWD_BUF_BASE 0x0014
+
+#define REG_HW_FWD_DSCP_CFG 0x0018
+#define HW_FWD_DSCP_PAYLOAD_SIZE_MASK GENMASK(29, 28)
+#define HW_FWD_DSCP_SCATTER_LEN_MASK GENMASK(17, 16)
+#define HW_FWD_DSCP_MIN_SCATTER_LEN_MASK GENMASK(15, 0)
+
+#define REG_INT_STATUS(_n) \
+ (((_n) == 4) ? 0x0730 : \
+ ((_n) == 3) ? 0x0724 : \
+ ((_n) == 2) ? 0x0720 : \
+ ((_n) == 1) ? 0x0024 : 0x0020)
+
+#define REG_INT_ENABLE(_n) \
+ (((_n) == 4) ? 0x0750 : \
+ ((_n) == 3) ? 0x0744 : \
+ ((_n) == 2) ? 0x0740 : \
+ ((_n) == 1) ? 0x002c : 0x0028)
+
+/* QDMA_CSR_INT_ENABLE1 */
+#define RX15_COHERENT_INT_MASK BIT(31)
+#define RX14_COHERENT_INT_MASK BIT(30)
+#define RX13_COHERENT_INT_MASK BIT(29)
+#define RX12_COHERENT_INT_MASK BIT(28)
+#define RX11_COHERENT_INT_MASK BIT(27)
+#define RX10_COHERENT_INT_MASK BIT(26)
+#define RX9_COHERENT_INT_MASK BIT(25)
+#define RX8_COHERENT_INT_MASK BIT(24)
+#define RX7_COHERENT_INT_MASK BIT(23)
+#define RX6_COHERENT_INT_MASK BIT(22)
+#define RX5_COHERENT_INT_MASK BIT(21)
+#define RX4_COHERENT_INT_MASK BIT(20)
+#define RX3_COHERENT_INT_MASK BIT(19)
+#define RX2_COHERENT_INT_MASK BIT(18)
+#define RX1_COHERENT_INT_MASK BIT(17)
+#define RX0_COHERENT_INT_MASK BIT(16)
+#define TX7_COHERENT_INT_MASK BIT(15)
+#define TX6_COHERENT_INT_MASK BIT(14)
+#define TX5_COHERENT_INT_MASK BIT(13)
+#define TX4_COHERENT_INT_MASK BIT(12)
+#define TX3_COHERENT_INT_MASK BIT(11)
+#define TX2_COHERENT_INT_MASK BIT(10)
+#define TX1_COHERENT_INT_MASK BIT(9)
+#define TX0_COHERENT_INT_MASK BIT(8)
+#define CNT_OVER_FLOW_INT_MASK BIT(7)
+#define IRQ1_FULL_INT_MASK BIT(5)
+#define IRQ1_INT_MASK BIT(4)
+#define HWFWD_DSCP_LOW_INT_MASK BIT(3)
+#define HWFWD_DSCP_EMPTY_INT_MASK BIT(2)
+#define IRQ0_FULL_INT_MASK BIT(1)
+#define IRQ0_INT_MASK BIT(0)
+
+#define TX_DONE_INT_MASK(_n) \
+ ((_n) ? IRQ1_INT_MASK | IRQ1_FULL_INT_MASK \
+ : IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
+
+#define INT_TX_MASK \
+ (IRQ1_INT_MASK | IRQ1_FULL_INT_MASK | \
+ IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
+
+#define INT_IDX0_MASK \
+ (TX0_COHERENT_INT_MASK | TX1_COHERENT_INT_MASK | \
+ TX2_COHERENT_INT_MASK | TX3_COHERENT_INT_MASK | \
+ TX4_COHERENT_INT_MASK | TX5_COHERENT_INT_MASK | \
+ TX6_COHERENT_INT_MASK | TX7_COHERENT_INT_MASK | \
+ RX0_COHERENT_INT_MASK | RX1_COHERENT_INT_MASK | \
+ RX2_COHERENT_INT_MASK | RX3_COHERENT_INT_MASK | \
+ RX4_COHERENT_INT_MASK | RX7_COHERENT_INT_MASK | \
+ RX8_COHERENT_INT_MASK | RX9_COHERENT_INT_MASK | \
+ RX15_COHERENT_INT_MASK | INT_TX_MASK)
+
+/* QDMA_CSR_INT_ENABLE2 */
+#define RX15_NO_CPU_DSCP_INT_MASK BIT(31)
+#define RX14_NO_CPU_DSCP_INT_MASK BIT(30)
+#define RX13_NO_CPU_DSCP_INT_MASK BIT(29)
+#define RX12_NO_CPU_DSCP_INT_MASK BIT(28)
+#define RX11_NO_CPU_DSCP_INT_MASK BIT(27)
+#define RX10_NO_CPU_DSCP_INT_MASK BIT(26)
+#define RX9_NO_CPU_DSCP_INT_MASK BIT(25)
+#define RX8_NO_CPU_DSCP_INT_MASK BIT(24)
+#define RX7_NO_CPU_DSCP_INT_MASK BIT(23)
+#define RX6_NO_CPU_DSCP_INT_MASK BIT(22)
+#define RX5_NO_CPU_DSCP_INT_MASK BIT(21)
+#define RX4_NO_CPU_DSCP_INT_MASK BIT(20)
+#define RX3_NO_CPU_DSCP_INT_MASK BIT(19)
+#define RX2_NO_CPU_DSCP_INT_MASK BIT(18)
+#define RX1_NO_CPU_DSCP_INT_MASK BIT(17)
+#define RX0_NO_CPU_DSCP_INT_MASK BIT(16)
+#define RX15_DONE_INT_MASK BIT(15)
+#define RX14_DONE_INT_MASK BIT(14)
+#define RX13_DONE_INT_MASK BIT(13)
+#define RX12_DONE_INT_MASK BIT(12)
+#define RX11_DONE_INT_MASK BIT(11)
+#define RX10_DONE_INT_MASK BIT(10)
+#define RX9_DONE_INT_MASK BIT(9)
+#define RX8_DONE_INT_MASK BIT(8)
+#define RX7_DONE_INT_MASK BIT(7)
+#define RX6_DONE_INT_MASK BIT(6)
+#define RX5_DONE_INT_MASK BIT(5)
+#define RX4_DONE_INT_MASK BIT(4)
+#define RX3_DONE_INT_MASK BIT(3)
+#define RX2_DONE_INT_MASK BIT(2)
+#define RX1_DONE_INT_MASK BIT(1)
+#define RX0_DONE_INT_MASK BIT(0)
+
+#define RX_DONE_INT_MASK \
+ (RX0_DONE_INT_MASK | RX1_DONE_INT_MASK | \
+ RX2_DONE_INT_MASK | RX3_DONE_INT_MASK | \
+ RX4_DONE_INT_MASK | RX7_DONE_INT_MASK | \
+ RX8_DONE_INT_MASK | RX9_DONE_INT_MASK | \
+ RX15_DONE_INT_MASK)
+#define INT_IDX1_MASK \
+ (RX_DONE_INT_MASK | \
+ RX0_NO_CPU_DSCP_INT_MASK | RX1_NO_CPU_DSCP_INT_MASK | \
+ RX2_NO_CPU_DSCP_INT_MASK | RX3_NO_CPU_DSCP_INT_MASK | \
+ RX4_NO_CPU_DSCP_INT_MASK | RX7_NO_CPU_DSCP_INT_MASK | \
+ RX8_NO_CPU_DSCP_INT_MASK | RX9_NO_CPU_DSCP_INT_MASK | \
+ RX15_NO_CPU_DSCP_INT_MASK)
+
+/* QDMA_CSR_INT_ENABLE5 */
+#define TX31_COHERENT_INT_MASK BIT(31)
+#define TX30_COHERENT_INT_MASK BIT(30)
+#define TX29_COHERENT_INT_MASK BIT(29)
+#define TX28_COHERENT_INT_MASK BIT(28)
+#define TX27_COHERENT_INT_MASK BIT(27)
+#define TX26_COHERENT_INT_MASK BIT(26)
+#define TX25_COHERENT_INT_MASK BIT(25)
+#define TX24_COHERENT_INT_MASK BIT(24)
+#define TX23_COHERENT_INT_MASK BIT(23)
+#define TX22_COHERENT_INT_MASK BIT(22)
+#define TX21_COHERENT_INT_MASK BIT(21)
+#define TX20_COHERENT_INT_MASK BIT(20)
+#define TX19_COHERENT_INT_MASK BIT(19)
+#define TX18_COHERENT_INT_MASK BIT(18)
+#define TX17_COHERENT_INT_MASK BIT(17)
+#define TX16_COHERENT_INT_MASK BIT(16)
+#define TX15_COHERENT_INT_MASK BIT(15)
+#define TX14_COHERENT_INT_MASK BIT(14)
+#define TX13_COHERENT_INT_MASK BIT(13)
+#define TX12_COHERENT_INT_MASK BIT(12)
+#define TX11_COHERENT_INT_MASK BIT(11)
+#define TX10_COHERENT_INT_MASK BIT(10)
+#define TX9_COHERENT_INT_MASK BIT(9)
+#define TX8_COHERENT_INT_MASK BIT(8)
+
+#define INT_IDX4_MASK \
+ (TX8_COHERENT_INT_MASK | TX9_COHERENT_INT_MASK | \
+ TX10_COHERENT_INT_MASK | TX11_COHERENT_INT_MASK | \
+ TX12_COHERENT_INT_MASK | TX13_COHERENT_INT_MASK | \
+ TX14_COHERENT_INT_MASK | TX15_COHERENT_INT_MASK | \
+ TX16_COHERENT_INT_MASK | TX17_COHERENT_INT_MASK | \
+ TX18_COHERENT_INT_MASK | TX19_COHERENT_INT_MASK | \
+ TX20_COHERENT_INT_MASK | TX21_COHERENT_INT_MASK | \
+ TX22_COHERENT_INT_MASK | TX23_COHERENT_INT_MASK | \
+ TX24_COHERENT_INT_MASK | TX25_COHERENT_INT_MASK | \
+ TX26_COHERENT_INT_MASK | TX27_COHERENT_INT_MASK | \
+ TX28_COHERENT_INT_MASK | TX29_COHERENT_INT_MASK | \
+ TX30_COHERENT_INT_MASK | TX31_COHERENT_INT_MASK)
+
+#define REG_TX_IRQ_BASE(_n) ((_n) ? 0x0048 : 0x0050)
+
+#define REG_TX_IRQ_CFG(_n) ((_n) ? 0x004c : 0x0054)
+#define TX_IRQ_THR_MASK GENMASK(27, 16)
+#define TX_IRQ_DEPTH_MASK GENMASK(11, 0)
+
+#define REG_IRQ_CLEAR_LEN(_n) ((_n) ? 0x0064 : 0x0058)
+#define IRQ_CLEAR_LEN_MASK GENMASK(7, 0)
+
+#define REG_IRQ_STATUS(_n) ((_n) ? 0x0068 : 0x005c)
+#define IRQ_ENTRY_LEN_MASK GENMASK(27, 16)
+#define IRQ_HEAD_IDX_MASK GENMASK(11, 0)
+
+#define REG_TX_RING_BASE(_n) \
+ (((_n) < 8) ? 0x0100 + ((_n) << 5) : 0x0b00 + (((_n) - 8) << 5))
+
+#define REG_TX_RING_BLOCKING(_n) \
+ (((_n) < 8) ? 0x0104 + ((_n) << 5) : 0x0b04 + (((_n) - 8) << 5))
+
+#define TX_RING_IRQ_BLOCKING_MAP_MASK BIT(6)
+#define TX_RING_IRQ_BLOCKING_CFG_MASK BIT(4)
+#define TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK BIT(2)
+#define TX_RING_IRQ_BLOCKING_MAX_TH_TXRING_EN_MASK BIT(1)
+#define TX_RING_IRQ_BLOCKING_MIN_TH_TXRING_EN_MASK BIT(0)
+
+#define REG_TX_CPU_IDX(_n) \
+ (((_n) < 8) ? 0x0108 + ((_n) << 5) : 0x0b08 + (((_n) - 8) << 5))
+
+#define TX_RING_CPU_IDX_MASK GENMASK(15, 0)
+
+#define REG_TX_DMA_IDX(_n) \
+ (((_n) < 8) ? 0x010c + ((_n) << 5) : 0x0b0c + (((_n) - 8) << 5))
+
+#define TX_RING_DMA_IDX_MASK GENMASK(15, 0)
+
+#define IRQ_RING_IDX_MASK GENMASK(20, 16)
+#define IRQ_DESC_IDX_MASK GENMASK(15, 0)
+
+#define REG_RX_RING_BASE(_n) \
+ (((_n) < 16) ? 0x0200 + ((_n) << 5) : 0x0e00 + (((_n) - 16) << 5))
+
+#define REG_RX_RING_SIZE(_n) \
+ (((_n) < 16) ? 0x0204 + ((_n) << 5) : 0x0e04 + (((_n) - 16) << 5))
+
+#define RX_RING_THR_MASK GENMASK(31, 16)
+#define RX_RING_SIZE_MASK GENMASK(15, 0)
+
+#define REG_RX_CPU_IDX(_n) \
+ (((_n) < 16) ? 0x0208 + ((_n) << 5) : 0x0e08 + (((_n) - 16) << 5))
+
+#define RX_RING_CPU_IDX_MASK GENMASK(15, 0)
+
+#define REG_RX_DMA_IDX(_n) \
+ (((_n) < 16) ? 0x020c + ((_n) << 5) : 0x0e0c + (((_n) - 16) << 5))
+
+#define REG_RX_DELAY_INT_IDX(_n) \
+ (((_n) < 16) ? 0x0210 + ((_n) << 5) : 0x0e10 + (((_n) - 16) << 5))
+
+#define REG_RX_SCATTER_CFG(_n) \
+ (((_n) < 16) ? 0x0214 + ((_n) << 5) : 0x0e14 + (((_n) - 16) << 5))
+
+#define RX_DELAY_INT_MASK GENMASK(15, 0)
+
+#define RX_RING_DMA_IDX_MASK GENMASK(15, 0)
+
+#define RX_RING_SG_EN_MASK BIT(0)
+
+#define REG_INGRESS_TRTCM_CFG 0x0070
+#define INGRESS_TRTCM_EN_MASK BIT(31)
+#define INGRESS_TRTCM_MODE_MASK BIT(30)
+#define INGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
+#define INGRESS_FAST_TICK_MASK GENMASK(15, 0)
+
+#define REG_QUEUE_CLOSE_CFG(_n) (0x00a0 + ((_n) & 0xfc))
+#define TXQ_DISABLE_CHAN_QUEUE_MASK(_n, _m) BIT((_m) + (((_n) & 0x3) << 3))
+
+#define REG_TXQ_DIS_CFG_BASE(_n) ((_n) ? 0x20a0 : 0x00a0)
+#define REG_TXQ_DIS_CFG(_n, _m)		(REG_TXQ_DIS_CFG_BASE((_n)) + ((_m) << 2))
+
+#define REG_CNTR_CFG(_n) (0x0400 + ((_n) << 3))
+#define CNTR_EN_MASK BIT(31)
+#define CNTR_ALL_CHAN_EN_MASK BIT(30)
+#define CNTR_ALL_QUEUE_EN_MASK BIT(29)
+#define CNTR_ALL_DSCP_RING_EN_MASK BIT(28)
+#define CNTR_SRC_MASK GENMASK(27, 24)
+#define CNTR_DSCP_RING_MASK GENMASK(20, 16)
+#define CNTR_CHAN_MASK GENMASK(7, 3)
+#define CNTR_QUEUE_MASK GENMASK(2, 0)
+
+#define REG_CNTR_VAL(_n) (0x0404 + ((_n) << 3))
+
+#define REG_LMGR_INIT_CFG 0x1000
+#define LMGR_INIT_START BIT(31)
+#define LMGR_SRAM_MODE_MASK BIT(30)
+#define HW_FWD_PKTSIZE_OVERHEAD_MASK GENMASK(27, 20)
+#define HW_FWD_DESC_NUM_MASK GENMASK(16, 0)
+
+#define REG_FWD_DSCP_LOW_THR 0x1004
+#define FWD_DSCP_LOW_THR_MASK GENMASK(17, 0)
+
+#define REG_EGRESS_RATE_METER_CFG 0x100c
+#define EGRESS_RATE_METER_EN_MASK BIT(31)
+#define EGRESS_RATE_METER_EQ_RATE_EN_MASK BIT(17)
+#define EGRESS_RATE_METER_WINDOW_SZ_MASK GENMASK(16, 12)
+#define EGRESS_RATE_METER_TIMESLICE_MASK GENMASK(10, 0)
+
+#define REG_EGRESS_TRTCM_CFG 0x1010
+#define EGRESS_TRTCM_EN_MASK BIT(31)
+#define EGRESS_TRTCM_MODE_MASK BIT(30)
+#define EGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
+#define EGRESS_FAST_TICK_MASK GENMASK(15, 0)
+
+#define TRTCM_PARAM_RW_MASK BIT(31)
+#define TRTCM_PARAM_RW_DONE_MASK BIT(30)
+#define TRTCM_PARAM_TYPE_MASK GENMASK(29, 28)
+#define TRTCM_METER_GROUP_MASK GENMASK(27, 26)
+#define TRTCM_PARAM_INDEX_MASK GENMASK(23, 17)
+#define TRTCM_PARAM_RATE_TYPE_MASK BIT(16)
+
+#define REG_TRTCM_CFG_PARAM(_n) ((_n) + 0x4)
+#define REG_TRTCM_DATA_LOW(_n) ((_n) + 0x8)
+#define REG_TRTCM_DATA_HIGH(_n) ((_n) + 0xc)
+
+#define REG_TXWRR_MODE_CFG 0x1020
+#define TWRR_WEIGHT_SCALE_MASK BIT(31)
+#define TWRR_WEIGHT_BASE_MASK BIT(3)
+
+#define REG_TXWRR_WEIGHT_CFG 0x1024
+#define TWRR_RW_CMD_MASK BIT(31)
+#define TWRR_RW_CMD_DONE BIT(30)
+#define TWRR_CHAN_IDX_MASK GENMASK(23, 19)
+#define TWRR_QUEUE_IDX_MASK GENMASK(18, 16)
+#define TWRR_VALUE_MASK GENMASK(15, 0)
+
+#define REG_PSE_BUF_USAGE_CFG 0x1028
+#define PSE_BUF_ESTIMATE_EN_MASK BIT(29)
+
+#define REG_CHAN_QOS_MODE(_n) (0x1040 + ((_n) << 2))
+#define CHAN_QOS_MODE_MASK(_n) GENMASK(2 + ((_n) << 2), (_n) << 2)
+
+#define REG_GLB_TRTCM_CFG 0x1080
+#define GLB_TRTCM_EN_MASK BIT(31)
+#define GLB_TRTCM_MODE_MASK BIT(30)
+#define GLB_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
+#define GLB_FAST_TICK_MASK GENMASK(15, 0)
+
+#define REG_TXQ_CNGST_CFG 0x10a0
+#define TXQ_CNGST_DROP_EN BIT(31)
+#define TXQ_CNGST_DEI_DROP_EN BIT(30)
+
+#define REG_SLA_TRTCM_CFG 0x1150
+#define SLA_TRTCM_EN_MASK BIT(31)
+#define SLA_TRTCM_MODE_MASK BIT(30)
+#define SLA_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
+#define SLA_FAST_TICK_MASK GENMASK(15, 0)
+
+/* CTRL */
+#define QDMA_DESC_DONE_MASK BIT(31)
+#define QDMA_DESC_DROP_MASK BIT(30) /* tx: drop - rx: overflow */
+#define QDMA_DESC_MORE_MASK BIT(29) /* more SG elements */
+#define QDMA_DESC_DEI_MASK BIT(25)
+#define QDMA_DESC_NO_DROP_MASK BIT(24)
+#define QDMA_DESC_LEN_MASK GENMASK(15, 0)
+/* DATA */
+#define QDMA_DESC_NEXT_ID_MASK GENMASK(15, 0)
+/* TX MSG0 */
+#define QDMA_ETH_TXMSG_MIC_IDX_MASK BIT(30)
+#define QDMA_ETH_TXMSG_SP_TAG_MASK GENMASK(29, 14)
+#define QDMA_ETH_TXMSG_ICO_MASK BIT(13)
+#define QDMA_ETH_TXMSG_UCO_MASK BIT(12)
+#define QDMA_ETH_TXMSG_TCO_MASK BIT(11)
+#define QDMA_ETH_TXMSG_TSO_MASK BIT(10)
+#define QDMA_ETH_TXMSG_FAST_MASK BIT(9)
+#define QDMA_ETH_TXMSG_OAM_MASK BIT(8)
+#define QDMA_ETH_TXMSG_CHAN_MASK GENMASK(7, 3)
+#define QDMA_ETH_TXMSG_QUEUE_MASK GENMASK(2, 0)
+/* TX MSG1 */
+#define QDMA_ETH_TXMSG_NO_DROP BIT(31)
+#define QDMA_ETH_TXMSG_METER_MASK GENMASK(30, 24) /* 0x7f no meters */
+#define QDMA_ETH_TXMSG_FPORT_MASK GENMASK(23, 20)
+#define QDMA_ETH_TXMSG_NBOQ_MASK GENMASK(19, 15)
+#define QDMA_ETH_TXMSG_HWF_MASK BIT(14)
+#define QDMA_ETH_TXMSG_HOP_MASK BIT(13)
+#define QDMA_ETH_TXMSG_PTP_MASK BIT(12)
+#define QDMA_ETH_TXMSG_ACNT_G1_MASK GENMASK(10, 6) /* 0x1f do not count */
+#define QDMA_ETH_TXMSG_ACNT_G0_MASK GENMASK(5, 0) /* 0x3f do not count */
+
+/* RX MSG0 */
+#define QDMA_ETH_RXMSG_SPTAG GENMASK(21, 14)
+/* RX MSG1 */
+#define QDMA_ETH_RXMSG_DEI_MASK BIT(31)
+#define QDMA_ETH_RXMSG_IP6_MASK BIT(30)
+#define QDMA_ETH_RXMSG_IP4_MASK BIT(29)
+#define QDMA_ETH_RXMSG_IP4F_MASK BIT(28)
+#define QDMA_ETH_RXMSG_L4_VALID_MASK BIT(27)
+#define QDMA_ETH_RXMSG_L4F_MASK BIT(26)
+#define QDMA_ETH_RXMSG_SPORT_MASK GENMASK(25, 21)
+#define QDMA_ETH_RXMSG_CRSN_MASK GENMASK(20, 16)
+#define QDMA_ETH_RXMSG_PPE_ENTRY_MASK GENMASK(15, 0)
+
+struct airoha_qdma_desc {
+ __le32 rsv;
+ __le32 ctrl;
+ __le32 addr;
+ __le32 data;
+ __le32 msg0;
+ __le32 msg1;
+ __le32 msg2;
+ __le32 msg3;
+};
+
+/* CTRL0 */
+#define QDMA_FWD_DESC_CTX_MASK BIT(31)
+#define QDMA_FWD_DESC_RING_MASK GENMASK(30, 28)
+#define QDMA_FWD_DESC_IDX_MASK GENMASK(27, 16)
+#define QDMA_FWD_DESC_LEN_MASK GENMASK(15, 0)
+/* CTRL1 */
+#define QDMA_FWD_DESC_FIRST_IDX_MASK GENMASK(15, 0)
+/* CTRL2 */
+#define QDMA_FWD_DESC_MORE_PKT_NUM_MASK GENMASK(2, 0)
+
+struct airoha_qdma_fwd_desc {
+ __le32 addr;
+ __le32 ctrl0;
+ __le32 ctrl1;
+ __le32 ctrl2;
+ __le32 msg0;
+ __le32 msg1;
+ __le32 rsv0;
+ __le32 rsv1;
+};
+
+#endif /* AIROHA_REGS_H */
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index c1295dfad0d0..70fa3adb4934 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -5,9 +5,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#ifdef CONFIG_RFS_ACCEL
-#include <linux/cpu_rmap.h>
-#endif /* CONFIG_RFS_ACCEL */
#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -162,30 +159,6 @@ int ena_xmit_common(struct ena_adapter *adapter,
return 0;
}
-static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
-{
-#ifdef CONFIG_RFS_ACCEL
- u32 i;
- int rc;
-
- adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_io_queues);
- if (!adapter->netdev->rx_cpu_rmap)
- return -ENOMEM;
- for (i = 0; i < adapter->num_io_queues; i++) {
- int irq_idx = ENA_IO_IRQ_IDX(i);
-
- rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
- pci_irq_vector(adapter->pdev, irq_idx));
- if (rc) {
- free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
- adapter->netdev->rx_cpu_rmap = NULL;
- return rc;
- }
- }
-#endif /* CONFIG_RFS_ACCEL */
- return 0;
-}
-
static void ena_init_io_rings_common(struct ena_adapter *adapter,
struct ena_ring *ring, u16 qid)
{
@@ -1596,7 +1569,7 @@ static int ena_enable_msix(struct ena_adapter *adapter)
adapter->num_io_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
}
- if (ena_init_rx_cpu_rmap(adapter))
+ if (netif_enable_cpu_rmap(adapter->netdev, adapter->num_io_queues))
netif_warn(adapter, probe, adapter->netdev,
"Failed to map IRQs to CPUs\n");
@@ -1742,16 +1715,13 @@ static void ena_free_io_irq(struct ena_adapter *adapter)
struct ena_irq *irq;
int i;
-#ifdef CONFIG_RFS_ACCEL
- if (adapter->msix_vecs >= 1) {
- free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
- adapter->netdev->rx_cpu_rmap = NULL;
- }
-#endif /* CONFIG_RFS_ACCEL */
-
for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) {
+ struct ena_napi *ena_napi;
+
irq = &adapter->irq_tbl[i];
irq_set_affinity_hint(irq->vector, NULL);
+ ena_napi = irq->data;
+ netif_napi_set_irq(&ena_napi->napi, -1);
free_irq(irq->vector, irq->data);
}
}
@@ -4131,13 +4101,6 @@ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
ena_dev = adapter->ena_dev;
netdev = adapter->netdev;
-#ifdef CONFIG_RFS_ACCEL
- if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) {
- free_irq_cpu_rmap(netdev->rx_cpu_rmap);
- netdev->rx_cpu_rmap = NULL;
- }
-
-#endif /* CONFIG_RFS_ACCEL */
/* Make sure timer and reset routine won't be called after
* freeing device resources.
*/
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 0671a066913b..9d35ac348ebe 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -571,7 +571,7 @@ static struct db_dest *au1000_GetFreeDB(struct au1000_private *aup)
return pDB;
}
-void au1000_ReleaseDB(struct au1000_private *aup, struct db_dest *pDB)
+static void au1000_ReleaseDB(struct au1000_private *aup, struct db_dest *pDB)
{
struct db_dest *pDBfree = aup->pDBfree;
if (pDBfree)
diff --git a/drivers/net/ethernet/amd/pds_core/auxbus.c b/drivers/net/ethernet/amd/pds_core/auxbus.c
index 2babea110991..eeb72b1809ea 100644
--- a/drivers/net/ethernet/amd/pds_core/auxbus.c
+++ b/drivers/net/ethernet/amd/pds_core/auxbus.c
@@ -175,34 +175,32 @@ static struct pds_auxiliary_dev *pdsc_auxbus_dev_register(struct pdsc *cf,
return padev;
}
-int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf)
+void pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf,
+ struct pds_auxiliary_dev **pd_ptr)
{
struct pds_auxiliary_dev *padev;
- int err = 0;
- if (!cf)
- return -ENODEV;
+ if (!*pd_ptr)
+ return;
mutex_lock(&pf->config_lock);
- padev = pf->vfs[cf->vf_id].padev;
- if (padev) {
- pds_client_unregister(pf, padev->client_id);
- auxiliary_device_delete(&padev->aux_dev);
- auxiliary_device_uninit(&padev->aux_dev);
- padev->client_id = 0;
- }
- pf->vfs[cf->vf_id].padev = NULL;
+ padev = *pd_ptr;
+ pds_client_unregister(pf, padev->client_id);
+ auxiliary_device_delete(&padev->aux_dev);
+ auxiliary_device_uninit(&padev->aux_dev);
+ padev->client_id = 0;
+ *pd_ptr = NULL;
mutex_unlock(&pf->config_lock);
- return err;
}
-int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf)
+int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf,
+ enum pds_core_vif_types vt,
+ struct pds_auxiliary_dev **pd_ptr)
{
struct pds_auxiliary_dev *padev;
char devname[PDS_DEVNAME_LEN];
- enum pds_core_vif_types vt;
unsigned long mask;
u16 vt_support;
int client_id;
@@ -211,6 +209,9 @@ int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf)
if (!cf)
return -ENODEV;
+ if (vt >= PDS_DEV_TYPE_MAX)
+ return -EINVAL;
+
mutex_lock(&pf->config_lock);
mask = BIT_ULL(PDSC_S_FW_DEAD) |
@@ -222,17 +223,10 @@ int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf)
goto out_unlock;
}
- /* We only support vDPA so far, so it is the only one to
- * be verified that it is available in the Core device and
- * enabled in the devlink param. In the future this might
- * become a loop for several VIF types.
- */
-
/* Verify that the type is supported and enabled. It is not
- * an error if there is no auxbus device support for this
- * VF, it just means something else needs to happen with it.
+ * an error if the firmware doesn't support the feature; the
+ * driver just won't set up an auxiliary_device for it.
*/
- vt = PDS_DEV_TYPE_VDPA;
vt_support = !!le16_to_cpu(pf->dev_ident.vif_types[vt]);
if (!(vt_support &&
pf->viftype_status[vt].supported &&
@@ -258,7 +252,7 @@ int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf)
err = PTR_ERR(padev);
goto out_unlock;
}
- pf->vfs[cf->vf_id].padev = padev;
+ *pd_ptr = padev;
out_unlock:
mutex_unlock(&pf->config_lock);
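
With the padev pointer now supplied by the caller, the same pair of helpers serves both the per-VF vDPA devices and the PF's own fwctl device, and pdsc_auxbus_dev_del() becomes idempotent (a NULL *pd_ptr is simply a no-op). The two call shapes, taken from the main.c hunks later in this patch:

	/* VF case: storage lives in the PF's per-VF table */
	err = pdsc_auxbus_dev_add(vf, pf, PDS_DEV_TYPE_VDPA,
				  &pf->vfs[vf->vf_id].padev);

	/* PF case: cf == pf, storage lives in the PF itself */
	err = pdsc_auxbus_dev_add(pdsc, pdsc, PDS_DEV_TYPE_FWCTL, &pdsc->padev);

	/* teardown is safe to call repeatedly */
	pdsc_auxbus_dev_del(pdsc, pdsc, &pdsc->padev);
	pdsc_auxbus_dev_del(pdsc, pdsc, &pdsc->padev);	/* no-op: *pd_ptr is NULL */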
diff --git a/drivers/net/ethernet/amd/pds_core/core.c b/drivers/net/ethernet/amd/pds_core/core.c
index 536635e57727..1eb0d92786f7 100644
--- a/drivers/net/ethernet/amd/pds_core/core.c
+++ b/drivers/net/ethernet/amd/pds_core/core.c
@@ -402,6 +402,9 @@ err_out_uninit:
}
static struct pdsc_viftype pdsc_viftype_defaults[] = {
+ [PDS_DEV_TYPE_FWCTL] = { .name = PDS_DEV_TYPE_FWCTL_STR,
+ .vif_id = PDS_DEV_TYPE_FWCTL,
+ .dl_id = -1 },
[PDS_DEV_TYPE_VDPA] = { .name = PDS_DEV_TYPE_VDPA_STR,
.vif_id = PDS_DEV_TYPE_VDPA,
.dl_id = DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET },
@@ -428,6 +431,10 @@ static int pdsc_viftypes_init(struct pdsc *pdsc)
/* See what the Core device has for support */
vt_support = !!le16_to_cpu(pdsc->dev_ident.vif_types[vt]);
+
+ if (vt == PDS_DEV_TYPE_FWCTL)
+ pdsc->viftype_status[vt].enabled = true;
+
dev_dbg(pdsc->dev, "VIF %s is %ssupported\n",
pdsc->viftype_status[vt].name,
vt_support ? "" : "not ");
diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h
index 14522d6d5f86..0bf320c43083 100644
--- a/drivers/net/ethernet/amd/pds_core/core.h
+++ b/drivers/net/ethernet/amd/pds_core/core.h
@@ -156,6 +156,7 @@ struct pdsc {
struct dentry *dentry;
struct device *dev;
struct pdsc_dev_bar bars[PDS_CORE_BARS_MAX];
+ struct pds_auxiliary_dev *padev;
struct pdsc_vf *vfs;
int num_vfs;
int vf_id;
@@ -303,8 +304,11 @@ void pdsc_health_thread(struct work_struct *work);
int pdsc_register_notify(struct notifier_block *nb);
void pdsc_unregister_notify(struct notifier_block *nb);
void pdsc_notify(unsigned long event, void *data);
-int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf);
-int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf);
+int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf,
+ enum pds_core_vif_types vt,
+ struct pds_auxiliary_dev **pd_ptr);
+void pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf,
+ struct pds_auxiliary_dev **pd_ptr);
void pdsc_process_adminq(struct pdsc_qcq *qcq);
void pdsc_work_thread(struct work_struct *work);
diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c
index 44971e71991f..c5c787df61a4 100644
--- a/drivers/net/ethernet/amd/pds_core/devlink.c
+++ b/drivers/net/ethernet/amd/pds_core/devlink.c
@@ -56,8 +56,11 @@ int pdsc_dl_enable_set(struct devlink *dl, u32 id,
for (vf_id = 0; vf_id < pdsc->num_vfs; vf_id++) {
struct pdsc *vf = pdsc->vfs[vf_id].vf;
- err = ctx->val.vbool ? pdsc_auxbus_dev_add(vf, pdsc) :
- pdsc_auxbus_dev_del(vf, pdsc);
+ if (ctx->val.vbool)
+ err = pdsc_auxbus_dev_add(vf, pdsc, vt_entry->vif_id,
+ &pdsc->vfs[vf_id].padev);
+ else
+ pdsc_auxbus_dev_del(vf, pdsc, &pdsc->vfs[vf_id].padev);
}
return err;
diff --git a/drivers/net/ethernet/amd/pds_core/main.c b/drivers/net/ethernet/amd/pds_core/main.c
index 660268ff9562..4843f9249a31 100644
--- a/drivers/net/ethernet/amd/pds_core/main.c
+++ b/drivers/net/ethernet/amd/pds_core/main.c
@@ -190,7 +190,8 @@ static int pdsc_init_vf(struct pdsc *vf)
devl_unlock(dl);
pf->vfs[vf->vf_id].vf = vf;
- err = pdsc_auxbus_dev_add(vf, pf);
+ err = pdsc_auxbus_dev_add(vf, pf, PDS_DEV_TYPE_VDPA,
+ &pf->vfs[vf->vf_id].padev);
if (err) {
devl_lock(dl);
devl_unregister(dl);
@@ -264,6 +265,10 @@ static int pdsc_init_pf(struct pdsc *pdsc)
mutex_unlock(&pdsc->config_lock);
+ err = pdsc_auxbus_dev_add(pdsc, pdsc, PDS_DEV_TYPE_FWCTL, &pdsc->padev);
+ if (err)
+ goto err_out_stop;
+
dl = priv_to_devlink(pdsc);
devl_lock(dl);
err = devl_params_register(dl, pdsc_dl_params,
@@ -272,7 +277,7 @@ static int pdsc_init_pf(struct pdsc *pdsc)
devl_unlock(dl);
dev_warn(pdsc->dev, "Failed to register devlink params: %pe\n",
ERR_PTR(err));
- goto err_out_stop;
+ goto err_out_del_dev;
}
hr = devl_health_reporter_create(dl, &pdsc_fw_reporter_ops, 0, pdsc);
@@ -295,6 +300,8 @@ static int pdsc_init_pf(struct pdsc *pdsc)
err_out_unreg_params:
devlink_params_unregister(dl, pdsc_dl_params,
ARRAY_SIZE(pdsc_dl_params));
+err_out_del_dev:
+ pdsc_auxbus_dev_del(pdsc, pdsc, &pdsc->padev);
err_out_stop:
pdsc_stop(pdsc);
err_out_teardown:
@@ -417,7 +424,7 @@ static void pdsc_remove(struct pci_dev *pdev)
pf = pdsc_get_pf_struct(pdsc->pdev);
if (!IS_ERR(pf)) {
- pdsc_auxbus_dev_del(pdsc, pf);
+ pdsc_auxbus_dev_del(pdsc, pf, &pf->vfs[pdsc->vf_id].padev);
pf->vfs[pdsc->vf_id].vf = NULL;
}
} else {
@@ -426,6 +433,7 @@ static void pdsc_remove(struct pci_dev *pdev)
* shut themselves down.
*/
pdsc_sriov_configure(pdev, 0);
+ pdsc_auxbus_dev_del(pdsc, pdsc, &pdsc->padev);
timer_shutdown_sync(&pdsc->wdtimer);
if (pdsc->wq)
@@ -482,7 +490,10 @@ static void pdsc_reset_prepare(struct pci_dev *pdev)
pf = pdsc_get_pf_struct(pdsc->pdev);
if (!IS_ERR(pf))
- pdsc_auxbus_dev_del(pdsc, pf);
+ pdsc_auxbus_dev_del(pdsc, pf,
+ &pf->vfs[pdsc->vf_id].padev);
+ } else {
+ pdsc_auxbus_dev_del(pdsc, pdsc, &pdsc->padev);
}
pdsc_unmap_bars(pdsc);
@@ -527,7 +538,11 @@ static void pdsc_reset_done(struct pci_dev *pdev)
pf = pdsc_get_pf_struct(pdsc->pdev);
if (!IS_ERR(pf))
- pdsc_auxbus_dev_add(pdsc, pf);
+ pdsc_auxbus_dev_add(pdsc, pf, PDS_DEV_TYPE_VDPA,
+ &pf->vfs[pdsc->vf_id].padev);
+ } else {
+ pdsc_auxbus_dev_add(pdsc, pdsc, PDS_DEV_TYPE_FWCTL,
+ &pdsc->padev);
}
}
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c
index 2a91c84aebdb..d7ca847d44c7 100644
--- a/drivers/net/ethernet/apm/xgene-v2/main.c
+++ b/drivers/net/ethernet/apm/xgene-v2/main.c
@@ -9,8 +9,6 @@
#include "main.h"
-static const struct acpi_device_id xge_acpi_match[];
-
static int xge_get_resources(struct xge_pdata *pdata)
{
struct platform_device *pdev;
@@ -731,7 +729,7 @@ MODULE_DEVICE_TABLE(acpi, xge_acpi_match);
static struct platform_driver xge_driver = {
.driver = {
.name = "xgene-enet-v2",
- .acpi_match_table = ACPI_PTR(xge_acpi_match),
+ .acpi_match_table = xge_acpi_match,
},
.probe = xge_probe,
.remove = xge_remove,
diff --git a/drivers/net/ethernet/apm/xgene-v2/mdio.c b/drivers/net/ethernet/apm/xgene-v2/mdio.c
index eba06831aec2..6a17045a5f62 100644
--- a/drivers/net/ethernet/apm/xgene-v2/mdio.c
+++ b/drivers/net/ethernet/apm/xgene-v2/mdio.c
@@ -97,7 +97,6 @@ void xge_mdio_remove(struct net_device *ndev)
int xge_mdio_config(struct net_device *ndev)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct xge_pdata *pdata = netdev_priv(ndev);
struct device *dev = &pdata->pdev->dev;
struct mii_bus *mdio_bus;
@@ -137,17 +136,12 @@ int xge_mdio_config(struct net_device *ndev)
goto err;
}
- linkmode_set_bit_array(phy_10_100_features_array,
- ARRAY_SIZE(phy_10_100_features_array),
- mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_AUI_BIT, mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_BNC_BIT, mask);
-
- linkmode_andnot(phydev->supported, phydev->supported, mask);
- linkmode_copy(phydev->advertising, phydev->supported);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+
pdata->phy_speed = SPEED_UNKNOWN;
return 0;
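
phy_remove_link_mode() both clears the mode from phydev->supported and recomputes phydev->advertising, so the hand-built link-mode mask and the linkmode_andnot()/linkmode_copy() pair become unnecessary. Each call is roughly equivalent to the following sketch (the real helper also preserves pause bits in the advertising mask):

	linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
			   phydev->supported);
	linkmode_copy(phydev->advertising, phydev->supported);

Note that the new code only strips the 10/100/1000-half modes; the old mask also cleared the AUI/MII/FIBRE/BNC port bits, which the helper-based version leaves untouched.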
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index 86607b79c09f..cc3b1631c905 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -6,8 +6,14 @@
* Keyur Chudgar <kchudgar@apm.com>
*/
-#include <linux/of_gpio.h>
-#include <linux/gpio.h>
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_xgmac.h"
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c b/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c
index 414b2e448d59..787ea91802e7 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c
@@ -113,19 +113,9 @@ static const struct hwmon_ops aq_hwmon_ops = {
.read_string = aq_hwmon_read_string,
};
-static u32 aq_hwmon_temp_config[] = {
- HWMON_T_INPUT | HWMON_T_LABEL,
- HWMON_T_INPUT | HWMON_T_LABEL,
- 0,
-};
-
-static const struct hwmon_channel_info aq_hwmon_temp = {
- .type = hwmon_temp,
- .config = aq_hwmon_temp_config,
-};
-
static const struct hwmon_channel_info * const aq_hwmon_info[] = {
- &aq_hwmon_temp,
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL),
NULL,
};
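
HWMON_CHANNEL_INFO() folds the channel-config array and the hwmon_channel_info struct into one anonymous compound literal, removing the two file-scope statics. Its expansion here is approximately:

	&(const struct hwmon_channel_info){
		.type = hwmon_temp,
		.config = (const u32[]){
			HWMON_T_INPUT | HWMON_T_LABEL,	/* channel 0 */
			HWMON_T_INPUT | HWMON_T_LABEL,	/* channel 1 */
			0,				/* terminator */
		},
	}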
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index f5901f8e3907..f6b990b7f5b4 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -226,7 +226,6 @@ struct __packed offload_info {
struct offload_port_info ports;
struct offload_ka_info kas;
struct offload_rr_info rrs;
- u8 buf[];
};
struct __packed hw_atl_utils_fw_rpc {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 8e04552d2216..02c8213915a5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -2593,7 +2593,7 @@ void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
/********************* Multicast verbs: SET, CLEAR ****************************/
static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
{
- return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
+ return (crc32c(0, mac, ETH_ALEN) >> 24) & 0xff;
}
struct bnx2x_mcast_mac_elem {
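
crc32c() here is the same CRC-32C (Castagnoli) routine previously reachable as crc32c_le(); only the exported name changed, so the multicast bin is still the top byte of the checksum over the MAC address. Illustrative use (the MAC value is made up):

	#include <linux/crc32c.h>

	u8 mac[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x01, 0x02, 0x03 };
	u8 bin = (crc32c(0, mac, ETH_ALEN) >> 24) & 0xff;	/* 0..255 */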
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 55f553debd3b..934ba9425857 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -54,7 +54,10 @@
#include <net/pkt_cls.h>
#include <net/page_pool/helpers.h>
#include <linux/align.h>
+#include <net/netdev_lock.h>
#include <net/netdev_queues.h>
+#include <net/netdev_rx_queue.h>
+#include <linux/pci-tph.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
@@ -76,6 +79,7 @@
#define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW | \
NETIF_MSG_TX_ERR)
+MODULE_IMPORT_NS("NETDEV_INTERNAL");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom NetXtreme network driver");
@@ -485,6 +489,17 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
txr = &bp->tx_ring[bp->tx_ring_map[i]];
prod = txr->tx_prod;
+#if (MAX_SKB_FRAGS > TX_MAX_FRAGS)
+ if (skb_shinfo(skb)->nr_frags > TX_MAX_FRAGS) {
+ netdev_warn_once(dev, "SKB has too many (%d) fragments, max supported is %d. SKB will be linearized.\n",
+ skb_shinfo(skb)->nr_frags, TX_MAX_FRAGS);
+ if (skb_linearize(skb)) {
+ dev_kfree_skb_any(skb);
+ dev_core_stats_tx_dropped_inc(dev);
+ return NETDEV_TX_OK;
+ }
+ }
+#endif
free_size = bnxt_tx_avail(bp, txr);
if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
/* We must have raced with NAPI cleanup */
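
The new guard is compiled in only when the kernel's MAX_SKB_FRAGS (tunable via CONFIG_MAX_SKB_FRAGS on recent kernels) can exceed the 30 fragments that fit in the descriptor's BD-count field; skb_linearize() then collapses the fragment list into the head buffer so the packet fits in the ring. Condensed to its control flow (the one-time warning is omitted here):

	#if (MAX_SKB_FRAGS > TX_MAX_FRAGS)
		if (skb_shinfo(skb)->nr_frags > TX_MAX_FRAGS &&
		    skb_linearize(skb)) {
			/* couldn't linearize: drop and count it */
			dev_kfree_skb_any(skb);
			dev_core_stats_tx_dropped_inc(dev);
			return NETDEV_TX_OK;
		}
	#endif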
@@ -564,7 +579,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
TX_BD_FLAGS_LHINT_512_AND_SMALLER |
TX_BD_FLAGS_COAL_NOW |
TX_BD_FLAGS_PACKET_END |
- (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
+ TX_BD_CNT(2));
if (skb->ip_summed == CHECKSUM_PARTIAL)
tx_push1->tx_bd_hsize_lflags =
@@ -639,7 +654,7 @@ normal_tx:
dma_unmap_addr_set(tx_buf, mapping, mapping);
flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
- ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
+ TX_BD_CNT(last_frag + 2);
txbd->tx_bd_haddr = cpu_to_le64(mapping);
txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2 + last_frag);
@@ -3323,74 +3338,81 @@ poll_done:
return work_done;
}
-static void bnxt_free_tx_skbs(struct bnxt *bp)
+static void bnxt_free_one_tx_ring_skbs(struct bnxt *bp,
+ struct bnxt_tx_ring_info *txr, int idx)
{
int i, max_idx;
struct pci_dev *pdev = bp->pdev;
- if (!bp->tx_ring)
- return;
-
max_idx = bp->tx_nr_pages * TX_DESC_CNT;
- for (i = 0; i < bp->tx_nr_rings; i++) {
- struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
- int j;
- if (!txr->tx_buf_ring)
+ for (i = 0; i < max_idx;) {
+ struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[i];
+ struct sk_buff *skb;
+ int j, last;
+
+ if (idx < bp->tx_nr_rings_xdp &&
+ tx_buf->action == XDP_REDIRECT) {
+ dma_unmap_single(&pdev->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+ xdp_return_frame(tx_buf->xdpf);
+ tx_buf->action = 0;
+ tx_buf->xdpf = NULL;
+ i++;
continue;
+ }
- for (j = 0; j < max_idx;) {
- struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
- struct sk_buff *skb;
- int k, last;
-
- if (i < bp->tx_nr_rings_xdp &&
- tx_buf->action == XDP_REDIRECT) {
- dma_unmap_single(&pdev->dev,
- dma_unmap_addr(tx_buf, mapping),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
- xdp_return_frame(tx_buf->xdpf);
- tx_buf->action = 0;
- tx_buf->xdpf = NULL;
- j++;
- continue;
- }
+ skb = tx_buf->skb;
+ if (!skb) {
+ i++;
+ continue;
+ }
- skb = tx_buf->skb;
- if (!skb) {
- j++;
- continue;
- }
+ tx_buf->skb = NULL;
- tx_buf->skb = NULL;
+ if (tx_buf->is_push) {
+ dev_kfree_skb(skb);
+ i += 2;
+ continue;
+ }
- if (tx_buf->is_push) {
- dev_kfree_skb(skb);
- j += 2;
- continue;
- }
+ dma_unmap_single(&pdev->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ skb_headlen(skb),
+ DMA_TO_DEVICE);
- dma_unmap_single(&pdev->dev,
- dma_unmap_addr(tx_buf, mapping),
- skb_headlen(skb),
- DMA_TO_DEVICE);
+ last = tx_buf->nr_frags;
+ i += 2;
+ for (j = 0; j < last; j++, i++) {
+ int ring_idx = i & bp->tx_ring_mask;
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
- last = tx_buf->nr_frags;
- j += 2;
- for (k = 0; k < last; k++, j++) {
- int ring_idx = j & bp->tx_ring_mask;
- skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
-
- tx_buf = &txr->tx_buf_ring[ring_idx];
- dma_unmap_page(
- &pdev->dev,
- dma_unmap_addr(tx_buf, mapping),
- skb_frag_size(frag), DMA_TO_DEVICE);
- }
- dev_kfree_skb(skb);
+ tx_buf = &txr->tx_buf_ring[ring_idx];
+ dma_unmap_page(&pdev->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ skb_frag_size(frag), DMA_TO_DEVICE);
}
- netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
+ dev_kfree_skb(skb);
+ }
+ netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, idx));
+}
+
+static void bnxt_free_tx_skbs(struct bnxt *bp)
+{
+ int i;
+
+ if (!bp->tx_ring)
+ return;
+
+ for (i = 0; i < bp->tx_nr_rings; i++) {
+ struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
+
+ if (!txr->tx_buf_ring)
+ continue;
+
+ bnxt_free_one_tx_ring_skbs(bp, txr, i);
}
}
@@ -5245,8 +5267,10 @@ static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
{
int i;
- /* Under rtnl_lock and all our NAPIs have been disabled. It's
- * safe to delete the hash table.
+ netdev_assert_locked(bp->dev);
+
+ /* Under netdev instance lock and all our NAPIs have been disabled.
+ * It's safe to delete the hash table.
*/
for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
struct hlist_head *head;
@@ -5574,6 +5598,8 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
+ if (bp->fw_cap & BNXT_FW_CAP_NPAR_1_2)
+ flags |= FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT;
req->flags = cpu_to_le32(flags);
req->ver_maj_8b = DRV_VER_MAJ;
req->ver_min_8b = DRV_VER_MIN;
@@ -6944,6 +6970,30 @@ static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
hwrm_req_drop(bp, req);
}
+static void bnxt_set_rx_ring_params_p5(struct bnxt *bp, u32 ring_type,
+ struct hwrm_ring_alloc_input *req,
+ struct bnxt_ring_struct *ring)
+{
+ struct bnxt_ring_grp_info *grp_info = &bp->grp_info[ring->grp_idx];
+ u32 enables = RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID |
+ RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID;
+
+ if (ring_type == HWRM_RING_ALLOC_AGG) {
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
+ req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
+ req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
+ enables |= RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID;
+ } else {
+ req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
+ if (NET_IP_ALIGN == 2)
+ req->flags =
+ cpu_to_le16(RING_ALLOC_REQ_FLAGS_RX_SOP_PAD);
+ }
+ req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
+ req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
+ req->enables |= cpu_to_le32(enables);
+}
+
static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
struct bnxt_ring_struct *ring,
u32 ring_type, u32 map_index)
@@ -6995,37 +7045,13 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
break;
}
case HWRM_RING_ALLOC_RX:
- req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
- req->length = cpu_to_le32(bp->rx_ring_mask + 1);
- if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- u16 flags = 0;
-
- /* Association of rx ring with stats context */
- grp_info = &bp->grp_info[ring->grp_idx];
- req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
- req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
- req->enables |= cpu_to_le32(
- RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
- if (NET_IP_ALIGN == 2)
- flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
- req->flags = cpu_to_le16(flags);
- }
- break;
case HWRM_RING_ALLOC_AGG:
- if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
- /* Association of agg ring with rx ring */
- grp_info = &bp->grp_info[ring->grp_idx];
- req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
- req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
- req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
- req->enables |= cpu_to_le32(
- RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
- RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
- } else {
- req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
- }
- req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
+ req->length = (ring_type == HWRM_RING_ALLOC_RX) ?
+ cpu_to_le32(bp->rx_ring_mask + 1) :
+ cpu_to_le32(bp->rx_agg_ring_mask + 1);
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ bnxt_set_rx_ring_params_p5(bp, ring_type, req, ring);
break;
case HWRM_RING_ALLOC_CMPL:
req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
@@ -7206,6 +7232,39 @@ static int bnxt_hwrm_rx_agg_ring_alloc(struct bnxt *bp,
return 0;
}
+static int bnxt_hwrm_cp_ring_alloc_p5(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr)
+{
+ const u32 type = HWRM_RING_ALLOC_CMPL;
+ struct bnxt_napi *bnapi = cpr->bnapi;
+ struct bnxt_ring_struct *ring;
+ u32 map_idx = bnapi->index;
+ int rc;
+
+ ring = &cpr->cp_ring_struct;
+ ring->handle = BNXT_SET_NQ_HDL(cpr);
+ rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ if (rc)
+ return rc;
+ bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
+ bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
+ return 0;
+}
+
+static int bnxt_hwrm_tx_ring_alloc(struct bnxt *bp,
+ struct bnxt_tx_ring_info *txr, u32 tx_idx)
+{
+ struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+ const u32 type = HWRM_RING_ALLOC_TX;
+ int rc;
+
+ rc = hwrm_ring_alloc_send_msg(bp, ring, type, tx_idx);
+ if (rc)
+ return rc;
+ bnxt_set_db(bp, &txr->tx_db, type, tx_idx, ring->fw_ring_id);
+ return 0;
+}
+
static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
{
bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
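
Hoisting the P5+ completion-ring and TX-ring allocation into bnxt_hwrm_cp_ring_alloc_p5() and bnxt_hwrm_tx_ring_alloc() is what lets the per-queue restart path added later in this patch re-create one queue's rings without replaying the whole open sequence. The reuse, sketched:

	/* full open: inside the per-ring loop of bnxt_hwrm_ring_alloc() */
	rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
	if (!rc)
		rc = bnxt_hwrm_tx_ring_alloc(bp, txr, i);

	/* queue restart: same helpers, called from bnxt_tx_queue_start() */
	rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
	if (!rc)
		rc = bnxt_hwrm_tx_ring_alloc(bp, txr, txr->txq_index);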
@@ -7242,33 +7301,17 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
}
}
- type = HWRM_RING_ALLOC_TX;
for (i = 0; i < bp->tx_nr_rings; i++) {
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
- struct bnxt_ring_struct *ring;
- u32 map_idx;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- struct bnxt_cp_ring_info *cpr2 = txr->tx_cpr;
- struct bnxt_napi *bnapi = txr->bnapi;
- u32 type2 = HWRM_RING_ALLOC_CMPL;
-
- ring = &cpr2->cp_ring_struct;
- ring->handle = BNXT_SET_NQ_HDL(cpr2);
- map_idx = bnapi->index;
- rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
+ rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
if (rc)
goto err_out;
- bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
- ring->fw_ring_id);
- bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
}
- ring = &txr->tx_ring_struct;
- map_idx = i;
- rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ rc = bnxt_hwrm_tx_ring_alloc(bp, txr, i);
if (rc)
goto err_out;
- bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
}
for (i = 0; i < bp->rx_nr_rings; i++) {
@@ -7281,20 +7324,9 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
if (!agg_rings)
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- struct bnxt_cp_ring_info *cpr2 = rxr->rx_cpr;
- struct bnxt_napi *bnapi = rxr->bnapi;
- u32 type2 = HWRM_RING_ALLOC_CMPL;
- struct bnxt_ring_struct *ring;
- u32 map_idx = bnapi->index;
-
- ring = &cpr2->cp_ring_struct;
- ring->handle = BNXT_SET_NQ_HDL(cpr2);
- rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
+ rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr);
if (rc)
goto err_out;
- bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
- ring->fw_ring_id);
- bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
}
}
@@ -7362,6 +7394,23 @@ exit:
return 0;
}
+static void bnxt_hwrm_tx_ring_free(struct bnxt *bp,
+ struct bnxt_tx_ring_info *txr,
+ bool close_path)
+{
+ struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+ u32 cmpl_ring_id;
+
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return;
+
+ cmpl_ring_id = close_path ? bnxt_cp_ring_for_tx(bp, txr) :
+ INVALID_HW_RING_ID;
+ hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_TX,
+ cmpl_ring_id);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+}
+
static void bnxt_hwrm_rx_ring_free(struct bnxt *bp,
struct bnxt_rx_ring_info *rxr,
bool close_path)
@@ -7406,6 +7455,33 @@ static void bnxt_hwrm_rx_agg_ring_free(struct bnxt *bp,
bp->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID;
}
+static void bnxt_hwrm_cp_ring_free(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr)
+{
+ struct bnxt_ring_struct *ring;
+
+ ring = &cpr->cp_ring_struct;
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return;
+
+ hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_L2_CMPL,
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnxt_clear_one_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
+{
+ struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+ int i, size = ring->ring_mem.page_size;
+
+ cpr->cp_raw_cons = 0;
+ cpr->toggle = 0;
+
+ for (i = 0; i < bp->cp_nr_pages; i++)
+ if (cpr->cp_desc_ring[i])
+ memset(cpr->cp_desc_ring[i], 0, size);
+}
+
static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
{
u32 type;
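
A completion ring handed back to firmware must also be scrubbed in host memory: the driver's consumer index and toggle bit encode which descriptors it considers valid, so a re-allocated ring that still held stale descriptors could be misread as carrying completions. bnxt_clear_one_cp_ring() resets that state in one place; a sketch of the stop-path pairing:

	bnxt_hwrm_cp_ring_free(bp, rxr->rx_cpr);	/* release the FW ring ID */
	bnxt_clear_one_cp_ring(bp, rxr->rx_cpr);	/* zero pages, reset
							 * cp_raw_cons and toggle */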
@@ -7414,20 +7490,8 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
if (!bp->bnapi)
return;
- for (i = 0; i < bp->tx_nr_rings; i++) {
- struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
- struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
-
- if (ring->fw_ring_id != INVALID_HW_RING_ID) {
- u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
-
- hwrm_ring_free_send_msg(bp, ring,
- RING_FREE_REQ_RING_TYPE_TX,
- close_path ? cmpl_ring_id :
- INVALID_HW_RING_ID);
- ring->fw_ring_id = INVALID_HW_RING_ID;
- }
- }
+ for (i = 0; i < bp->tx_nr_rings; i++)
+ bnxt_hwrm_tx_ring_free(bp, &bp->tx_ring[i], close_path);
bnxt_cancel_dim(bp);
for (i = 0; i < bp->rx_nr_rings; i++) {
@@ -7451,17 +7515,9 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
struct bnxt_ring_struct *ring;
int j;
- for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++) {
- struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
+ for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++)
+ bnxt_hwrm_cp_ring_free(bp, &cpr->cp_ring_arr[j]);
- ring = &cpr2->cp_ring_struct;
- if (ring->fw_ring_id == INVALID_HW_RING_ID)
- continue;
- hwrm_ring_free_send_msg(bp, ring,
- RING_FREE_REQ_RING_TYPE_L2_CMPL,
- INVALID_HW_RING_ID);
- ring->fw_ring_id = INVALID_HW_RING_ID;
- }
ring = &cpr->cp_ring_struct;
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
hwrm_ring_free_send_msg(bp, ring, type,
@@ -8374,6 +8430,7 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
switch (resp->port_partition_type) {
case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
+ case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_2:
case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
bp->port_partition_type = resp->port_partition_type;
@@ -9538,6 +9595,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
+ if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_NPAR_1_2;
if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED))
bp->fw_cap |= BNXT_FW_CAP_DFLT_VLAN_TPID_PCP;
if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED)
@@ -11246,6 +11305,155 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
return 0;
}
+static void bnxt_tx_queue_stop(struct bnxt *bp, int idx)
+{
+ struct bnxt_tx_ring_info *txr;
+ struct netdev_queue *txq;
+ struct bnxt_napi *bnapi;
+ int i;
+
+ bnapi = bp->bnapi[idx];
+ bnxt_for_each_napi_tx(i, bnapi, txr) {
+ WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
+ synchronize_net();
+
+ if (!(bnapi->flags & BNXT_NAPI_FLAG_XDP)) {
+ txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
+ if (txq) {
+ __netif_tx_lock_bh(txq);
+ netif_tx_stop_queue(txq);
+ __netif_tx_unlock_bh(txq);
+ }
+ }
+
+ if (!bp->tph_mode)
+ continue;
+
+ bnxt_hwrm_tx_ring_free(bp, txr, true);
+ bnxt_hwrm_cp_ring_free(bp, txr->tx_cpr);
+ bnxt_free_one_tx_ring_skbs(bp, txr, txr->txq_index);
+ bnxt_clear_one_cp_ring(bp, txr->tx_cpr);
+ }
+}
+
+static int bnxt_tx_queue_start(struct bnxt *bp, int idx)
+{
+ struct bnxt_tx_ring_info *txr;
+ struct netdev_queue *txq;
+ struct bnxt_napi *bnapi;
+ int rc, i;
+
+ bnapi = bp->bnapi[idx];
+ /* All rings have been reserved and previously allocated.
+ * Reallocating with the same parameters should never fail.
+ */
+ bnxt_for_each_napi_tx(i, bnapi, txr) {
+ if (!bp->tph_mode)
+ goto start_tx;
+
+ rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
+ if (rc)
+ return rc;
+
+ rc = bnxt_hwrm_tx_ring_alloc(bp, txr, txr->txq_index);
+ if (rc)
+ return rc;
+
+ txr->tx_prod = 0;
+ txr->tx_cons = 0;
+ txr->tx_hw_cons = 0;
+start_tx:
+ WRITE_ONCE(txr->dev_state, 0);
+ synchronize_net();
+
+ if (bnapi->flags & BNXT_NAPI_FLAG_XDP)
+ continue;
+
+ txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
+ if (txq)
+ netif_tx_start_queue(txq);
+ }
+
+ return 0;
+}
+
+static void bnxt_irq_affinity_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
+{
+ struct bnxt_irq *irq;
+ u16 tag;
+ int err;
+
+ irq = container_of(notify, struct bnxt_irq, affinity_notify);
+
+ if (!irq->bp->tph_mode)
+ return;
+
+ cpumask_copy(irq->cpu_mask, mask);
+
+ if (irq->ring_nr >= irq->bp->rx_nr_rings)
+ return;
+
+ if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM,
+ cpumask_first(irq->cpu_mask), &tag))
+ return;
+
+ if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag))
+ return;
+
+ netdev_lock(irq->bp->dev);
+ if (netif_running(irq->bp->dev)) {
+ err = netdev_rx_queue_restart(irq->bp->dev, irq->ring_nr);
+ if (err)
+ netdev_err(irq->bp->dev,
+ "RX queue restart failed: err=%d\n", err);
+ }
+ netdev_unlock(irq->bp->dev);
+}
+
+static void bnxt_irq_affinity_release(struct kref *ref)
+{
+ struct irq_affinity_notify *notify =
+ container_of(ref, struct irq_affinity_notify, kref);
+ struct bnxt_irq *irq;
+
+ irq = container_of(notify, struct bnxt_irq, affinity_notify);
+
+ if (!irq->bp->tph_mode)
+ return;
+
+ if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, 0)) {
+ netdev_err(irq->bp->dev,
+ "Setting ST=0 for MSIX entry %d failed\n",
+ irq->msix_nr);
+ return;
+ }
+}
+
+static void bnxt_release_irq_notifier(struct bnxt_irq *irq)
+{
+ irq_set_affinity_notifier(irq->vector, NULL);
+}
+
+static void bnxt_register_irq_notifier(struct bnxt *bp, struct bnxt_irq *irq)
+{
+ struct irq_affinity_notify *notify;
+
+ irq->bp = bp;
+
+ /* Nothing to do if TPH is not enabled */
+ if (!bp->tph_mode)
+ return;
+
+ /* Register IRQ affinity notifier */
+ notify = &irq->affinity_notify;
+ notify->irq = irq->vector;
+ notify->notify = bnxt_irq_affinity_notify;
+ notify->release = bnxt_irq_affinity_release;
+
+ irq_set_affinity_notifier(irq->vector, notify);
+}
+
static void bnxt_free_irq(struct bnxt *bp)
{
struct bnxt_irq *irq;
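
The TPH plumbing rides on the kernel's generic IRQ affinity notifiers: the driver fills in an irq_affinity_notify, the IRQ core calls notify() whenever the vector's affinity changes (e.g. a write to /proc/irq/<n>/smp_affinity), and release() runs when the notifier's kref drops. The registration skeleton, independent of the TPH specifics:

	struct irq_affinity_notify *notify = &irq->affinity_notify;

	notify->irq = irq->vector;
	notify->notify = bnxt_irq_affinity_notify;	/* new cpumask arrives */
	notify->release = bnxt_irq_affinity_release;	/* kref teardown hook */
	irq_set_affinity_notifier(irq->vector, notify);

	/* teardown: unhook before free_irq() */
	irq_set_affinity_notifier(irq->vector, NULL);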
@@ -11268,11 +11476,18 @@ static void bnxt_free_irq(struct bnxt *bp)
free_cpumask_var(irq->cpu_mask);
irq->have_cpumask = 0;
}
+
+ bnxt_release_irq_notifier(irq);
+
free_irq(irq->vector, bp->bnapi[i]);
}
irq->requested = 0;
}
+
+ /* Disable TPH support */
+ pcie_disable_tph(bp->pdev);
+ bp->tph_mode = 0;
}
static int bnxt_request_irq(struct bnxt *bp)
@@ -11292,6 +11507,12 @@ static int bnxt_request_irq(struct bnxt *bp)
#ifdef CONFIG_RFS_ACCEL
rmap = bp->dev->rx_cpu_rmap;
#endif
+
+ /* Enable TPH support as part of IRQ request */
+ rc = pcie_enable_tph(bp->pdev, PCI_TPH_ST_IV_MODE);
+ if (!rc)
+ bp->tph_mode = PCI_TPH_ST_IV_MODE;
+
for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
int map_idx = bnxt_cp_num_to_irq_num(bp, i);
struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
@@ -11310,13 +11531,16 @@ static int bnxt_request_irq(struct bnxt *bp)
if (rc)
break;
- netif_napi_set_irq(&bp->bnapi[i]->napi, irq->vector);
+ netif_napi_set_irq_locked(&bp->bnapi[i]->napi, irq->vector);
irq->requested = 1;
if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
int numa_node = dev_to_node(&bp->pdev->dev);
+ u16 tag;
irq->have_cpumask = 1;
+ irq->msix_nr = map_idx;
+ irq->ring_nr = i;
cpumask_set_cpu(cpumask_local_spread(i, numa_node),
irq->cpu_mask);
rc = irq_update_affinity_hint(irq->vector, irq->cpu_mask);
@@ -11326,6 +11550,16 @@ static int bnxt_request_irq(struct bnxt *bp)
irq->vector);
break;
}
+
+ bnxt_register_irq_notifier(bp, irq);
+
+ /* Init ST table entry */
+ if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM,
+ cpumask_first(irq->cpu_mask),
+ &tag))
+ continue;
+
+ pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag);
}
}
return rc;
@@ -11346,9 +11580,9 @@ static void bnxt_del_napi(struct bnxt *bp)
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
- __netif_napi_del(&bnapi->napi);
+ __netif_napi_del_locked(&bnapi->napi);
}
- /* We called __netif_napi_del(), we need
+ /* We called __netif_napi_del_locked(), we need
* to respect an RCU grace period before freeing napi structures.
*/
synchronize_net();
@@ -11367,12 +11601,12 @@ static void bnxt_init_napi(struct bnxt *bp)
cp_nr_rings--;
for (i = 0; i < cp_nr_rings; i++) {
bnapi = bp->bnapi[i];
- netif_napi_add_config(bp->dev, &bnapi->napi, poll_fn,
- bnapi->index);
+ netif_napi_add_config_locked(bp->dev, &bnapi->napi, poll_fn,
+ bnapi->index);
}
if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
bnapi = bp->bnapi[cp_nr_rings];
- netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll_nitroa0);
+ netif_napi_add_locked(bp->dev, &bnapi->napi, bnxt_poll_nitroa0);
}
}
@@ -11393,7 +11627,7 @@ static void bnxt_disable_napi(struct bnxt *bp)
cpr->sw_stats->tx.tx_resets++;
if (bnapi->in_reset)
cpr->sw_stats->rx.rx_resets++;
- napi_disable(&bnapi->napi);
+ napi_disable_locked(&bnapi->napi);
}
}
@@ -11415,7 +11649,7 @@ static void bnxt_enable_napi(struct bnxt *bp)
INIT_WORK(&cpr->dim.work, bnxt_dim_work);
cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}
- napi_enable(&bnapi->napi);
+ napi_enable_locked(&bnapi->napi);
}
}
@@ -12086,6 +12320,7 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
struct hwrm_func_drv_if_change_input *req;
bool fw_reset = !bp->irq_tbl;
bool resc_reinit = false;
+ bool caps_change = false;
int rc, retry = 0;
u32 flags = 0;
@@ -12141,8 +12376,11 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
return -ENODEV;
}
- if (resc_reinit || fw_reset) {
- if (fw_reset) {
+ if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_CAPS_CHANGE)
+ caps_change = true;
+
+ if (resc_reinit || fw_reset || caps_change) {
+ if (fw_reset || caps_change) {
set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
bnxt_ulp_irq_stop(bp);
@@ -12578,7 +12816,6 @@ open_err_free_mem:
return rc;
}
-/* rtnl_lock held */
int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
int rc = 0;
@@ -12589,14 +12826,14 @@ int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
if (rc) {
netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
- dev_close(bp->dev);
+ netif_close(bp->dev);
}
return rc;
}
-/* rtnl_lock held, open the NIC half way by allocating all resources, but
- * NAPI, IRQ, and TX are not enabled. This is mainly used for offline
- * self tests.
+/* netdev instance lock held, open the NIC half way by allocating all
+ * resources, but NAPI, IRQ, and TX are not enabled. This is mainly used
+ * for offline self tests.
*/
int bnxt_half_open_nic(struct bnxt *bp)
{
@@ -12627,12 +12864,12 @@ int bnxt_half_open_nic(struct bnxt *bp)
half_open_err:
bnxt_free_skbs(bp);
bnxt_free_mem(bp, true);
- dev_close(bp->dev);
+ netif_close(bp->dev);
return rc;
}
-/* rtnl_lock held, this call can only be made after a previous successful
- * call to bnxt_half_open_nic().
+/* netdev instance lock held, this call can only be made after a previous
+ * successful call to bnxt_half_open_nic().
*/
void bnxt_half_close_nic(struct bnxt *bp)
{
@@ -12741,10 +12978,11 @@ void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
/* If we get here, it means firmware reset is in progress
* while we are trying to close. We can safely proceed with
- * the close because we are holding rtnl_lock(). Some firmware
- * messages may fail as we proceed to close. We set the
- * ABORT_ERR flag here so that the FW reset thread will later
- * abort when it gets the rtnl_lock() and sees the flag.
+ * the close because we are holding the netdev instance lock.
+ * Some firmware messages may fail as we proceed to close.
+ * We set the ABORT_ERR flag here so that the FW reset thread
+ * will later abort when it gets the netdev instance lock
+ * and sees the flag.
*/
netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
@@ -12835,7 +13073,7 @@ static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
return hwrm_req_send(bp, req);
}
-/* rtnl_lock held */
+/* netdev instance lock held */
static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mii_ioctl_data *mdio = if_mii(ifr);
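
From here on the patch swaps the global RTNL lock for the per-device instance lock across the driver's control paths; with dev->request_ops_lock set in the probe hunk near the end, the core holds the same lock around ndo callbacks, so callees can assert it instead of ASSERT_RTNL(). The recurring idiom:

	netdev_lock(dev);		/* was: rtnl_lock() */
	if (netif_running(dev))
		netif_close(dev);	/* was: dev_close(), which needs RTNL */
	netdev_unlock(dev);		/* was: rtnl_unlock() */

	/* in helpers formerly guarded by ASSERT_RTNL() */
	netdev_assert_locked(dev);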
@@ -13754,30 +13992,31 @@ bnxt_restart_timer:
mod_timer(&bp->timer, jiffies + bp->current_interval);
}
-static void bnxt_rtnl_lock_sp(struct bnxt *bp)
+static void bnxt_lock_sp(struct bnxt *bp)
{
/* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
* set. If the device is being closed, bnxt_close() may be holding
- * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
- * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
+ * the netdev instance lock and waiting for BNXT_STATE_IN_SP_TASK to
+ * clear. So we must clear BNXT_STATE_IN_SP_TASK before taking the
+ * netdev instance lock.
*/
clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
- rtnl_lock();
+ netdev_lock(bp->dev);
}
-static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
+static void bnxt_unlock_sp(struct bnxt *bp)
{
set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
- rtnl_unlock();
+ netdev_unlock(bp->dev);
}
/* Only called from bnxt_sp_task() */
static void bnxt_reset(struct bnxt *bp, bool silent)
{
- bnxt_rtnl_lock_sp(bp);
+ bnxt_lock_sp(bp);
if (test_bit(BNXT_STATE_OPEN, &bp->state))
bnxt_reset_task(bp, silent);
- bnxt_rtnl_unlock_sp(bp);
+ bnxt_unlock_sp(bp);
}
/* Only called from bnxt_sp_task() */
@@ -13785,9 +14024,9 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
{
int i;
- bnxt_rtnl_lock_sp(bp);
+ bnxt_lock_sp(bp);
if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
- bnxt_rtnl_unlock_sp(bp);
+ bnxt_unlock_sp(bp);
return;
}
/* Disable and flush TPA before resetting the RX ring */
@@ -13826,7 +14065,7 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
}
if (bp->flags & BNXT_FLAG_TPA)
bnxt_set_tpa(bp, true);
- bnxt_rtnl_unlock_sp(bp);
+ bnxt_unlock_sp(bp);
}
static void bnxt_fw_fatal_close(struct bnxt *bp)
@@ -13882,7 +14121,7 @@ static bool is_bnxt_fw_ok(struct bnxt *bp)
return false;
}
-/* rtnl_lock is acquired before calling this function */
+/* netdev instance lock is acquired before calling this function */
static void bnxt_force_fw_reset(struct bnxt *bp)
{
struct bnxt_fw_health *fw_health = bp->fw_health;
@@ -13925,9 +14164,9 @@ void bnxt_fw_exception(struct bnxt *bp)
netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
bnxt_ulp_stop(bp);
- bnxt_rtnl_lock_sp(bp);
+ bnxt_lock_sp(bp);
bnxt_force_fw_reset(bp);
- bnxt_rtnl_unlock_sp(bp);
+ bnxt_unlock_sp(bp);
}
/* Returns the number of registered VFs, or 1 if VF configuration is pending, or
@@ -13957,7 +14196,7 @@ static int bnxt_get_registered_vfs(struct bnxt *bp)
void bnxt_fw_reset(struct bnxt *bp)
{
bnxt_ulp_stop(bp);
- bnxt_rtnl_lock_sp(bp);
+ bnxt_lock_sp(bp);
if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
@@ -13980,7 +14219,7 @@ void bnxt_fw_reset(struct bnxt *bp)
netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
n);
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
- dev_close(bp->dev);
+ netif_close(bp->dev);
goto fw_reset_exit;
} else if (n > 0) {
u16 vf_tmo_dsecs = n * 10;
@@ -14003,7 +14242,7 @@ void bnxt_fw_reset(struct bnxt *bp)
bnxt_queue_fw_reset_work(bp, tmo);
}
fw_reset_exit:
- bnxt_rtnl_unlock_sp(bp);
+ bnxt_unlock_sp(bp);
}
static void bnxt_chk_missed_irq(struct bnxt *bp)
@@ -14202,7 +14441,7 @@ static void bnxt_sp_task(struct work_struct *work)
static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
int *max_cp);
-/* Under rtnl_lock */
+/* Under netdev instance lock */
int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int tx_xdp)
{
@@ -14595,7 +14834,7 @@ static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
bnxt_dl_health_fw_status_update(bp, false);
bp->fw_reset_state = 0;
- dev_close(bp->dev);
+ netif_close(bp->dev);
}
static void bnxt_fw_reset_task(struct work_struct *work)
@@ -14630,10 +14869,10 @@ static void bnxt_fw_reset_task(struct work_struct *work)
return;
}
bp->fw_reset_timestamp = jiffies;
- rtnl_lock();
+ netdev_lock(bp->dev);
if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
bnxt_fw_reset_abort(bp, rc);
- rtnl_unlock();
+ netdev_unlock(bp->dev);
goto ulp_start;
}
bnxt_fw_reset_close(bp);
@@ -14644,7 +14883,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
tmo = bp->fw_reset_min_dsecs * HZ / 10;
}
- rtnl_unlock();
+ netdev_unlock(bp->dev);
bnxt_queue_fw_reset_work(bp, tmo);
return;
}
@@ -14718,7 +14957,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
fallthrough;
case BNXT_FW_RESET_STATE_OPENING:
- while (!rtnl_trylock()) {
+ while (!netdev_trylock(bp->dev)) {
bnxt_queue_fw_reset_work(bp, HZ / 10);
return;
}
@@ -14726,7 +14965,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
if (rc) {
netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
bnxt_fw_reset_abort(bp, rc);
- rtnl_unlock();
+ netdev_unlock(bp->dev);
goto ulp_start;
}
@@ -14745,13 +14984,13 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bnxt_dl_health_fw_recovery_done(bp);
bnxt_dl_health_fw_status_update(bp, true);
}
- rtnl_unlock();
+ netdev_unlock(bp->dev);
bnxt_ulp_start(bp, 0);
bnxt_reenable_sriov(bp);
- rtnl_lock();
+ netdev_lock(bp->dev);
bnxt_vf_reps_alloc(bp);
bnxt_vf_reps_open(bp);
- rtnl_unlock();
+ netdev_unlock(bp->dev);
break;
}
return;
@@ -14764,9 +15003,9 @@ fw_reset_abort_status:
netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
}
fw_reset_abort:
- rtnl_lock();
+ netdev_lock(bp->dev);
bnxt_fw_reset_abort(bp, rc);
- rtnl_unlock();
+ netdev_unlock(bp->dev);
ulp_start:
bnxt_ulp_start(bp, rc);
}
@@ -14858,13 +15097,14 @@ init_err:
return rc;
}
-/* rtnl_lock held */
static int bnxt_change_mac_addr(struct net_device *dev, void *p)
{
struct sockaddr *addr = p;
struct bnxt *bp = netdev_priv(dev);
int rc = 0;
+ netdev_assert_locked(dev);
+
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
@@ -14885,11 +15125,12 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p)
return rc;
}
-/* rtnl_lock held */
static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
{
struct bnxt *bp = netdev_priv(dev);
+ netdev_assert_locked(dev);
+
if (netif_running(dev))
bnxt_close_nic(bp, true, false);
@@ -15620,6 +15861,7 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
struct bnxt_rx_ring_info *rxr, *clone;
struct bnxt_cp_ring_info *cpr;
struct bnxt_vnic_info *vnic;
+ struct bnxt_napi *bnapi;
int i, rc;
rxr = &bp->rx_ring[idx];
@@ -15637,21 +15879,40 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
bnxt_copy_rx_ring(bp, rxr, clone);
+ bnapi = rxr->bnapi;
+ cpr = &bnapi->cp_ring;
+
+ /* All rings have been reserved and previously allocated.
+ * Reallocating with the same parameters should never fail.
+ */
rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
if (rc)
- return rc;
+ goto err_reset;
+
+ if (bp->tph_mode) {
+ rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr);
+ if (rc)
+ goto err_reset;
+ }
+
rc = bnxt_hwrm_rx_agg_ring_alloc(bp, rxr);
if (rc)
- goto err_free_hwrm_rx_ring;
+ goto err_reset;
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
if (bp->flags & BNXT_FLAG_AGG_RINGS)
bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
- cpr = &rxr->bnapi->cp_ring;
- cpr->sw_stats->rx.rx_resets++;
+ if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
+ rc = bnxt_tx_queue_start(bp, idx);
+ if (rc)
+ goto err_reset;
+ }
+
+ napi_enable(&bnapi->napi);
+ bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
- for (i = 0; i <= bp->nr_vnics; i++) {
+ for (i = 0; i < bp->nr_vnics; i++) {
vnic = &bp->vnic_info[i];
rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
@@ -15667,8 +15928,12 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
return 0;
-err_free_hwrm_rx_ring:
- bnxt_hwrm_rx_ring_free(bp, rxr, false);
+err_reset:
+ netdev_err(bp->dev, "Unexpected HWRM error during queue start rc: %d\n",
+ rc);
+ napi_enable(&bnapi->napi);
+ bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
+ bnxt_reset_task(bp, true);
return rc;
}
@@ -15676,10 +15941,12 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_rx_ring_info *rxr;
+ struct bnxt_cp_ring_info *cpr;
struct bnxt_vnic_info *vnic;
+ struct bnxt_napi *bnapi;
int i;
- for (i = 0; i <= bp->nr_vnics; i++) {
+ for (i = 0; i < bp->nr_vnics; i++) {
vnic = &bp->vnic_info[i];
vnic->mru = 0;
bnxt_hwrm_vnic_update(bp, vnic,
@@ -15688,14 +15955,30 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
/* Make sure NAPI sees that the VNIC is disabled */
synchronize_net();
rxr = &bp->rx_ring[idx];
- cancel_work_sync(&rxr->bnapi->cp_ring.dim.work);
+ bnapi = rxr->bnapi;
+ cpr = &bnapi->cp_ring;
+ cancel_work_sync(&cpr->dim.work);
bnxt_hwrm_rx_ring_free(bp, rxr, false);
bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
- rxr->rx_next_cons = 0;
page_pool_disable_direct_recycling(rxr->page_pool);
if (bnxt_separate_head_pool())
page_pool_disable_direct_recycling(rxr->head_pool);
+ if (bp->flags & BNXT_FLAG_SHARED_RINGS)
+ bnxt_tx_queue_stop(bp, idx);
+
+ /* Disable NAPI now after freeing the rings because HWRM_RING_FREE
+ * completion is handled in NAPI to guarantee no more DMA on that ring
+ * after seeing the completion.
+ */
+ napi_disable(&bnapi->napi);
+
+ if (bp->tph_mode) {
+ bnxt_hwrm_cp_ring_free(bp, rxr->rx_cpr);
+ bnxt_clear_one_cp_ring(bp, rxr->rx_cpr);
+ }
+ bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
+
memcpy(qmem, rxr, sizeof(*rxr));
bnxt_init_rx_ring_struct(bp, qmem);
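
The ordering in this stop path is deliberate: the HWRM_RING_FREE response arrives as a completion on the very NAPI being torn down, so NAPI must stay live until firmware acks the free. Condensed:

	bnxt_hwrm_rx_ring_free(bp, rxr, false);		/* FW ack comes via NAPI */
	bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
	...
	napi_disable(&bnapi->napi);			/* quiesce only after the ack */
	bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);	/* ack NQ, leave IRQ off */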
@@ -16014,7 +16297,7 @@ int bnxt_restore_pf_fw_resources(struct bnxt *bp)
{
int rc;
- ASSERT_RTNL();
+ netdev_ops_assert_locked(bp->dev);
bnxt_hwrm_func_qcaps(bp);
if (netif_running(bp->dev))
@@ -16027,7 +16310,7 @@ int bnxt_restore_pf_fw_resources(struct bnxt *bp)
if (netif_running(bp->dev)) {
if (rc)
- dev_close(bp->dev);
+ netif_close(bp->dev);
else
rc = bnxt_open_nic(bp, true, false);
}
@@ -16364,6 +16647,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX;
if (BNXT_SUPPORTS_QUEUE_API(bp))
dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
+ dev->request_ops_lock = true;
rc = register_netdev(dev);
if (rc)
@@ -16414,13 +16698,13 @@ static void bnxt_shutdown(struct pci_dev *pdev)
if (!dev)
return;
- rtnl_lock();
+ netdev_lock(dev);
bp = netdev_priv(dev);
if (!bp)
goto shutdown_exit;
if (netif_running(dev))
- dev_close(dev);
+ netif_close(dev);
bnxt_ptp_clear(bp);
bnxt_clear_int_mode(bp);
@@ -16432,7 +16716,7 @@ static void bnxt_shutdown(struct pci_dev *pdev)
}
shutdown_exit:
- rtnl_unlock();
+ netdev_unlock(dev);
}
#ifdef CONFIG_PM_SLEEP
@@ -16444,7 +16728,7 @@ static int bnxt_suspend(struct device *device)
bnxt_ulp_stop(bp);
- rtnl_lock();
+ netdev_lock(dev);
if (netif_running(dev)) {
netif_device_detach(dev);
rc = bnxt_close(dev);
@@ -16453,7 +16737,7 @@ static int bnxt_suspend(struct device *device)
bnxt_ptp_clear(bp);
pci_disable_device(bp->pdev);
bnxt_free_ctx_mem(bp, false);
- rtnl_unlock();
+ netdev_unlock(dev);
return rc;
}
@@ -16463,7 +16747,7 @@ static int bnxt_resume(struct device *device)
struct bnxt *bp = netdev_priv(dev);
int rc = 0;
- rtnl_lock();
+ netdev_lock(dev);
rc = pci_enable_device(bp->pdev);
if (rc) {
netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
@@ -16506,7 +16790,7 @@ static int bnxt_resume(struct device *device)
}
resume_exit:
- rtnl_unlock();
+ netdev_unlock(bp->dev);
bnxt_ulp_start(bp, rc);
if (!rc)
bnxt_reenable_sriov(bp);
@@ -16541,7 +16825,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
bnxt_ulp_stop(bp);
- rtnl_lock();
+ netdev_lock(netdev);
netif_device_detach(netdev);
if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
@@ -16550,7 +16834,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
}
if (abort || state == pci_channel_io_perm_failure) {
- rtnl_unlock();
+ netdev_unlock(netdev);
return PCI_ERS_RESULT_DISCONNECT;
}
@@ -16569,7 +16853,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
if (pci_is_enabled(pdev))
pci_disable_device(pdev);
bnxt_free_ctx_mem(bp, false);
- rtnl_unlock();
+ netdev_unlock(netdev);
/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
@@ -16599,7 +16883,7 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
test_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state))
msleep(900);
- rtnl_lock();
+ netdev_lock(netdev);
if (pci_enable_device(pdev)) {
dev_err(&pdev->dev,
@@ -16654,7 +16938,7 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
reset_exit:
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
bnxt_clear_reservations(bp, true);
- rtnl_unlock();
+ netdev_unlock(netdev);
return result;
}
@@ -16673,7 +16957,7 @@ static void bnxt_io_resume(struct pci_dev *pdev)
int err;
netdev_info(bp->dev, "PCI Slot Resume\n");
- rtnl_lock();
+ netdev_lock(netdev);
err = bnxt_hwrm_func_qcaps(bp);
if (!err) {
@@ -16686,7 +16970,7 @@ static void bnxt_io_resume(struct pci_dev *pdev)
if (!err)
netif_device_attach(netdev);
- rtnl_unlock();
+ netdev_unlock(netdev);
bnxt_ulp_start(bp, err);
if (!err)
bnxt_reenable_sriov(bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 2373f423a523..21726cf56586 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -82,6 +82,12 @@ struct tx_bd {
#define TX_OPAQUE_PROD(bp, opq) ((TX_OPAQUE_IDX(opq) + TX_OPAQUE_BDS(opq)) &\
(bp)->tx_ring_mask)
+#define TX_BD_CNT(n) (((n) << TX_BD_FLAGS_BD_CNT_SHIFT) & TX_BD_FLAGS_BD_CNT)
+
+#define TX_MAX_BD_CNT 32
+
+#define TX_MAX_FRAGS (TX_MAX_BD_CNT - 2)
+
struct tx_bd_ext {
__le32 tx_bd_hsize_lflags;
#define TX_BD_FLAGS_TCP_UDP_CHKSUM (1 << 0)
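
TX_BD_CNT(n) shifts the BD count into its field and masks it against TX_BD_FLAGS_BD_CNT, which is what bounds a descriptor chain at TX_MAX_BD_CNT (32): one long TX BD, one extension BD, and at most TX_MAX_FRAGS (30) fragment BDs. Typical use, as in the xmit hunks above:

	/* a 3-fragment skb: long BD + ext BD + 3 frag BDs = 5 BDs total */
	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
		TX_BD_CNT(nr_frags + 2);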
@@ -1234,6 +1240,11 @@ struct bnxt_irq {
u8 have_cpumask:1;
char name[IFNAMSIZ + BNXT_IRQ_NAME_EXTRA];
cpumask_var_t cpu_mask;
+
+ struct bnxt *bp;
+ int msix_nr;
+ int ring_nr;
+ struct irq_affinity_notify affinity_notify;
};
#define HWRM_RING_ALLOC_TX 0x1
@@ -2410,6 +2421,8 @@ struct bnxt {
u8 max_q;
u8 num_tc;
+ u8 tph_mode;
+
unsigned int current_interval;
#define BNXT_TIMER_INTERVAL HZ
@@ -2492,6 +2505,7 @@ struct bnxt {
#define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3 BIT_ULL(39)
#define BNXT_FW_CAP_VNIC_RE_FLUSH BIT_ULL(40)
#define BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS BIT_ULL(41)
+ #define BNXT_FW_CAP_NPAR_1_2 BIT_ULL(42)
u32 fw_dbg_cap;
@@ -2689,6 +2703,7 @@ struct bnxt {
#define BNXT_DUMP_LIVE 0
#define BNXT_DUMP_CRASH 1
#define BNXT_DUMP_DRIVER 2
+#define BNXT_DUMP_LIVE_WITH_CTX_L1_CACHE 3
struct bpf_prog *xdp_prog;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
index 7236d8e548ab..5576e7cf8463 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
@@ -159,8 +159,8 @@ static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp,
return rc;
}
-static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
- u16 segment_id)
+static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 dump_type,
+ u16 component_id, u16 segment_id)
{
struct hwrm_dbg_coredump_initiate_input *req;
int rc;
@@ -172,6 +172,8 @@ static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout);
req->component_id = cpu_to_le16(component_id);
req->segment_id = cpu_to_le16(segment_id);
+ if (dump_type == BNXT_DUMP_LIVE_WITH_CTX_L1_CACHE)
+ req->seg_flags = DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_COLLECT_CTX_L1_CACHE;
return hwrm_req_send(bp, req);
}
@@ -450,7 +452,8 @@ static int __bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf,
start = jiffies;
- rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id);
+ rc = bnxt_hwrm_dbg_coredump_initiate(bp, dump_type, comp_id,
+ seg_id);
if (rc) {
netdev_err(bp->dev,
"Failed to initiate coredump for seg = %d\n",
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index ef8288fd68f4..777880594a04 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -11,6 +11,7 @@
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <net/devlink.h>
+#include <net/netdev_lock.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
@@ -439,14 +440,17 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: {
bnxt_ulp_stop(bp);
rtnl_lock();
+ netdev_lock(bp->dev);
if (bnxt_sriov_cfg(bp)) {
NL_SET_ERR_MSG_MOD(extack,
"reload is unsupported while VFs are allocated or being configured");
+ netdev_unlock(bp->dev);
rtnl_unlock();
bnxt_ulp_start(bp, 0);
return -EOPNOTSUPP;
}
if (bp->dev->reg_state == NETREG_UNREGISTERED) {
+ netdev_unlock(bp->dev);
rtnl_unlock();
bnxt_ulp_start(bp, 0);
return -ENODEV;
@@ -458,7 +462,8 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
if (rc) {
NL_SET_ERR_MSG_MOD(extack, "Failed to deregister");
if (netif_running(bp->dev))
- dev_close(bp->dev);
+ netif_close(bp->dev);
+ netdev_unlock(bp->dev);
rtnl_unlock();
break;
}
@@ -479,7 +484,9 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
return -EPERM;
}
rtnl_lock();
+ netdev_lock(bp->dev);
if (bp->dev->reg_state == NETREG_UNREGISTERED) {
+ netdev_unlock(bp->dev);
rtnl_unlock();
return -ENODEV;
}
@@ -493,6 +500,7 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
if (rc) {
NL_SET_ERR_MSG_MOD(extack, "Failed to activate firmware");
clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
+ netdev_unlock(bp->dev);
rtnl_unlock();
}
break;
@@ -511,6 +519,8 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
int rc = 0;
+ netdev_assert_locked(bp->dev);
+
*actions_performed = 0;
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: {
@@ -535,6 +545,7 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti
if (!netif_running(bp->dev))
NL_SET_ERR_MSG_MOD(extack,
"Device is closed, not waiting for reset notice that will never come");
+ netdev_unlock(bp->dev);
rtnl_unlock();
while (test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state)) {
if (time_after(jiffies, timeout)) {
@@ -550,6 +561,7 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti
msleep(50);
}
rtnl_lock();
+ netdev_lock(bp->dev);
if (!rc)
*actions_performed |= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
@@ -568,8 +580,9 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti
}
*actions_performed |= BIT(action);
} else if (netif_running(bp->dev)) {
- dev_close(bp->dev);
+ netif_close(bp->dev);
}
+ netdev_unlock(bp->dev);
rtnl_unlock();
if (action == DEVLINK_RELOAD_ACTION_DRIVER_REINIT)
bnxt_ulp_start(bp, rc);
@@ -666,6 +679,8 @@ static const struct bnxt_dl_nvm_param nvm_params[] = {
NVM_OFF_MSIX_VEC_PER_PF_MAX, BNXT_NVM_SHARED_CFG, 10, 4},
{DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
NVM_OFF_MSIX_VEC_PER_PF_MIN, BNXT_NVM_SHARED_CFG, 7, 4},
+ {DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE, NVM_OFF_SUPPORT_RDMA,
+ BNXT_NVM_FUNC_CFG, 1, 1},
{BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, NVM_OFF_DIS_GRE_VER_CHECK,
BNXT_NVM_SHARED_CFG, 1, 1},
};
@@ -1010,37 +1025,19 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
}
-static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
- union devlink_param_value *val)
+static int __bnxt_hwrm_nvm_req(struct bnxt *bp,
+ const struct bnxt_dl_nvm_param *nvm, void *msg,
+ union devlink_param_value *val)
{
struct hwrm_nvm_get_variable_input *req = msg;
- struct bnxt_dl_nvm_param nvm_param;
struct hwrm_err_output *resp;
union bnxt_nvm_data *data;
dma_addr_t data_dma_addr;
- int idx = 0, rc, i;
-
- /* Get/Set NVM CFG parameter is supported only on PFs */
- if (BNXT_VF(bp)) {
- hwrm_req_drop(bp, req);
- return -EPERM;
- }
-
- for (i = 0; i < ARRAY_SIZE(nvm_params); i++) {
- if (nvm_params[i].id == param_id) {
- nvm_param = nvm_params[i];
- break;
- }
- }
+ int idx = 0, rc;
- if (i == ARRAY_SIZE(nvm_params)) {
- hwrm_req_drop(bp, req);
- return -EOPNOTSUPP;
- }
-
- if (nvm_param.dir_type == BNXT_NVM_PORT_CFG)
+ if (nvm->dir_type == BNXT_NVM_PORT_CFG)
idx = bp->pf.port_id;
- else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG)
+ else if (nvm->dir_type == BNXT_NVM_FUNC_CFG)
idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID;
data = hwrm_req_dma_slice(bp, req, sizeof(*data), &data_dma_addr);
@@ -1051,23 +1048,23 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
}
req->dest_data_addr = cpu_to_le64(data_dma_addr);
- req->data_len = cpu_to_le16(nvm_param.nvm_num_bits);
- req->option_num = cpu_to_le16(nvm_param.offset);
+ req->data_len = cpu_to_le16(nvm->nvm_num_bits);
+ req->option_num = cpu_to_le16(nvm->offset);
req->index_0 = cpu_to_le16(idx);
if (idx)
req->dimensions = cpu_to_le16(1);
resp = hwrm_req_hold(bp, req);
if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) {
- bnxt_copy_to_nvm_data(data, val, nvm_param.nvm_num_bits,
- nvm_param.dl_num_bytes);
+ bnxt_copy_to_nvm_data(data, val, nvm->nvm_num_bits,
+ nvm->dl_num_bytes);
rc = hwrm_req_send(bp, msg);
} else {
rc = hwrm_req_send_silent(bp, msg);
if (!rc) {
bnxt_copy_from_nvm_data(val, data,
- nvm_param.nvm_num_bits,
- nvm_param.dl_num_bytes);
+ nvm->nvm_num_bits,
+ nvm->dl_num_bytes);
} else {
if (resp->cmd_err ==
NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST)
@@ -1080,6 +1077,27 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
return rc;
}
+static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
+ union devlink_param_value *val)
+{
+ struct hwrm_nvm_get_variable_input *req = msg;
+ const struct bnxt_dl_nvm_param *nvm_param;
+ int i;
+
+ /* Get/Set NVM CFG parameter is supported only on PFs */
+ if (BNXT_VF(bp)) {
+ hwrm_req_drop(bp, req);
+ return -EPERM;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(nvm_params); i++) {
+ nvm_param = &nvm_params[i];
+ if (nvm_param->id == param_id)
+ return __bnxt_hwrm_nvm_req(bp, nvm_param, msg, val);
+ }
+ return -EOPNOTSUPP;
+}
+
static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
{
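The refactor above separates table lookup from the HWRM transaction so that a caller can also pass an ad-hoc bnxt_dl_nvm_param that is not in nvm_params[] (the RoCE capability probe below does exactly that). The shape of the split, reduced to a standalone sketch with hypothetical names:

#include <stddef.h>
#include <errno.h>

struct param_desc {
	unsigned int id;
	unsigned int offset;
};

/* Worker consumes a fully resolved descriptor... */
static int issue_req(const struct param_desc *p)
{
	(void)p;		/* transaction elided in this sketch */
	return 0;
}

/* ...while the wrapper only maps id -> descriptor. */
static int issue_req_by_id(const struct param_desc *tbl, size_t n,
			   unsigned int id)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (tbl[i].id == id)
			return issue_req(&tbl[i]);
	return -EOPNOTSUPP;
}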
@@ -1116,6 +1134,32 @@ static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id,
return bnxt_hwrm_nvm_req(bp, id, req, &ctx->val);
}
+static int bnxt_dl_roce_validate(struct devlink *dl, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ const struct bnxt_dl_nvm_param nvm_roce_cap = {0, NVM_OFF_RDMA_CAPABLE,
+ BNXT_NVM_SHARED_CFG, 1, 1};
+ struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+ struct hwrm_nvm_get_variable_input *req;
+ union devlink_param_value roce_cap;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_NVM_GET_VARIABLE);
+ if (rc)
+ return rc;
+
+ if (__bnxt_hwrm_nvm_req(bp, &nvm_roce_cap, req, &roce_cap)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to verify if device is RDMA Capable");
+ return -EINVAL;
+ }
+ if (!roce_cap.vbool) {
+ NL_SET_ERR_MSG_MOD(extack, "Device does not support RDMA");
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int bnxt_dl_msix_validate(struct devlink *dl, u32 id,
union devlink_param_value val,
struct netlink_ext_ack *extack)
@@ -1180,6 +1224,10 @@ static const struct devlink_param bnxt_dl_params[] = {
BIT(DEVLINK_PARAM_CMODE_PERMANENT),
bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
bnxt_dl_msix_validate),
+ DEVLINK_PARAM_GENERIC(ENABLE_ROCE,
+ BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
+ bnxt_dl_roce_validate),
DEVLINK_PARAM_DRIVER(BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK,
"gre_ver_check", DEVLINK_PARAM_TYPE_BOOL,
BIT(DEVLINK_PARAM_CMODE_PERMANENT),
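With the generic parameter registered above, the permanent NVM option can be driven from the stock devlink tool, e.g. devlink dev param set pci/0000:5e:00.0 name enable_roce value true cmode permanent (the PCI address is illustrative). The validate callback rejects the write when the shared NVM_OFF_RDMA_CAPABLE bit reports the device as not RDMA capable, and as a permanent-cmode parameter the new value only takes effect after firmware is reinitialized.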
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
index b8105065367b..7f45dcd7b287 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
@@ -41,8 +41,10 @@ static inline void bnxt_dl_set_remote_reset(struct devlink *dl, bool value)
#define NVM_OFF_MSIX_VEC_PER_PF_MAX 108
#define NVM_OFF_MSIX_VEC_PER_PF_MIN 114
+#define NVM_OFF_RDMA_CAPABLE 161
#define NVM_OFF_IGNORE_ARI 164
#define NVM_OFF_DIS_GRE_VER_CHECK 171
#define NVM_OFF_ENABLE_SRIOV 401
+#define NVM_OFF_SUPPORT_RDMA 506
#define NVM_OFF_NVM_CFG_VER 602
#define BNXT_NVM_CFG_VER_BITS 8
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 9c5820839514..48dd5922e4dd 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -4541,16 +4541,16 @@ static int bnxt_get_module_status(struct bnxt *bp, struct netlink_ext_ack *extac
return -EINVAL;
}
-static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
- const struct ethtool_module_eeprom *page_data,
- struct netlink_ext_ack *extack)
+static int
+bnxt_mod_eeprom_by_page_precheck(struct bnxt *bp,
+ const struct ethtool_module_eeprom *page_data,
+ struct netlink_ext_ack *extack)
{
- struct bnxt *bp = netdev_priv(dev);
int rc;
if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
NL_SET_ERR_MSG_MOD(extack,
- "Module read not permitted on untrusted VF");
+ "Module read/write not permitted on untrusted VF");
return -EPERM;
}
@@ -4567,6 +4567,19 @@ static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
NL_SET_ERR_MSG_MOD(extack, "Firmware not capable for bank selection");
return -EINVAL;
}
+ return 0;
+}
+
+static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
+ const struct ethtool_module_eeprom *page_data,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc;
+
+ rc = bnxt_mod_eeprom_by_page_precheck(bp, page_data, extack);
+ if (rc)
+ return rc;
rc = bnxt_read_sfp_module_eeprom_info(bp, page_data->i2c_address << 1,
page_data->page, page_data->bank,
@@ -4580,6 +4593,62 @@ static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
return page_data->length;
}
+static int bnxt_write_sfp_module_eeprom_info(struct bnxt *bp,
+ const struct ethtool_module_eeprom *page)
+{
+ struct hwrm_port_phy_i2c_write_input *req;
+ int bytes_written = 0;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_I2C_WRITE);
+ if (rc)
+ return rc;
+
+ hwrm_req_hold(bp, req);
+ req->i2c_slave_addr = page->i2c_address << 1;
+ req->page_number = cpu_to_le16(page->page);
+ req->bank_number = page->bank;
+ req->port_id = cpu_to_le16(bp->pf.port_id);
+ req->enables = cpu_to_le32(PORT_PHY_I2C_WRITE_REQ_ENABLES_PAGE_OFFSET |
+ PORT_PHY_I2C_WRITE_REQ_ENABLES_BANK_NUMBER);
+
+ while (bytes_written < page->length) {
+ u16 xfer_size;
+
+ xfer_size = min_t(u16, page->length - bytes_written,
+ BNXT_MAX_PHY_I2C_RESP_SIZE);
+ req->page_offset = cpu_to_le16(page->offset + bytes_written);
+ req->data_length = xfer_size;
+ memcpy(req->data, page->data + bytes_written, xfer_size);
+ rc = hwrm_req_send(bp, req);
+ if (rc)
+ break;
+ bytes_written += xfer_size;
+ }
+
+ hwrm_req_drop(bp, req);
+ return rc;
+}
+
+static int bnxt_set_module_eeprom_by_page(struct net_device *dev,
+ const struct ethtool_module_eeprom *page_data,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc;
+
+ rc = bnxt_mod_eeprom_by_page_precheck(bp, page_data, extack);
+ if (rc)
+ return rc;
+
+ rc = bnxt_write_sfp_module_eeprom_info(bp, page_data);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Module`s eeprom write failed");
+ return rc;
+ }
+ return page_data->length;
+}
+
static int bnxt_nway_reset(struct net_device *dev)
{
int rc = 0;
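bnxt_write_sfp_module_eeprom_info() above issues one HWRM request per chunk, bounded by BNXT_MAX_PHY_I2C_RESP_SIZE (the new request structure carries a 64-byte data[] array). The chunking loop, restated as a self-contained sketch with a caller-supplied transfer callback:

#include <stdint.h>

#define CHUNK_MAX 64	/* mirrors the 64-byte data[] in the request */

/* Sketch: slice [offset, offset + length) into CHUNK_MAX transfers,
 * stopping at the first failure, as the driver loop above does.
 */
static int write_chunked(int (*xfer)(uint16_t off, const uint8_t *buf,
				     uint8_t len),
			 uint16_t offset, const uint8_t *data, uint16_t length)
{
	uint16_t done = 0;

	while (done < length) {
		uint16_t n = length - done;

		if (n > CHUNK_MAX)
			n = CHUNK_MAX;
		if (xfer(offset + done, data + done, (uint8_t)n))
			return -1;
		done += n;
	}
	return 0;
}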
@@ -5077,8 +5146,9 @@ static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump)
{
struct bnxt *bp = netdev_priv(dev);
- if (dump->flag > BNXT_DUMP_DRIVER) {
- netdev_info(dev, "Supports only Live(0), Crash(1), Driver(2) dumps.\n");
+ if (dump->flag > BNXT_DUMP_LIVE_WITH_CTX_L1_CACHE) {
+ netdev_info(dev,
+ "Supports only Live(0), Crash(1), Driver(2), Live with cached context(3) dumps.\n");
return -EINVAL;
}
@@ -5441,6 +5511,7 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.get_module_info = bnxt_get_module_info,
.get_module_eeprom = bnxt_get_module_eeprom,
.get_module_eeprom_by_page = bnxt_get_module_eeprom_by_page,
+ .set_module_eeprom_by_page = bnxt_set_module_eeprom_by_page,
.nway_reset = bnxt_nway_reset,
.set_phys_id = bnxt_set_phys_id,
.self_test = bnxt_self_test,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 5f8de1634378..549231703bce 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -2,7 +2,7 @@
*
* Copyright (c) 2014-2016 Broadcom Corporation
* Copyright (c) 2014-2018 Broadcom Limited
- * Copyright (c) 2018-2024 Broadcom Inc.
+ * Copyright (c) 2018-2025 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -438,6 +438,7 @@ struct cmd_nums {
#define HWRM_MFG_PRVSN_EXPORT_CERT 0x219UL
#define HWRM_STAT_DB_ERROR_QSTATS 0x21aUL
#define HWRM_MFG_TESTS 0x21bUL
+ #define HWRM_MFG_WRITE_CERT_NVM 0x21cUL
#define HWRM_PORT_POE_CFG 0x230UL
#define HWRM_PORT_POE_QCFG 0x231UL
#define HWRM_UDCC_QCAPS 0x258UL
@@ -514,6 +515,8 @@ struct cmd_nums {
#define HWRM_TFC_TBL_SCOPE_CONFIG_GET 0x39aUL
#define HWRM_TFC_RESC_USAGE_QUERY 0x39bUL
#define HWRM_TFC_GLOBAL_ID_FREE 0x39cUL
+ #define HWRM_TFC_TCAM_PRI_UPDATE 0x39dUL
+ #define HWRM_TFC_HOT_UPGRADE_PROCESS 0x3a0UL
#define HWRM_SV 0x400UL
#define HWRM_DBG_SERDES_TEST 0xff0eUL
#define HWRM_DBG_LOG_BUFFER_FLUSH 0xff0fUL
@@ -629,8 +632,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 3
-#define HWRM_VERSION_RSVD 85
-#define HWRM_VERSION_STR "1.10.3.85"
+#define HWRM_VERSION_RSVD 97
+#define HWRM_VERSION_STR "1.10.3.97"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -1905,11 +1908,15 @@ struct hwrm_func_qcaps_output {
__le32 roce_vf_max_srq;
__le32 roce_vf_max_gid;
__le32 flags_ext3;
- #define FUNC_QCAPS_RESP_FLAGS_EXT3_RM_RSV_WHILE_ALLOC_CAP 0x1UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT3_REQUIRE_L2_FILTER 0x2UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT3_MAX_ROCE_VFS_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_RM_RSV_WHILE_ALLOC_CAP 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_REQUIRE_L2_FILTER 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_MAX_ROCE_VFS_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_RX_RATE_PROFILE_SEL_SUPPORTED 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_BIDI_OPT_SUPPORTED 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_MIRROR_ON_ROCE_SUPPORTED 0x20UL
__le16 max_roce_vfs;
- u8 unused_3[5];
+ __le16 max_crypto_rx_flow_filters;
+ u8 unused_3[3];
u8 valid;
};
@@ -1924,7 +1931,7 @@ struct hwrm_func_qcfg_input {
u8 unused_0[6];
};
-/* hwrm_func_qcfg_output (size:1280b/160B) */
+/* hwrm_func_qcfg_output (size:1344b/168B) */
struct hwrm_func_qcfg_output {
__le16 error_code;
__le16 req_type;
@@ -2087,14 +2094,18 @@ struct hwrm_func_qcfg_output {
__le16 host_mtu;
__le16 flags2;
#define FUNC_QCFG_RESP_FLAGS2_SRIOV_DSCP_INSERT_ENABLED 0x1UL
- u8 unused_4[2];
+ __le16 stag_vid;
u8 port_kdnet_mode;
#define FUNC_QCFG_RESP_PORT_KDNET_MODE_DISABLED 0x0UL
#define FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED 0x1UL
#define FUNC_QCFG_RESP_PORT_KDNET_MODE_LAST FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED
u8 kdnet_pcie_function;
__le16 port_kdnet_fid;
- u8 unused_5[2];
+ u8 unused_5;
+ u8 roce_bidi_opt_mode;
+ #define FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_DISABLED 0x1UL
+ #define FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_DEDICATED 0x2UL
+ #define FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_SHARED 0x4UL
__le32 num_ktls_tx_key_ctxs;
__le32 num_ktls_rx_key_ctxs;
u8 lag_id;
@@ -2112,7 +2123,8 @@ struct hwrm_func_qcfg_output {
__le16 xid_partition_cfg;
#define FUNC_QCFG_RESP_XID_PARTITION_CFG_TX_CK 0x1UL
#define FUNC_QCFG_RESP_XID_PARTITION_CFG_RX_CK 0x2UL
- u8 unused_7;
+ __le16 mirror_vnic_id;
+ u8 unused_7[7];
u8 valid;
};
@@ -3965,7 +3977,7 @@ struct ts_split_entries {
__le32 region_num_entries;
u8 tsid;
u8 lkup_static_bkt_cnt_exp[2];
- u8 rsvd;
+ u8 locked;
__le32 rsvd2[2];
};
@@ -5483,6 +5495,37 @@ struct hwrm_port_phy_qcaps_output {
u8 valid;
};
+/* hwrm_port_phy_i2c_write_input (size:832b/104B) */
+struct hwrm_port_phy_i2c_write_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define PORT_PHY_I2C_WRITE_REQ_ENABLES_PAGE_OFFSET 0x1UL
+ #define PORT_PHY_I2C_WRITE_REQ_ENABLES_BANK_NUMBER 0x2UL
+ __le16 port_id;
+ u8 i2c_slave_addr;
+ u8 bank_number;
+ __le16 page_number;
+ __le16 page_offset;
+ u8 data_length;
+ u8 unused_1[7];
+ __le32 data[16];
+};
+
+/* hwrm_port_phy_i2c_write_output (size:128b/16B) */
+struct hwrm_port_phy_i2c_write_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
/* hwrm_port_phy_i2c_read_input (size:320b/40B) */
struct hwrm_port_phy_i2c_read_input {
__le16 req_type;
@@ -6610,8 +6653,9 @@ struct hwrm_vnic_alloc_input {
__le32 flags;
#define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL
#define VNIC_ALLOC_REQ_FLAGS_VIRTIO_NET_FID_VALID 0x2UL
+ #define VNIC_ALLOC_REQ_FLAGS_VNIC_ID_VALID 0x4UL
__le16 virtio_net_fid;
- u8 unused_0[2];
+ __le16 vnic_id;
};
/* hwrm_vnic_alloc_output (size:128b/16B) */
@@ -6710,6 +6754,7 @@ struct hwrm_vnic_cfg_input {
#define VNIC_CFG_REQ_ENABLES_QUEUE_ID 0x80UL
#define VNIC_CFG_REQ_ENABLES_RX_CSUM_V2_MODE 0x100UL
#define VNIC_CFG_REQ_ENABLES_L2_CQE_MODE 0x200UL
+ #define VNIC_CFG_REQ_ENABLES_RAW_QP_ID 0x400UL
__le16 vnic_id;
__le16 dflt_ring_grp;
__le16 rss_rule;
@@ -6729,7 +6774,7 @@ struct hwrm_vnic_cfg_input {
#define VNIC_CFG_REQ_L2_CQE_MODE_COMPRESSED 0x1UL
#define VNIC_CFG_REQ_L2_CQE_MODE_MIXED 0x2UL
#define VNIC_CFG_REQ_L2_CQE_MODE_LAST VNIC_CFG_REQ_L2_CQE_MODE_MIXED
- u8 unused0[4];
+ __le32 raw_qp_id;
};
/* hwrm_vnic_cfg_output (size:128b/16B) */
@@ -7082,6 +7127,15 @@ struct hwrm_vnic_plcmodes_cfg_output {
u8 valid;
};
+/* hwrm_vnic_plcmodes_cfg_cmd_err (size:64b/8B) */
+struct hwrm_vnic_plcmodes_cfg_cmd_err {
+ u8 code;
+ #define VNIC_PLCMODES_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define VNIC_PLCMODES_CFG_CMD_ERR_CODE_INVALID_HDS_THRESHOLD 0x1UL
+ #define VNIC_PLCMODES_CFG_CMD_ERR_CODE_LAST VNIC_PLCMODES_CFG_CMD_ERR_CODE_INVALID_HDS_THRESHOLD
+ u8 unused_0[7];
+};
+
/* hwrm_vnic_rss_cos_lb_ctx_alloc_input (size:128b/16B) */
struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
__le16 req_type;
@@ -7131,15 +7185,16 @@ struct hwrm_ring_alloc_input {
__le16 target_id;
__le64 resp_addr;
__le32 enables;
- #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL
- #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
- #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
- #define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL
- #define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL
- #define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL
- #define RING_ALLOC_REQ_ENABLES_SCHQ_ID 0x200UL
- #define RING_ALLOC_REQ_ENABLES_MPC_CHNLS_TYPE 0x400UL
- #define RING_ALLOC_REQ_ENABLES_STEERING_TAG_VALID 0x800UL
+ #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL
+ #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
+ #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
+ #define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL
+ #define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL
+ #define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL
+ #define RING_ALLOC_REQ_ENABLES_SCHQ_ID 0x200UL
+ #define RING_ALLOC_REQ_ENABLES_MPC_CHNLS_TYPE 0x400UL
+ #define RING_ALLOC_REQ_ENABLES_STEERING_TAG_VALID 0x800UL
+ #define RING_ALLOC_REQ_ENABLES_RX_RATE_PROFILE_VALID 0x1000UL
u8 ring_type;
#define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL
#define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL
@@ -7226,7 +7281,11 @@ struct hwrm_ring_alloc_input {
#define RING_ALLOC_REQ_MPC_CHNLS_TYPE_RE_CFA 0x3UL
#define RING_ALLOC_REQ_MPC_CHNLS_TYPE_PRIMATE 0x4UL
#define RING_ALLOC_REQ_MPC_CHNLS_TYPE_LAST RING_ALLOC_REQ_MPC_CHNLS_TYPE_PRIMATE
- u8 unused_4[2];
+ u8 rx_rate_profile_sel;
+ #define RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_DEFAULT 0x0UL
+ #define RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_POLL_MODE 0x1UL
+ #define RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_LAST RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_POLL_MODE
+ u8 unused_4;
__le64 cq_handle;
};
@@ -9122,6 +9181,39 @@ struct pcie_ctx_hw_stats {
__le64 pcie_recovery_histogram;
};
+/* pcie_ctx_hw_stats_v2 (size:4096b/512B) */
+struct pcie_ctx_hw_stats_v2 {
+ __le64 pcie_pl_signal_integrity;
+ __le64 pcie_dl_signal_integrity;
+ __le64 pcie_tl_signal_integrity;
+ __le64 pcie_link_integrity;
+ __le64 pcie_tx_traffic_rate;
+ __le64 pcie_rx_traffic_rate;
+ __le64 pcie_tx_dllp_statistics;
+ __le64 pcie_rx_dllp_statistics;
+ __le64 pcie_equalization_time;
+ __le32 pcie_ltssm_histogram[4];
+ __le64 pcie_recovery_histogram;
+ __le32 pcie_tl_credit_nph_histogram[8];
+ __le32 pcie_tl_credit_ph_histogram[8];
+ __le32 pcie_tl_credit_pd_histogram[8];
+ __le32 pcie_cmpl_latest_times[4];
+ __le32 pcie_cmpl_longest_time;
+ __le32 pcie_cmpl_shortest_time;
+ __le32 unused_0[2];
+ __le32 pcie_cmpl_latest_headers[4][4];
+ __le32 pcie_cmpl_longest_headers[4][4];
+ __le32 pcie_cmpl_shortest_headers[4][4];
+ __le32 pcie_wr_latency_histogram[12];
+ __le32 pcie_wr_latency_all_normal_count;
+ __le32 unused_1;
+ __le64 pcie_posted_packet_count;
+ __le64 pcie_non_posted_packet_count;
+ __le64 pcie_other_packet_count;
+ __le64 pcie_blocked_packet_count;
+ __le64 pcie_cmpl_packet_count;
+};
+
/* hwrm_stat_generic_qstats_input (size:256b/32B) */
struct hwrm_stat_generic_qstats_input {
__le16 req_type;
@@ -9317,6 +9409,9 @@ struct hwrm_struct_hdr {
#define STRUCT_HDR_STRUCT_ID_LAST STRUCT_HDR_STRUCT_ID_UDCC_RTT_BUCKET_BOUND
__le16 len;
u8 version;
+ #define STRUCT_HDR_VERSION_0 0x0UL
+ #define STRUCT_HDR_VERSION_1 0x1UL
+ #define STRUCT_HDR_VERSION_LAST STRUCT_HDR_VERSION_1
u8 count;
__le16 subtype;
__le16 next_offset;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 12b6ed51fd88..5ddddd89052f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -946,7 +946,9 @@ void bnxt_sriov_disable(struct bnxt *bp)
/* Reclaim all resources for the PF. */
rtnl_lock();
+ netdev_lock(bp->dev);
bnxt_restore_pf_fw_resources(bp);
+ netdev_unlock(bp->dev);
rtnl_unlock();
}
@@ -956,17 +958,21 @@ int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
struct bnxt *bp = netdev_priv(dev);
rtnl_lock();
+ netdev_lock(dev);
if (!netif_running(dev)) {
netdev_warn(dev, "Reject SRIOV config request since if is down!\n");
+ netdev_unlock(dev);
rtnl_unlock();
return 0;
}
if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
netdev_warn(dev, "Reject SRIOV config request when FW reset is in progress\n");
+ netdev_unlock(dev);
rtnl_unlock();
return 0;
}
bp->sriov_cfg = true;
+ netdev_unlock(dev);
rtnl_unlock();
if (pci_vfs_assigned(bp->pdev)) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index e4a7f37036ed..a8e930d5dbb0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -112,7 +112,7 @@ int bnxt_register_dev(struct bnxt_en_dev *edev,
struct bnxt_ulp *ulp;
int rc = 0;
- rtnl_lock();
+ netdev_lock(dev);
mutex_lock(&edev->en_dev_lock);
if (!bp->irq_tbl) {
rc = -ENODEV;
@@ -138,7 +138,7 @@ int bnxt_register_dev(struct bnxt_en_dev *edev,
edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
exit:
mutex_unlock(&edev->en_dev_lock);
- rtnl_unlock();
+ netdev_unlock(dev);
return rc;
}
EXPORT_SYMBOL(bnxt_register_dev);
@@ -151,7 +151,7 @@ void bnxt_unregister_dev(struct bnxt_en_dev *edev)
int i = 0;
ulp = edev->ulp_tbl;
- rtnl_lock();
+ netdev_lock(dev);
mutex_lock(&edev->en_dev_lock);
if (ulp->msix_requested)
edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
@@ -169,7 +169,7 @@ void bnxt_unregister_dev(struct bnxt_en_dev *edev)
i++;
}
mutex_unlock(&edev->en_dev_lock);
- rtnl_unlock();
+ netdev_unlock(dev);
return;
}
EXPORT_SYMBOL(bnxt_unregister_dev);
@@ -309,12 +309,14 @@ void bnxt_ulp_irq_stop(struct bnxt *bp)
if (!ulp->msix_requested)
return;
- ops = rtnl_dereference(ulp->ulp_ops);
+ netdev_lock(bp->dev);
+ ops = rcu_dereference(ulp->ulp_ops);
if (!ops || !ops->ulp_irq_stop)
return;
if (test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
reset = true;
ops->ulp_irq_stop(ulp->handle, reset);
+ netdev_unlock(bp->dev);
}
}
@@ -333,7 +335,8 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
if (!ulp->msix_requested)
return;
- ops = rtnl_dereference(ulp->ulp_ops);
+ netdev_lock(bp->dev);
+ ops = rcu_dereference(ulp->ulp_ops);
if (!ops || !ops->ulp_irq_restart)
return;
@@ -345,6 +348,7 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
bnxt_fill_msix_vecs(bp, ent);
}
ops->ulp_irq_restart(ulp->handle, ent);
+ netdev_unlock(bp->dev);
kfree(ent);
}
}
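The ULP IRQ paths above now serialize ulp_ops access with the per-netdev instance lock instead of rtnl, so the pointer is read with rcu_dereference() under that lock. The consumer-side pattern, as a sketch (my_ulp and my_ulp_ops are hypothetical stand-ins):

#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <net/netdev_lock.h>

struct my_ulp_ops {
	void (*handler)(void *handle);
};

struct my_ulp {
	struct my_ulp_ops __rcu *ops;
	void *handle;
};

/* Writers update ulp->ops under the same netdev instance lock, which is
 * what makes the plain rcu_dereference() here safe, mirroring the patch.
 */
static void my_ulp_notify(struct net_device *dev, struct my_ulp *ulp)
{
	struct my_ulp_ops *ops;

	netdev_lock(dev);
	ops = rcu_dereference(ulp->ops);
	if (ops && ops->handler)
		ops->handler(ulp->handle);
	netdev_unlock(dev);
}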
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index 1467b94a6427..619f0844e778 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -257,8 +257,7 @@ bool bnxt_dev_is_vf_rep(struct net_device *dev)
/* Called when the parent PF interface is closed:
* As the mode transition from SWITCHDEV to LEGACY
- * happens under the rtnl_lock() this routine is safe
- * under the rtnl_lock()
+ * happens under the netdev instance lock, this routine is safe
*/
void bnxt_vf_reps_close(struct bnxt *bp)
{
@@ -278,8 +277,7 @@ void bnxt_vf_reps_close(struct bnxt *bp)
/* Called when the parent PF interface is opened (re-opened):
* As the mode transition from SWITCHDEV to LEGACY
- * happen under the rtnl_lock() this routine is safe
- * under the rtnl_lock()
+ * happens under the netdev instance lock, this routine is safe
*/
void bnxt_vf_reps_open(struct bnxt *bp)
{
@@ -348,7 +346,7 @@ void bnxt_vf_reps_destroy(struct bnxt *bp)
/* Ensure that parent PF's and VF-reps' RX/TX has been quiesced
* before proceeding with VF-rep cleanup.
*/
- rtnl_lock();
+ netdev_lock(bp->dev);
if (netif_running(bp->dev)) {
bnxt_close_nic(bp, false, false);
closed = true;
@@ -365,10 +363,10 @@ void bnxt_vf_reps_destroy(struct bnxt *bp)
bnxt_open_nic(bp, false, false);
bp->eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
}
- rtnl_unlock();
+ netdev_unlock(bp->dev);
- /* Need to call vf_reps_destroy() outside of rntl_lock
- * as unregister_netdev takes rtnl_lock
+ /* Need to call vf_reps_destroy() outside of the netdev instance lock
+ * as unregister_netdev takes it
*/
__bnxt_vf_reps_destroy(bp);
}
@@ -376,7 +374,7 @@ void bnxt_vf_reps_destroy(struct bnxt *bp)
/* Free the VF-Reps in firmware, during firmware hot-reset processing.
* Note that the VF-Rep netdevs are still active (not unregistered) during
* this process. As the mode transition from SWITCHDEV to LEGACY happens
- * under the rtnl_lock() this routine is safe under the rtnl_lock().
+ * under the netdev instance lock, this routine is safe.
*/
void bnxt_vf_reps_free(struct bnxt *bp)
{
@@ -413,7 +411,7 @@ static int bnxt_alloc_vf_rep(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
/* Allocate the VF-Reps in firmware, during firmware hot-reset processing.
* Note that the VF-Rep netdevs are still active (not unregistered) during
* this process. As the mode transition from SWITCHDEV to LEGACY happens
- * under the rtnl_lock() this routine is safe under the rtnl_lock().
+ * under the netdev instance lock, this routine is safe.
*/
int bnxt_vf_reps_alloc(struct bnxt *bp)
{
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 299822cacca4..e675611777b5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -15,6 +15,7 @@
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/filter.h>
+#include <net/netdev_lock.h>
#include <net/page_pool/helpers.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
@@ -48,8 +49,7 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
tx_buf->page = virt_to_head_page(xdp->data);
txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
- flags = (len << TX_BD_LEN_SHIFT) |
- ((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT) |
+ flags = (len << TX_BD_LEN_SHIFT) | TX_BD_CNT(num_frags + 1) |
bnxt_lhint_arr[len >> 9];
txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 1 + num_frags);
@@ -382,13 +382,14 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
return nxmit;
}
-/* Under rtnl_lock */
static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
{
struct net_device *dev = bp->dev;
int tx_xdp = 0, tx_cp, rc, tc;
struct bpf_prog *old;
+ netdev_assert_locked(dev);
+
if (prog && !prog->aux->xdp_has_frags &&
bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
netdev_warn(dev, "MTU %d larger than %d without XDP frag support.\n",
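The TX_BD_CNT() helper used above packs the buffer-descriptor count into its flags field in one step. Its definition lives in bnxt.h rather than this hunk, but it is presumably along these lines (the mask name is assumed):

/* Hedged reconstruction, not shown in this diff: shift the count into
 * place and clamp it to the BD-count field.
 */
#define TX_BD_CNT(n)	(((n) << TX_BD_FLAGS_BD_CNT_SHIFT) & TX_BD_FLAGS_BD_CNT)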
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 3e93f957430b..73d78dcb774d 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) controller driver
*
- * Copyright (c) 2014-2024 Broadcom
+ * Copyright (c) 2014-2025 Broadcom
*/
#define pr_fmt(fmt) "bcmgenet: " fmt
@@ -41,15 +41,13 @@
#include "bcmgenet.h"
-/* Maximum number of hardware queues, downsized if needed */
-#define GENET_MAX_MQ_CNT 4
-
/* Default highest priority queue for multi queue support */
-#define GENET_Q0_PRIORITY 0
+#define GENET_Q1_PRIORITY 0
+#define GENET_Q0_PRIORITY 1
-#define GENET_Q16_RX_BD_CNT \
+#define GENET_Q0_RX_BD_CNT \
(TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
-#define GENET_Q16_TX_BD_CNT \
+#define GENET_Q0_TX_BD_CNT \
(TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)
#define RX_BUF_LENGTH 2048
@@ -104,7 +102,7 @@ static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
* the platform is explicitly configured for 64-bits/LPAE.
*/
#ifdef CONFIG_PHYS_ADDR_T_64BIT
- if (priv->hw_params->flags & GENET_HAS_40BITS)
+ if (bcmgenet_has_40bits(priv))
bcmgenet_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
#endif
}
@@ -446,33 +444,48 @@ static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index)
u32 offset;
u32 reg;
- offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
- reg = bcmgenet_hfb_reg_readl(priv, offset);
- reg |= (1 << (f_index % 32));
- bcmgenet_hfb_reg_writel(priv, reg, offset);
- reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
- reg |= RBUF_HFB_EN;
- bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) {
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ reg |= (1 << ((f_index % 32) + RBUF_HFB_FILTER_EN_SHIFT)) |
+ RBUF_HFB_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+ } else {
+ offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
+ reg = bcmgenet_hfb_reg_readl(priv, offset);
+ reg |= (1 << (f_index % 32));
+ bcmgenet_hfb_reg_writel(priv, reg, offset);
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ reg |= RBUF_HFB_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+ }
}
static void bcmgenet_hfb_disable_filter(struct bcmgenet_priv *priv, u32 f_index)
{
u32 offset, reg, reg1;
- offset = HFB_FLT_ENABLE_V3PLUS;
- reg = bcmgenet_hfb_reg_readl(priv, offset);
- reg1 = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32));
- if (f_index < 32) {
- reg1 &= ~(1 << (f_index % 32));
- bcmgenet_hfb_reg_writel(priv, reg1, offset + sizeof(u32));
- } else {
- reg &= ~(1 << (f_index % 32));
- bcmgenet_hfb_reg_writel(priv, reg, offset);
- }
- if (!reg && !reg1) {
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) {
reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
- reg &= ~RBUF_HFB_EN;
+ reg &= ~(1 << ((f_index % 32) + RBUF_HFB_FILTER_EN_SHIFT));
+ if (!(reg & RBUF_HFB_FILTER_EN_MASK))
+ reg &= ~RBUF_HFB_EN;
bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+ } else {
+ offset = HFB_FLT_ENABLE_V3PLUS;
+ reg = bcmgenet_hfb_reg_readl(priv, offset);
+ reg1 = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32));
+ if (f_index < 32) {
+ reg1 &= ~(1 << (f_index % 32));
+ bcmgenet_hfb_reg_writel(priv, reg1, offset + sizeof(u32));
+ } else {
+ reg &= ~(1 << (f_index % 32));
+ bcmgenet_hfb_reg_writel(priv, reg, offset);
+ }
+ if (!reg && !reg1) {
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ reg &= ~RBUF_HFB_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+ }
}
}
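The split above reflects where the per-filter enable bits live: on GENET v1/v2 they share HFB_CTRL with the global RBUF_HFB_EN bit, while v3+ keeps them in the two HFB_FLT_ENABLE_V3PLUS words. A worked fragment for f_index = 3, mirroring the enable function above (shift and mask values come from bcmgenet.h and are not shown here):

/* v1/v2: one register carries both the global and per-filter enables */
reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
reg |= (1 << (3 + RBUF_HFB_FILTER_EN_SHIFT)) | RBUF_HFB_EN;
bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);

/* v3+: filter 3 (< 32) lands in the second enable word instead,
 * with RBUF_HFB_EN still set separately in HFB_CTRL as above
 */
reg = bcmgenet_hfb_reg_readl(priv, HFB_FLT_ENABLE_V3PLUS + sizeof(u32));
reg |= 1 << 3;
bcmgenet_hfb_reg_writel(priv, reg, HFB_FLT_ENABLE_V3PLUS + sizeof(u32));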
@@ -482,6 +495,9 @@ static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv,
u32 offset;
u32 reg;
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
+ return;
+
offset = f_index / 8;
reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset);
reg &= ~(0xF << (4 * (f_index % 8)));
@@ -495,9 +511,13 @@ static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv,
u32 offset;
u32 reg;
- offset = HFB_FLT_LEN_V3PLUS +
- ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) *
- sizeof(u32);
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
+ offset = HFB_FLT_LEN_V2;
+ else
+ offset = HFB_FLT_LEN_V3PLUS;
+
+ offset += sizeof(u32) *
+ ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4);
reg = bcmgenet_hfb_reg_readl(priv, offset);
reg &= ~(0xFF << (8 * (f_index % 4)));
reg |= ((f_length & 0xFF) << (8 * (f_index % 4)));
@@ -579,13 +599,13 @@ static void bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
struct bcmgenet_rxnfc_rule *rule)
{
struct ethtool_rx_flow_spec *fs = &rule->fs;
- u32 offset = 0, f_length = 0, f;
+ u32 offset = 0, f_length = 0, f, q;
u8 val_8, mask_8;
__be16 val_16;
u16 mask_16;
size_t size;
- f = fs->location;
+ f = fs->location + 1;
if (fs->flow_type & FLOW_MAC_EXT) {
bcmgenet_hfb_insert_data(priv, f, 0,
&fs->h_ext.h_dest, &fs->m_ext.h_dest,
@@ -667,19 +687,16 @@ static void bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
}
bcmgenet_hfb_set_filter_length(priv, f, 2 * f_length);
- if (!fs->ring_cookie || fs->ring_cookie == RX_CLS_FLOW_WAKE) {
- /* Ring 0 flows can be handled by the default Descriptor Ring
- * We'll map them to ring 0, but don't enable the filter
- */
- bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, 0);
- rule->state = BCMGENET_RXNFC_STATE_DISABLED;
- } else {
+ if (fs->ring_cookie == RX_CLS_FLOW_WAKE)
+ q = 0;
+ else if (fs->ring_cookie == RX_CLS_FLOW_DISC)
+ q = priv->hw_params->rx_queues + 1;
+ else
/* Other Rx rings are direct mapped here */
- bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f,
- fs->ring_cookie);
- bcmgenet_hfb_enable_filter(priv, f);
- rule->state = BCMGENET_RXNFC_STATE_ENABLED;
- }
+ q = fs->ring_cookie;
+ bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, q);
+ bcmgenet_hfb_enable_filter(priv, f);
+ rule->state = BCMGENET_RXNFC_STATE_ENABLED;
}
/* bcmgenet_hfb_clear
@@ -690,6 +707,7 @@ static void bcmgenet_hfb_clear_filter(struct bcmgenet_priv *priv, u32 f_index)
{
u32 base, i;
+ bcmgenet_hfb_set_filter_length(priv, f_index, 0);
base = f_index * priv->hw_params->hfb_filter_size;
for (i = 0; i < priv->hw_params->hfb_filter_size; i++)
bcmgenet_hfb_writel(priv, 0x0, (base + i) * sizeof(u32));
@@ -699,22 +717,23 @@ static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
{
u32 i;
- if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
- return;
-
- bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
- bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
- bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);
-
- for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
- bcmgenet_rdma_writel(priv, 0x0, i);
+ bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL);
- for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++)
- bcmgenet_hfb_reg_writel(priv, 0x0,
- HFB_FLT_LEN_V3PLUS + i * sizeof(u32));
+ if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv)) {
+ bcmgenet_hfb_reg_writel(priv, 0,
+ HFB_FLT_ENABLE_V3PLUS);
+ bcmgenet_hfb_reg_writel(priv, 0,
+ HFB_FLT_ENABLE_V3PLUS + 4);
+ for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
+ bcmgenet_rdma_writel(priv, 0, i);
+ }
for (i = 0; i < priv->hw_params->hfb_filter_cnt; i++)
bcmgenet_hfb_clear_filter(priv, i);
+
+ /* Enable filter 0 to send default flow to ring 0 */
+ bcmgenet_hfb_set_filter_length(priv, 0, 4);
+ bcmgenet_hfb_enable_filter(priv, 0);
}
static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
@@ -722,9 +741,6 @@ static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
int i;
INIT_LIST_HEAD(&priv->rxnfc_list);
- if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
- return;
-
for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
INIT_LIST_HEAD(&priv->rxnfc_rules[i].list);
priv->rxnfc_rules[i].state = BCMGENET_RXNFC_STATE_UNUSED;
@@ -819,20 +835,16 @@ static int bcmgenet_get_coalesce(struct net_device *dev,
unsigned int i;
ec->tx_max_coalesced_frames =
- bcmgenet_tdma_ring_readl(priv, DESC_INDEX,
- DMA_MBUF_DONE_THRESH);
+ bcmgenet_tdma_ring_readl(priv, 0, DMA_MBUF_DONE_THRESH);
ec->rx_max_coalesced_frames =
- bcmgenet_rdma_ring_readl(priv, DESC_INDEX,
- DMA_MBUF_DONE_THRESH);
+ bcmgenet_rdma_ring_readl(priv, 0, DMA_MBUF_DONE_THRESH);
ec->rx_coalesce_usecs =
- bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000;
+ bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT) * 8192 / 1000;
- for (i = 0; i < priv->hw_params->rx_queues; i++) {
+ for (i = 0; i <= priv->hw_params->rx_queues; i++) {
ring = &priv->rx_rings[i];
ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;
}
- ring = &priv->rx_rings[DESC_INDEX];
- ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;
return 0;
}
@@ -902,17 +914,13 @@ static int bcmgenet_set_coalesce(struct net_device *dev,
/* Program all TX queues with the same values, as there is no
* ethtool knob to do coalescing on a per-queue basis
*/
- for (i = 0; i < priv->hw_params->tx_queues; i++)
+ for (i = 0; i <= priv->hw_params->tx_queues; i++)
bcmgenet_tdma_ring_writel(priv, i,
ec->tx_max_coalesced_frames,
DMA_MBUF_DONE_THRESH);
- bcmgenet_tdma_ring_writel(priv, DESC_INDEX,
- ec->tx_max_coalesced_frames,
- DMA_MBUF_DONE_THRESH);
- for (i = 0; i < priv->hw_params->rx_queues; i++)
+ for (i = 0; i <= priv->hw_params->rx_queues; i++)
bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[i], ec);
- bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[DESC_INDEX], ec);
return 0;
}
@@ -1120,7 +1128,7 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
STAT_GENET_Q(1),
STAT_GENET_Q(2),
STAT_GENET_Q(3),
- STAT_GENET_Q(16),
+ STAT_GENET_Q(4),
};
#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats)
@@ -1438,7 +1446,8 @@ static int bcmgenet_insert_flow(struct net_device *dev,
}
if (cmd->fs.ring_cookie > priv->hw_params->rx_queues &&
- cmd->fs.ring_cookie != RX_CLS_FLOW_WAKE) {
+ cmd->fs.ring_cookie != RX_CLS_FLOW_WAKE &&
+ cmd->fs.ring_cookie != RX_CLS_FLOW_DISC) {
netdev_err(dev, "rxnfc: Unsupported action (%llu)\n",
cmd->fs.ring_cookie);
return -EINVAL;
@@ -1472,10 +1481,10 @@ static int bcmgenet_insert_flow(struct net_device *dev,
loc_rule = &priv->rxnfc_rules[cmd->fs.location];
}
if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED)
- bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
+ bcmgenet_hfb_disable_filter(priv, cmd->fs.location + 1);
if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
list_del(&loc_rule->list);
- bcmgenet_hfb_clear_filter(priv, cmd->fs.location);
+ bcmgenet_hfb_clear_filter(priv, cmd->fs.location + 1);
}
loc_rule->state = BCMGENET_RXNFC_STATE_UNUSED;
memcpy(&loc_rule->fs, &cmd->fs,
@@ -1505,10 +1514,10 @@ static int bcmgenet_delete_flow(struct net_device *dev,
}
if (rule->state == BCMGENET_RXNFC_STATE_ENABLED)
- bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
+ bcmgenet_hfb_disable_filter(priv, cmd->fs.location + 1);
if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
list_del(&rule->list);
- bcmgenet_hfb_clear_filter(priv, cmd->fs.location);
+ bcmgenet_hfb_clear_filter(priv, cmd->fs.location + 1);
}
rule->state = BCMGENET_RXNFC_STATE_UNUSED;
memset(&rule->fs, 0, sizeof(struct ethtool_rx_flow_spec));
@@ -1651,9 +1660,9 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv,
case GENET_POWER_PASSIVE:
/* Power down LED */
- if (priv->hw_params->flags & GENET_HAS_EXT) {
+ if (bcmgenet_has_ext(priv)) {
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
- if (GENET_IS_V5(priv) && !priv->ephy_16nm)
+ if (GENET_IS_V5(priv) && !bcmgenet_has_ephy_16nm(priv))
reg |= EXT_PWR_DOWN_PHY_EN |
EXT_PWR_DOWN_PHY_RD |
EXT_PWR_DOWN_PHY_SD |
@@ -1676,13 +1685,14 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv,
return ret;
}
-static void bcmgenet_power_up(struct bcmgenet_priv *priv,
- enum bcmgenet_power_mode mode)
+static int bcmgenet_power_up(struct bcmgenet_priv *priv,
+ enum bcmgenet_power_mode mode)
{
+ int ret = 0;
u32 reg;
- if (!(priv->hw_params->flags & GENET_HAS_EXT))
- return;
+ if (!bcmgenet_has_ext(priv))
+ return ret;
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
@@ -1690,7 +1700,7 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
case GENET_POWER_PASSIVE:
reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS |
EXT_ENERGY_DET_MASK);
- if (GENET_IS_V5(priv) && !priv->ephy_16nm) {
+ if (GENET_IS_V5(priv) && !bcmgenet_has_ephy_16nm(priv)) {
reg &= ~(EXT_PWR_DOWN_PHY_EN |
EXT_PWR_DOWN_PHY_RD |
EXT_PWR_DOWN_PHY_SD |
@@ -1718,11 +1728,13 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
}
break;
case GENET_POWER_WOL_MAGIC:
- bcmgenet_wol_power_up_cfg(priv, mode);
- return;
+ ret = bcmgenet_wol_power_up_cfg(priv, mode);
+ break;
default:
break;
}
+
+ return ret;
}
static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
@@ -1759,18 +1771,6 @@ static struct enet_cb *bcmgenet_put_txcb(struct bcmgenet_priv *priv,
return tx_cb_ptr;
}
-static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
-{
- bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
- INTRL2_CPU_MASK_SET);
-}
-
-static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring)
-{
- bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
- INTRL2_CPU_MASK_CLEAR);
-}
-
static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring)
{
bcmgenet_intrl2_1_writel(ring->priv,
@@ -1785,18 +1785,6 @@ static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring)
INTRL2_CPU_MASK_CLEAR);
}
-static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring)
-{
- bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
- INTRL2_CPU_MASK_SET);
-}
-
-static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring)
-{
- bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
- INTRL2_CPU_MASK_CLEAR);
-}
-
static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring)
{
bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
@@ -1877,12 +1865,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
struct sk_buff *skb;
/* Clear status before servicing to reduce spurious interrupts */
- if (ring->index == DESC_INDEX)
- bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_TXDMA_DONE,
- INTRL2_CPU_CLEAR);
- else
- bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
- INTRL2_CPU_CLEAR);
+ bcmgenet_intrl2_1_writel(priv, (1 << ring->index), INTRL2_CPU_CLEAR);
/* Compute how many buffers are transmitted since last xmit call */
c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX)
@@ -1916,19 +1899,46 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
ring->packets += pkts_compl;
ring->bytes += bytes_compl;
- netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue),
+ netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->index),
pkts_compl, bytes_compl);
return txbds_processed;
}
static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
- struct bcmgenet_tx_ring *ring)
+ struct bcmgenet_tx_ring *ring,
+ bool all)
{
- unsigned int released;
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct device *kdev = &priv->pdev->dev;
+ unsigned int released, drop, wr_ptr;
+ struct enet_cb *cb_ptr;
+ struct sk_buff *skb;
spin_lock_bh(&ring->lock);
released = __bcmgenet_tx_reclaim(dev, ring);
+ if (all) {
+ skb = NULL;
+ drop = (ring->prod_index - ring->c_index) & DMA_C_INDEX_MASK;
+ released += drop;
+ ring->prod_index = ring->c_index & DMA_C_INDEX_MASK;
+ while (drop--) {
+ cb_ptr = bcmgenet_put_txcb(priv, ring);
+ skb = cb_ptr->skb;
+ bcmgenet_free_tx_cb(kdev, cb_ptr);
+ if (skb && cb_ptr == GENET_CB(skb)->first_cb) {
+ dev_consume_skb_any(skb);
+ skb = NULL;
+ }
+ }
+ if (skb)
+ dev_consume_skb_any(skb);
+ bcmgenet_tdma_ring_writel(priv, ring->index,
+ ring->prod_index, TDMA_PROD_INDEX);
+ wr_ptr = ring->write_ptr * WORDS_PER_BD(priv);
+ bcmgenet_tdma_ring_writel(priv, ring->index, wr_ptr,
+ TDMA_WRITE_PTR);
+ }
spin_unlock_bh(&ring->lock);
return released;
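When draining, the number of queued-but-never-sent descriptors is derived from free-running 16-bit indices, so the subtraction above is modular:

/* e.g. prod_index = 0x0005 and c_index = 0xfffb (indices wrapped):
 * drop = (0x0005 - 0xfffb) & DMA_C_INDEX_MASK = 10 outstanding BDs
 */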
@@ -1944,14 +1954,14 @@ static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
spin_lock(&ring->lock);
work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring);
if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
- txq = netdev_get_tx_queue(ring->priv->dev, ring->queue);
+ txq = netdev_get_tx_queue(ring->priv->dev, ring->index);
netif_tx_wake_queue(txq);
}
spin_unlock(&ring->lock);
if (work_done == 0) {
napi_complete(napi);
- ring->int_enable(ring);
+ bcmgenet_tx_ring_int_enable(ring);
return 0;
}
@@ -1962,14 +1972,11 @@ static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
static void bcmgenet_tx_reclaim_all(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- int i;
-
- if (netif_is_multiqueue(dev)) {
- for (i = 0; i < priv->hw_params->tx_queues; i++)
- bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
- }
+ int i = 0;
- bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
+ do {
+ bcmgenet_tx_reclaim(dev, &priv->tx_rings[i++], true);
+ } while (i <= priv->hw_params->tx_queues && netif_is_multiqueue(dev));
}
/* Reallocate the SKB to put enough headroom in front of it and insert
@@ -2057,19 +2064,14 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
index = skb_get_queue_mapping(skb);
/* Mapping strategy:
- * queue_mapping = 0, unclassified, packet xmited through ring16
- * queue_mapping = 1, goes to ring 0. (highest priority queue
- * queue_mapping = 2, goes to ring 1.
- * queue_mapping = 3, goes to ring 2.
- * queue_mapping = 4, goes to ring 3.
+ * queue_mapping = 0, unclassified, packet transmitted through ring 0
+ * queue_mapping = 1, goes to ring 1. (highest priority queue)
+ * queue_mapping = 2, goes to ring 2.
+ * queue_mapping = 3, goes to ring 3.
+ * queue_mapping = 4, goes to ring 4.
*/
- if (index == 0)
- index = DESC_INDEX;
- else
- index -= 1;
-
ring = &priv->tx_rings[index];
- txq = netdev_get_tx_queue(dev, ring->queue);
+ txq = netdev_get_tx_queue(dev, index);
nr_frags = skb_shinfo(skb)->nr_frags;
@@ -2242,15 +2244,8 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
unsigned int discards;
/* Clear status before servicing to reduce spurious interrupts */
- if (ring->index == DESC_INDEX) {
- bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_DONE,
- INTRL2_CPU_CLEAR);
- } else {
- mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index);
- bcmgenet_intrl2_1_writel(priv,
- mask,
- INTRL2_CPU_CLEAR);
- }
+ mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index);
+ bcmgenet_intrl2_1_writel(priv, mask, INTRL2_CPU_CLEAR);
p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);
@@ -2399,7 +2394,7 @@ static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
if (work_done < budget) {
napi_complete_done(napi, work_done);
- ring->int_enable(ring);
+ bcmgenet_rx_ring_int_enable(ring);
}
if (ring->dim.use_dim) {
@@ -2523,7 +2518,7 @@ static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
} else if (priv->ext_phy) {
int0_enable |= UMAC_IRQ_LINK_EVENT;
} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
- if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
+ if (bcmgenet_has_moca_link_det(priv))
int0_enable |= UMAC_IRQ_LINK_EVENT;
}
bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
@@ -2588,8 +2583,8 @@ static void init_umac(struct bcmgenet_priv *priv)
}
/* Enable MDIO interrupts on GENET v3+ */
- if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
- int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
+ if (bcmgenet_has_mdio_intr(priv))
+ int0_enable |= UMAC_IRQ_MDIO_EVENT;
bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
@@ -2639,15 +2634,6 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
spin_lock_init(&ring->lock);
ring->priv = priv;
ring->index = index;
- if (index == DESC_INDEX) {
- ring->queue = 0;
- ring->int_enable = bcmgenet_tx_ring16_int_enable;
- ring->int_disable = bcmgenet_tx_ring16_int_disable;
- } else {
- ring->queue = index + 1;
- ring->int_enable = bcmgenet_tx_ring_int_enable;
- ring->int_disable = bcmgenet_tx_ring_int_disable;
- }
ring->cbs = priv->tx_cbs + start_ptr;
ring->size = size;
ring->clean_ptr = start_ptr;
@@ -2658,8 +2644,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
ring->end_ptr = end_ptr - 1;
ring->prod_index = 0;
- /* Set flow period for ring != 16 */
- if (index != DESC_INDEX)
+ /* Set flow period for ring != 0 */
+ if (index)
flow_period_val = ENET_MAX_MTU_SIZE << 16;
bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
@@ -2697,13 +2683,6 @@ static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
ring->priv = priv;
ring->index = index;
- if (index == DESC_INDEX) {
- ring->int_enable = bcmgenet_rx_ring16_int_enable;
- ring->int_disable = bcmgenet_rx_ring16_int_disable;
- } else {
- ring->int_enable = bcmgenet_rx_ring_int_enable;
- ring->int_disable = bcmgenet_rx_ring_int_disable;
- }
ring->cbs = priv->rx_cbs + start_ptr;
ring->size = size;
ring->c_index = 0;
@@ -2749,15 +2728,11 @@ static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
unsigned int i;
struct bcmgenet_tx_ring *ring;
- for (i = 0; i < priv->hw_params->tx_queues; ++i) {
+ for (i = 0; i <= priv->hw_params->tx_queues; ++i) {
ring = &priv->tx_rings[i];
napi_enable(&ring->napi);
- ring->int_enable(ring);
+ bcmgenet_tx_ring_int_enable(ring);
}
-
- ring = &priv->tx_rings[DESC_INDEX];
- napi_enable(&ring->napi);
- ring->int_enable(ring);
}
static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
@@ -2765,13 +2740,10 @@ static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
unsigned int i;
struct bcmgenet_tx_ring *ring;
- for (i = 0; i < priv->hw_params->tx_queues; ++i) {
+ for (i = 0; i <= priv->hw_params->tx_queues; ++i) {
ring = &priv->tx_rings[i];
napi_disable(&ring->napi);
}
-
- ring = &priv->tx_rings[DESC_INDEX];
- napi_disable(&ring->napi);
}
static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv)
@@ -2779,82 +2751,104 @@ static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv)
unsigned int i;
struct bcmgenet_tx_ring *ring;
- for (i = 0; i < priv->hw_params->tx_queues; ++i) {
+ for (i = 0; i <= priv->hw_params->tx_queues; ++i) {
ring = &priv->tx_rings[i];
netif_napi_del(&ring->napi);
}
+}
- ring = &priv->tx_rings[DESC_INDEX];
- netif_napi_del(&ring->napi);
+static int bcmgenet_tdma_disable(struct bcmgenet_priv *priv)
+{
+ int timeout = 0;
+ u32 reg, mask;
+
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ mask = (1 << (priv->hw_params->tx_queues + 1)) - 1;
+ mask = (mask << DMA_RING_BUF_EN_SHIFT) | DMA_EN;
+ reg &= ~mask;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+
+ /* Check DMA status register to confirm DMA is disabled */
+ while (timeout++ < DMA_TIMEOUT_VAL) {
+ reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
+ if ((reg & mask) == mask)
+ return 0;
+
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int bcmgenet_rdma_disable(struct bcmgenet_priv *priv)
+{
+ int timeout = 0;
+ u32 reg, mask;
+
+ reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+ mask = (1 << (priv->hw_params->rx_queues + 1)) - 1;
+ mask = (mask << DMA_RING_BUF_EN_SHIFT) | DMA_EN;
+ reg &= ~mask;
+ bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+ /* Check DMA status register to confirm DMA is disabled */
+ while (timeout++ < DMA_TIMEOUT_VAL) {
+ reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
+ if ((reg & mask) == mask)
+ return 0;
+
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
}
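Both helpers above clear a per-ring enable mask plus DMA_EN and then poll DMA_STATUS until every cleared bit reads back as disabled. For the common 4-queue configuration the mask works out as follows (a worked example; DMA_RING_BUF_EN_SHIFT comes from bcmgenet.h):

/* tx_queues = 4 -> rings 0..4 -> low five bits set */
mask = (1 << (4 + 1)) - 1;				/* 0x1f */
mask = (mask << DMA_RING_BUF_EN_SHIFT) | DMA_EN;	/* ring + global enables */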
/* Initialize Tx queues
*
- * Queues 0-3 are priority-based, each one has 32 descriptors,
- * with queue 0 being the highest priority queue.
+ * Queues 1-4 are priority-based, each one has 32 descriptors,
+ * with queue 1 being the highest priority queue.
*
- * Queue 16 is the default Tx queue with
- * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
+ * Queue 0 is the default Tx queue with
+ * GENET_Q0_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
*
* The transmit control block pool is then partitioned as follows:
- * - Tx queue 0 uses tx_cbs[0..31]
- * - Tx queue 1 uses tx_cbs[32..63]
- * - Tx queue 2 uses tx_cbs[64..95]
- * - Tx queue 3 uses tx_cbs[96..127]
- * - Tx queue 16 uses tx_cbs[128..255]
+ * - Tx queue 0 uses tx_cbs[0..127]
+ * - Tx queue 1 uses tx_cbs[128..159]
+ * - Tx queue 2 uses tx_cbs[160..191]
+ * - Tx queue 3 uses tx_cbs[192..223]
+ * - Tx queue 4 uses tx_cbs[224..255]
*/
static void bcmgenet_init_tx_queues(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- u32 i, dma_enable;
- u32 dma_ctrl, ring_cfg;
- u32 dma_priority[3] = {0, 0, 0};
-
- dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
- dma_enable = dma_ctrl & DMA_EN;
- dma_ctrl &= ~DMA_EN;
- bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
-
- dma_ctrl = 0;
- ring_cfg = 0;
+ unsigned int start = 0, end = GENET_Q0_TX_BD_CNT;
+ u32 i, ring_mask, dma_priority[3] = {0, 0, 0};
/* Enable strict priority arbiter mode */
bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);
/* Initialize Tx priority queues */
- for (i = 0; i < priv->hw_params->tx_queues; i++) {
- bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q,
- i * priv->hw_params->tx_bds_per_q,
- (i + 1) * priv->hw_params->tx_bds_per_q);
- ring_cfg |= (1 << i);
- dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
+ for (i = 0; i <= priv->hw_params->tx_queues; i++) {
+ bcmgenet_init_tx_ring(priv, i, end - start, start, end);
+ start = end;
+ end += priv->hw_params->tx_bds_per_q;
dma_priority[DMA_PRIO_REG_INDEX(i)] |=
- ((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i));
+ (i ? GENET_Q1_PRIORITY : GENET_Q0_PRIORITY)
+ << DMA_PRIO_REG_SHIFT(i);
}
- /* Initialize Tx default queue 16 */
- bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT,
- priv->hw_params->tx_queues *
- priv->hw_params->tx_bds_per_q,
- TOTAL_DESC);
- ring_cfg |= (1 << DESC_INDEX);
- dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
- dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |=
- ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) <<
- DMA_PRIO_REG_SHIFT(DESC_INDEX));
-
/* Set Tx queue priorities */
bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0);
bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);
- /* Enable Tx queues */
- bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG);
+ /* Configure Tx queues as descriptor rings */
+ ring_mask = (1 << (priv->hw_params->tx_queues + 1)) - 1;
+ bcmgenet_tdma_writel(priv, ring_mask, DMA_RING_CFG);
- /* Enable Tx DMA */
- if (dma_enable)
- dma_ctrl |= DMA_EN;
- bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
+ /* Enable Tx rings */
+ ring_mask <<= DMA_RING_BUF_EN_SHIFT;
+ bcmgenet_tdma_writel(priv, ring_mask, DMA_CTRL);
}
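A trace of the (start, end) pairs produced by the init loop above makes the partitioning from the header comment concrete, assuming the default TOTAL_DESC = 256, tx_queues = 4 and tx_bds_per_q = 32:

/* i = 0: start =   0, end = 128 -> ring 0, 128 BDs (GENET_Q0_TX_BD_CNT)
 * i = 1: start = 128, end = 160 -> ring 1,  32 BDs
 * i = 2: start = 160, end = 192 -> ring 2,  32 BDs
 * i = 3: start = 192, end = 224 -> ring 3,  32 BDs
 * i = 4: start = 224, end = 256 -> ring 4,  32 BDs
 */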
static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
@@ -2862,15 +2856,11 @@ static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
unsigned int i;
struct bcmgenet_rx_ring *ring;
- for (i = 0; i < priv->hw_params->rx_queues; ++i) {
+ for (i = 0; i <= priv->hw_params->rx_queues; ++i) {
ring = &priv->rx_rings[i];
napi_enable(&ring->napi);
- ring->int_enable(ring);
+ bcmgenet_rx_ring_int_enable(ring);
}
-
- ring = &priv->rx_rings[DESC_INDEX];
- napi_enable(&ring->napi);
- ring->int_enable(ring);
}
static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
@@ -2878,15 +2868,11 @@ static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
unsigned int i;
struct bcmgenet_rx_ring *ring;
- for (i = 0; i < priv->hw_params->rx_queues; ++i) {
+ for (i = 0; i <= priv->hw_params->rx_queues; ++i) {
ring = &priv->rx_rings[i];
napi_disable(&ring->napi);
cancel_work_sync(&ring->dim.dim.work);
}
-
- ring = &priv->rx_rings[DESC_INDEX];
- napi_disable(&ring->napi);
- cancel_work_sync(&ring->dim.dim.work);
}
static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
@@ -2894,13 +2880,10 @@ static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
unsigned int i;
struct bcmgenet_rx_ring *ring;
- for (i = 0; i < priv->hw_params->rx_queues; ++i) {
+ for (i = 0; i <= priv->hw_params->rx_queues; ++i) {
ring = &priv->rx_rings[i];
netif_napi_del(&ring->napi);
}
-
- ring = &priv->rx_rings[DESC_INDEX];
- netif_napi_del(&ring->napi);
}
/* Initialize Rx queues
@@ -2908,57 +2891,32 @@ static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
* Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be
* used to direct traffic to these queues.
*
- * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors.
+ * Queue 0 is also the default Rx queue with GENET_Q0_RX_BD_CNT descriptors.
*/
static int bcmgenet_init_rx_queues(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- u32 i;
- u32 dma_enable;
- u32 dma_ctrl;
- u32 ring_cfg;
+ unsigned int start = 0, end = GENET_Q0_RX_BD_CNT;
+ u32 i, ring_mask;
int ret;
- dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL);
- dma_enable = dma_ctrl & DMA_EN;
- dma_ctrl &= ~DMA_EN;
- bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
-
- dma_ctrl = 0;
- ring_cfg = 0;
-
/* Initialize Rx priority queues */
- for (i = 0; i < priv->hw_params->rx_queues; i++) {
- ret = bcmgenet_init_rx_ring(priv, i,
- priv->hw_params->rx_bds_per_q,
- i * priv->hw_params->rx_bds_per_q,
- (i + 1) *
- priv->hw_params->rx_bds_per_q);
+ for (i = 0; i <= priv->hw_params->rx_queues; i++) {
+ ret = bcmgenet_init_rx_ring(priv, i, end - start, start, end);
if (ret)
return ret;
- ring_cfg |= (1 << i);
- dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
+ start = end;
+ end += priv->hw_params->rx_bds_per_q;
}
- /* Initialize Rx default queue 16 */
- ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT,
- priv->hw_params->rx_queues *
- priv->hw_params->rx_bds_per_q,
- TOTAL_DESC);
- if (ret)
- return ret;
-
- ring_cfg |= (1 << DESC_INDEX);
- dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
-
- /* Enable rings */
- bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG);
+ /* Configure Rx queues as descriptor rings */
+ ring_mask = (1 << (priv->hw_params->rx_queues + 1)) - 1;
+ bcmgenet_rdma_writel(priv, ring_mask, DMA_RING_CFG);
- /* Configure ring as descriptor ring and re-enable DMA if enabled */
- if (dma_enable)
- dma_ctrl |= DMA_EN;
- bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
+ /* Enable Rx rings */
+ ring_mask <<= DMA_RING_BUF_EN_SHIFT;
+ bcmgenet_rdma_writel(priv, ring_mask, DMA_CTRL);
return 0;
}
@@ -2966,26 +2924,9 @@ static int bcmgenet_init_rx_queues(struct net_device *dev)
static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
{
int ret = 0;
- int timeout = 0;
- u32 reg;
- u32 dma_ctrl;
- int i;
/* Disable TDMA to stop add more frames in TX DMA */
- reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
- reg &= ~DMA_EN;
- bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
-
- /* Check TDMA status register to confirm TDMA is disabled */
- while (timeout++ < DMA_TIMEOUT_VAL) {
- reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
- if (reg & DMA_DISABLED)
- break;
-
- udelay(1);
- }
-
- if (timeout == DMA_TIMEOUT_VAL) {
if (bcmgenet_tdma_disable(priv) == -ETIMEDOUT) {
netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
ret = -ETIMEDOUT;
}
@@ -2994,39 +2935,11 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
usleep_range(10000, 20000);
/* Disable RDMA */
- reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
- reg &= ~DMA_EN;
- bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
-
- timeout = 0;
- /* Check RDMA status register to confirm RDMA is disabled */
- while (timeout++ < DMA_TIMEOUT_VAL) {
- reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
- if (reg & DMA_DISABLED)
- break;
-
- udelay(1);
- }
-
- if (timeout == DMA_TIMEOUT_VAL) {
if (bcmgenet_rdma_disable(priv) == -ETIMEDOUT) {
netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
ret = -ETIMEDOUT;
}
- dma_ctrl = 0;
- for (i = 0; i < priv->hw_params->rx_queues; i++)
- dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
- reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
- reg &= ~dma_ctrl;
- bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
-
- dma_ctrl = 0;
- for (i = 0; i < priv->hw_params->tx_queues; i++)
- dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
- reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
- reg &= ~dma_ctrl;
- bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
-
return ret;
}
@@ -3038,32 +2951,53 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
bcmgenet_fini_rx_napi(priv);
bcmgenet_fini_tx_napi(priv);
- for (i = 0; i < priv->num_tx_bds; i++)
- dev_kfree_skb(bcmgenet_free_tx_cb(&priv->pdev->dev,
- priv->tx_cbs + i));
-
- for (i = 0; i < priv->hw_params->tx_queues; i++) {
- txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue);
+ for (i = 0; i <= priv->hw_params->tx_queues; i++) {
+ txq = netdev_get_tx_queue(priv->dev, i);
netdev_tx_reset_queue(txq);
}
- txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[DESC_INDEX].queue);
- netdev_tx_reset_queue(txq);
-
bcmgenet_free_rx_buffers(priv);
kfree(priv->rx_cbs);
kfree(priv->tx_cbs);
}
/* init_edma: Initialize DMA control register */
-static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
+static int bcmgenet_init_dma(struct bcmgenet_priv *priv, bool flush_rx)
{
- int ret;
- unsigned int i;
struct enet_cb *cb;
+ unsigned int i;
+ int ret;
+ u32 reg;
netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
+ /* Disable TX DMA */
+ ret = bcmgenet_tdma_disable(priv);
+ if (ret) {
+ netdev_err(priv->dev, "failed to halt Tx DMA\n");
+ return ret;
+ }
+
+ /* Disable RX DMA */
+ ret = bcmgenet_rdma_disable(priv);
+ if (ret) {
+ netdev_err(priv->dev, "failed to halt Rx DMA\n");
+ return ret;
+ }
+
+ /* Flush TX queues */
+ bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
+ udelay(10);
+ bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);
+
+ if (flush_rx) {
+ reg = bcmgenet_rbuf_ctrl_get(priv);
+ bcmgenet_rbuf_ctrl_set(priv, reg | BIT(0));
+ udelay(10);
+ bcmgenet_rbuf_ctrl_set(priv, reg);
+ udelay(10);
+ }
+
/* Initialize common Rx ring structures */
priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
priv->num_rx_bds = TOTAL_DESC;
@@ -3113,6 +3047,15 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
/* Initialize Tx queues */
bcmgenet_init_tx_queues(priv->dev);
+ /* Enable RX/TX DMA */
+ reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+ reg |= DMA_EN;
+ bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ reg |= DMA_EN;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+
return 0;
}
@@ -3142,7 +3085,7 @@ static void bcmgenet_irq_task(struct work_struct *work)
}
-/* bcmgenet_isr1: handle Rx and Tx priority queues */
+/* bcmgenet_isr1: handle Rx and Tx queues */
static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
{
struct bcmgenet_priv *priv = dev_id;
@@ -3161,7 +3104,7 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
"%s: IRQ=0x%x\n", __func__, status);
	/* Check Rx queue interrupts */
- for (index = 0; index < priv->hw_params->rx_queues; index++) {
+ for (index = 0; index <= priv->hw_params->rx_queues; index++) {
if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
continue;
@@ -3169,20 +3112,20 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
rx_ring->dim.event_ctr++;
if (likely(napi_schedule_prep(&rx_ring->napi))) {
- rx_ring->int_disable(rx_ring);
+ bcmgenet_rx_ring_int_disable(rx_ring);
__napi_schedule_irqoff(&rx_ring->napi);
}
}
	/* Check Tx queue interrupts */
- for (index = 0; index < priv->hw_params->tx_queues; index++) {
+ for (index = 0; index <= priv->hw_params->tx_queues; index++) {
if (!(status & BIT(index)))
continue;
tx_ring = &priv->tx_rings[index];
if (likely(napi_schedule_prep(&tx_ring->napi))) {
- tx_ring->int_disable(tx_ring);
+ bcmgenet_tx_ring_int_disable(tx_ring);
__napi_schedule_irqoff(&tx_ring->napi);
}
}
@@ -3190,12 +3133,10 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
return IRQ_HANDLED;
}
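
The two loops above imply the following INTRL2_1 status layout; ring counts vary per SoC:

/*
 * bits 0 .. tx_queues             one Tx-ring-done bit per ring
 * bits S .. S + rx_queues         one Rx-ring-done bit per ring,
 *                                 where S = UMAC_IRQ1_RX_INTR_SHIFT
 */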
-/* bcmgenet_isr0: handle Rx and Tx default queues + other stuff */
+/* bcmgenet_isr0: handle miscellaneous non-ring interrupts (MDIO, link) */
static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
{
struct bcmgenet_priv *priv = dev_id;
- struct bcmgenet_rx_ring *rx_ring;
- struct bcmgenet_tx_ring *tx_ring;
unsigned int status;
unsigned long flags;
@@ -3209,29 +3150,8 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
netif_dbg(priv, intr, priv->dev,
"IRQ=0x%x\n", status);
- if (status & UMAC_IRQ_RXDMA_DONE) {
- rx_ring = &priv->rx_rings[DESC_INDEX];
- rx_ring->dim.event_ctr++;
-
- if (likely(napi_schedule_prep(&rx_ring->napi))) {
- rx_ring->int_disable(rx_ring);
- __napi_schedule_irqoff(&rx_ring->napi);
- }
- }
-
- if (status & UMAC_IRQ_TXDMA_DONE) {
- tx_ring = &priv->tx_rings[DESC_INDEX];
-
- if (likely(napi_schedule_prep(&tx_ring->napi))) {
- tx_ring->int_disable(tx_ring);
- __napi_schedule_irqoff(&tx_ring->napi);
- }
- }
-
- if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
- status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
+ if (bcmgenet_has_mdio_intr(priv) && status & UMAC_IRQ_MDIO_EVENT)
wake_up(&priv->wq);
- }
	/* all other interrupts of interest are handled in the bottom half */
status &= (UMAC_IRQ_LINK_EVENT | UMAC_IRQ_PHY_DET_R);
@@ -3285,56 +3205,6 @@ static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv,
put_unaligned_be16(addr_tmp, &addr[4]);
}
-/* Returns a reusable dma control register value */
-static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv, bool flush_rx)
-{
- unsigned int i;
- u32 reg;
- u32 dma_ctrl;
-
- /* disable DMA */
- dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
- for (i = 0; i < priv->hw_params->tx_queues; i++)
- dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
- reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
- reg &= ~dma_ctrl;
- bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
-
- dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
- for (i = 0; i < priv->hw_params->rx_queues; i++)
- dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
- reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
- reg &= ~dma_ctrl;
- bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
-
- bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
- udelay(10);
- bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);
-
- if (flush_rx) {
- reg = bcmgenet_rbuf_ctrl_get(priv);
- bcmgenet_rbuf_ctrl_set(priv, reg | BIT(0));
- udelay(10);
- bcmgenet_rbuf_ctrl_set(priv, reg);
- udelay(10);
- }
-
- return dma_ctrl;
-}
-
-static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
-{
- u32 reg;
-
- reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
- reg |= dma_ctrl;
- bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
-
- reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
- reg |= dma_ctrl;
- bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
-}
-
static void bcmgenet_netif_start(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -3358,7 +3228,6 @@ static void bcmgenet_netif_start(struct net_device *dev)
static int bcmgenet_open(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- unsigned long dma_ctrl;
int ret;
netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
@@ -3384,22 +3253,16 @@ static int bcmgenet_open(struct net_device *dev)
bcmgenet_set_hw_addr(priv, dev->dev_addr);
- /* Disable RX/TX DMA and flush TX and RX queues */
- dma_ctrl = bcmgenet_dma_disable(priv, true);
+ /* HFB init */
+ bcmgenet_hfb_init(priv);
/* Reinitialize TDMA and RDMA and SW housekeeping */
- ret = bcmgenet_init_dma(priv);
+ ret = bcmgenet_init_dma(priv, true);
if (ret) {
netdev_err(dev, "failed to initialize DMA\n");
goto err_clk_disable;
}
- /* Always enable ring 16 - descriptor ring */
- bcmgenet_enable_dma(priv, dma_ctrl);
-
- /* HFB init */
- bcmgenet_hfb_init(priv);
-
ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
dev->name, priv);
if (ret < 0) {
@@ -3446,19 +3309,21 @@ static void bcmgenet_netif_stop(struct net_device *dev, bool stop_phy)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- bcmgenet_disable_tx_napi(priv);
netif_tx_disable(dev);
/* Disable MAC receive */
+ bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL);
umac_enable_set(priv, CMD_RX_EN, false);
+ if (stop_phy)
+ phy_stop(dev->phydev);
+
bcmgenet_dma_teardown(priv);
	/* Disable MAC transmit; TX DMA must be disabled before this */
umac_enable_set(priv, CMD_TX_EN, false);
- if (stop_phy)
- phy_stop(dev->phydev);
+ bcmgenet_disable_tx_napi(priv);
bcmgenet_disable_rx_napi(priv);
bcmgenet_intr_disable(priv);
@@ -3506,16 +3371,11 @@ static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
if (!netif_msg_tx_err(priv))
return;
- txq = netdev_get_tx_queue(priv->dev, ring->queue);
+ txq = netdev_get_tx_queue(priv->dev, ring->index);
spin_lock(&ring->lock);
- if (ring->index == DESC_INDEX) {
- intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
- intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE;
- } else {
- intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
- intmsk = 1 << ring->index;
- }
+ intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
+ intmsk = 1 << ring->index;
c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX);
txq_stopped = netif_tx_queue_stopped(txq);
@@ -3529,7 +3389,7 @@ static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
"(sw)c_index: %d (hw)c_index: %d\n"
"(sw)clean_p: %d (sw)write_p: %d\n"
"(sw)cb_ptr: %d (sw)end_ptr: %d\n",
- ring->index, ring->queue,
+ ring->index, ring->index,
txq_stopped ? "stopped" : "active",
intsts & intmsk ? "enabled" : "disabled",
free_bds, ring->size,
@@ -3542,25 +3402,20 @@ static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
static void bcmgenet_timeout(struct net_device *dev, unsigned int txqueue)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- u32 int0_enable = 0;
u32 int1_enable = 0;
unsigned int q;
netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
- for (q = 0; q < priv->hw_params->tx_queues; q++)
+ for (q = 0; q <= priv->hw_params->tx_queues; q++)
bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
- bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]);
bcmgenet_tx_reclaim_all(dev);
- for (q = 0; q < priv->hw_params->tx_queues; q++)
+ for (q = 0; q <= priv->hw_params->tx_queues; q++)
int1_enable |= (1 << q);
- int0_enable = UMAC_IRQ_TXDMA_DONE;
-
/* Re-enable TX interrupts if disabled */
- bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
netif_trans_update(dev);
@@ -3664,16 +3519,13 @@ static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev)
struct bcmgenet_rx_ring *rx_ring;
unsigned int q;
- for (q = 0; q < priv->hw_params->tx_queues; q++) {
+ for (q = 0; q <= priv->hw_params->tx_queues; q++) {
tx_ring = &priv->tx_rings[q];
tx_bytes += tx_ring->bytes;
tx_packets += tx_ring->packets;
}
- tx_ring = &priv->tx_rings[DESC_INDEX];
- tx_bytes += tx_ring->bytes;
- tx_packets += tx_ring->packets;
- for (q = 0; q < priv->hw_params->rx_queues; q++) {
+ for (q = 0; q <= priv->hw_params->rx_queues; q++) {
rx_ring = &priv->rx_rings[q];
rx_bytes += rx_ring->bytes;
@@ -3681,11 +3533,6 @@ static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev)
rx_errors += rx_ring->errors;
rx_dropped += rx_ring->dropped;
}
- rx_ring = &priv->rx_rings[DESC_INDEX];
- rx_bytes += rx_ring->bytes;
- rx_packets += rx_ring->packets;
- rx_errors += rx_ring->errors;
- rx_dropped += rx_ring->dropped;
dev->stats.tx_bytes = tx_bytes;
dev->stats.tx_packets = tx_packets;
@@ -3726,128 +3573,109 @@ static const struct net_device_ops bcmgenet_netdev_ops = {
.ndo_change_carrier = bcmgenet_change_carrier,
};
-/* Array of GENET hardware parameters/characteristics */
-static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
- [GENET_V1] = {
- .tx_queues = 0,
- .tx_bds_per_q = 0,
- .rx_queues = 0,
- .rx_bds_per_q = 0,
- .bp_in_en_shift = 16,
- .bp_in_mask = 0xffff,
- .hfb_filter_cnt = 16,
- .qtag_mask = 0x1F,
- .hfb_offset = 0x1000,
- .rdma_offset = 0x2000,
- .tdma_offset = 0x3000,
- .words_per_bd = 2,
- },
- [GENET_V2] = {
- .tx_queues = 4,
- .tx_bds_per_q = 32,
- .rx_queues = 0,
- .rx_bds_per_q = 0,
- .bp_in_en_shift = 16,
- .bp_in_mask = 0xffff,
- .hfb_filter_cnt = 16,
- .qtag_mask = 0x1F,
- .tbuf_offset = 0x0600,
- .hfb_offset = 0x1000,
- .hfb_reg_offset = 0x2000,
- .rdma_offset = 0x3000,
- .tdma_offset = 0x4000,
- .words_per_bd = 2,
- .flags = GENET_HAS_EXT,
- },
- [GENET_V3] = {
- .tx_queues = 4,
- .tx_bds_per_q = 32,
- .rx_queues = 0,
- .rx_bds_per_q = 0,
- .bp_in_en_shift = 17,
- .bp_in_mask = 0x1ffff,
- .hfb_filter_cnt = 48,
- .hfb_filter_size = 128,
- .qtag_mask = 0x3F,
- .tbuf_offset = 0x0600,
- .hfb_offset = 0x8000,
- .hfb_reg_offset = 0xfc00,
- .rdma_offset = 0x10000,
- .tdma_offset = 0x11000,
- .words_per_bd = 2,
- .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR |
- GENET_HAS_MOCA_LINK_DET,
- },
- [GENET_V4] = {
- .tx_queues = 4,
- .tx_bds_per_q = 32,
- .rx_queues = 0,
- .rx_bds_per_q = 0,
- .bp_in_en_shift = 17,
- .bp_in_mask = 0x1ffff,
- .hfb_filter_cnt = 48,
- .hfb_filter_size = 128,
- .qtag_mask = 0x3F,
- .tbuf_offset = 0x0600,
- .hfb_offset = 0x8000,
- .hfb_reg_offset = 0xfc00,
- .rdma_offset = 0x2000,
- .tdma_offset = 0x4000,
- .words_per_bd = 3,
- .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
- GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
- },
- [GENET_V5] = {
- .tx_queues = 4,
- .tx_bds_per_q = 32,
- .rx_queues = 0,
- .rx_bds_per_q = 0,
- .bp_in_en_shift = 17,
- .bp_in_mask = 0x1ffff,
- .hfb_filter_cnt = 48,
- .hfb_filter_size = 128,
- .qtag_mask = 0x3F,
- .tbuf_offset = 0x0600,
- .hfb_offset = 0x8000,
- .hfb_reg_offset = 0xfc00,
- .rdma_offset = 0x2000,
- .tdma_offset = 0x4000,
- .words_per_bd = 3,
- .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
- GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
- },
+/* GENET hardware parameters/characteristics */
+static const struct bcmgenet_hw_params bcmgenet_hw_params_v1 = {
+ .tx_queues = 0,
+ .tx_bds_per_q = 0,
+ .rx_queues = 0,
+ .rx_bds_per_q = 0,
+ .bp_in_en_shift = 16,
+ .bp_in_mask = 0xffff,
+ .hfb_filter_cnt = 16,
+ .hfb_filter_size = 64,
+ .qtag_mask = 0x1F,
+ .hfb_offset = 0x1000,
+ .hfb_reg_offset = GENET_RBUF_OFF + RBUF_HFB_CTRL_V1,
+ .rdma_offset = 0x2000,
+ .tdma_offset = 0x3000,
+ .words_per_bd = 2,
+};
+
+static const struct bcmgenet_hw_params bcmgenet_hw_params_v2 = {
+ .tx_queues = 4,
+ .tx_bds_per_q = 32,
+ .rx_queues = 0,
+ .rx_bds_per_q = 0,
+ .bp_in_en_shift = 16,
+ .bp_in_mask = 0xffff,
+ .hfb_filter_cnt = 16,
+ .hfb_filter_size = 64,
+ .qtag_mask = 0x1F,
+ .tbuf_offset = 0x0600,
+ .hfb_offset = 0x1000,
+ .hfb_reg_offset = 0x2000,
+ .rdma_offset = 0x3000,
+ .tdma_offset = 0x4000,
+ .words_per_bd = 2,
+};
+
+static const struct bcmgenet_hw_params bcmgenet_hw_params_v3 = {
+ .tx_queues = 4,
+ .tx_bds_per_q = 32,
+ .rx_queues = 0,
+ .rx_bds_per_q = 0,
+ .bp_in_en_shift = 17,
+ .bp_in_mask = 0x1ffff,
+ .hfb_filter_cnt = 48,
+ .hfb_filter_size = 128,
+ .qtag_mask = 0x3F,
+ .tbuf_offset = 0x0600,
+ .hfb_offset = 0x8000,
+ .hfb_reg_offset = 0xfc00,
+ .rdma_offset = 0x10000,
+ .tdma_offset = 0x11000,
+ .words_per_bd = 2,
+};
+
+static const struct bcmgenet_hw_params bcmgenet_hw_params_v4 = {
+ .tx_queues = 4,
+ .tx_bds_per_q = 32,
+ .rx_queues = 0,
+ .rx_bds_per_q = 0,
+ .bp_in_en_shift = 17,
+ .bp_in_mask = 0x1ffff,
+ .hfb_filter_cnt = 48,
+ .hfb_filter_size = 128,
+ .qtag_mask = 0x3F,
+ .tbuf_offset = 0x0600,
+ .hfb_offset = 0x8000,
+ .hfb_reg_offset = 0xfc00,
+ .rdma_offset = 0x2000,
+ .tdma_offset = 0x4000,
+ .words_per_bd = 3,
};
/* Infer hardware parameters from the detected GENET version */
static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
{
- struct bcmgenet_hw_params *params;
+ const struct bcmgenet_hw_params *params;
u32 reg;
u8 major;
u16 gphy_rev;
- if (GENET_IS_V5(priv) || GENET_IS_V4(priv)) {
- bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
- genet_dma_ring_regs = genet_dma_ring_regs_v4;
- } else if (GENET_IS_V3(priv)) {
+ /* default to latest values */
+ params = &bcmgenet_hw_params_v4;
+ bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
+ genet_dma_ring_regs = genet_dma_ring_regs_v4;
+ if (GENET_IS_V3(priv)) {
+ params = &bcmgenet_hw_params_v3;
bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
} else if (GENET_IS_V2(priv)) {
+ params = &bcmgenet_hw_params_v2;
bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
} else if (GENET_IS_V1(priv)) {
+ params = &bcmgenet_hw_params_v1;
bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
}
-
- /* enum genet_version starts at 1 */
- priv->hw_params = &bcmgenet_hw_params[priv->version];
- params = priv->hw_params;
+ priv->hw_params = params;
/* Read GENET HW version */
reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
major = (reg >> 24 & 0x0f);
- if (major == 6)
+ if (major == 6 || major == 7)
major = 5;
else if (major == 5)
major = 4;
@@ -3898,7 +3726,7 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
}
#ifdef CONFIG_PHYS_ADDR_T_64BIT
- if (!(params->flags & GENET_HAS_40BITS))
+ if (!bcmgenet_has_40bits(priv))
pr_warn("GENET does not support 40-bits PA\n");
#endif
@@ -3923,7 +3751,7 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
struct bcmgenet_plat_data {
enum bcmgenet_version version;
u32 dma_max_burst_length;
- bool ephy_16nm;
+ u32 flags;
};
static const struct bcmgenet_plat_data v1_plat_data = {
@@ -3934,32 +3762,43 @@ static const struct bcmgenet_plat_data v1_plat_data = {
static const struct bcmgenet_plat_data v2_plat_data = {
.version = GENET_V2,
.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+ .flags = GENET_HAS_EXT,
};
static const struct bcmgenet_plat_data v3_plat_data = {
.version = GENET_V3,
.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+ .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR |
+ GENET_HAS_MOCA_LINK_DET,
};
static const struct bcmgenet_plat_data v4_plat_data = {
.version = GENET_V4,
.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+ .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
+ GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
};
static const struct bcmgenet_plat_data v5_plat_data = {
.version = GENET_V5,
.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+ .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
+ GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
};
static const struct bcmgenet_plat_data bcm2711_plat_data = {
.version = GENET_V5,
.dma_max_burst_length = 0x08,
+ .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
+ GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
};
static const struct bcmgenet_plat_data bcm7712_plat_data = {
.version = GENET_V5,
.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
- .ephy_16nm = true,
+ .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
+ GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET |
+ GENET_HAS_EPHY_16NM,
};
static const struct of_device_id bcmgenet_match[] = {
@@ -4057,7 +3896,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
if (pdata) {
priv->version = pdata->version;
priv->dma_max_burst_length = pdata->dma_max_burst_length;
- priv->ephy_16nm = pdata->ephy_16nm;
+ priv->flags = pdata->flags;
} else {
priv->version = pd->genet_version;
priv->dma_max_burst_length = DMA_MAX_BURST_LENGTH;
@@ -4077,7 +3916,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
bcmgenet_set_hw_params(priv);
err = -EIO;
- if (priv->hw_params->flags & GENET_HAS_40BITS)
+ if (bcmgenet_has_40bits(priv))
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
if (err)
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
@@ -4132,16 +3971,13 @@ static int bcmgenet_probe(struct platform_device *pdev)
if (err)
goto err_clk_disable;
- /* setup number of real queues + 1 (GENET_V1 has 0 hardware queues
- * just the ring 16 descriptor based TX
- */
+ /* setup number of real queues + 1 */
netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);
/* Set default coalescing parameters */
- for (i = 0; i < priv->hw_params->rx_queues; i++)
+ for (i = 0; i <= priv->hw_params->rx_queues; i++)
priv->rx_rings[i].rx_max_coalesced_frames = 1;
- priv->rx_rings[DESC_INDEX].rx_max_coalesced_frames = 1;
/* libphy will determine the link state */
netif_carrier_off(dev);
@@ -4205,9 +4041,22 @@ static int bcmgenet_resume_noirq(struct device *d)
reg = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT);
if (reg & UMAC_IRQ_WAKE_EVENT)
pm_wakeup_event(&priv->pdev->dev, 0);
+
+ /* From WOL-enabled suspend, switch to regular clock */
+ if (!bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC))
+ return 0;
+
+ /* Failed so fall through to reset MAC */
}
- bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_WAKE_EVENT, INTRL2_CPU_CLEAR);
+ /* If this is an internal GPHY, power it back on now, before UniMAC is
+ * brought out of reset as absolutely no UniMAC activity is allowed
+ */
+ if (priv->internal_phy)
+ bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
+
+ /* take MAC out of reset */
+ bcmgenet_umac_reset(priv);
return 0;
}
@@ -4217,23 +4066,46 @@ static int bcmgenet_resume(struct device *d)
struct net_device *dev = dev_get_drvdata(d);
struct bcmgenet_priv *priv = netdev_priv(dev);
struct bcmgenet_rxnfc_rule *rule;
- unsigned long dma_ctrl;
int ret;
+ u32 reg;
if (!netif_running(dev))
return 0;
- /* From WOL-enabled suspend, switch to regular clock */
- if (device_may_wakeup(d) && priv->wolopts)
- bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
-
- /* If this is an internal GPHY, power it back on now, before UniMAC is
- * brought out of reset as absolutely no UniMAC activity is allowed
- */
- if (priv->internal_phy)
- bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
-
- bcmgenet_umac_reset(priv);
+ if (device_may_wakeup(d) && priv->wolopts) {
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ if (reg & CMD_RX_EN) {
+ /* Successfully exited WoL, just resume data flows */
+ list_for_each_entry(rule, &priv->rxnfc_list, list)
+ if (rule->state == BCMGENET_RXNFC_STATE_ENABLED)
+ bcmgenet_hfb_enable_filter(priv,
+ rule->fs.location + 1);
+ bcmgenet_hfb_enable_filter(priv, 0);
+ bcmgenet_set_rx_mode(dev);
+ bcmgenet_enable_rx_napi(priv);
+
+ /* Reinitialize Tx flows */
+ bcmgenet_tdma_disable(priv);
+ bcmgenet_init_tx_queues(priv->dev);
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ reg |= DMA_EN;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+ bcmgenet_enable_tx_napi(priv);
+
+ bcmgenet_link_intr_enable(priv);
+ phy_start_machine(dev->phydev);
+
+ netif_device_attach(dev);
+ enable_irq(priv->irq1);
+ return 0;
+ }
+ /* MAC was reset so complete bcmgenet_netif_stop() */
+ umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, false);
+ bcmgenet_rdma_disable(priv);
+ bcmgenet_intr_disable(priv);
+ bcmgenet_fini_dma(priv);
+ enable_irq(priv->irq1);
+ }
init_umac(priv);
@@ -4254,19 +4126,13 @@ static int bcmgenet_resume(struct device *d)
if (rule->state != BCMGENET_RXNFC_STATE_UNUSED)
bcmgenet_hfb_create_rxnfc_filter(priv, rule);
- /* Disable RX/TX DMA and flush TX queues */
- dma_ctrl = bcmgenet_dma_disable(priv, false);
-
/* Reinitialize TDMA and RDMA and SW housekeeping */
- ret = bcmgenet_init_dma(priv);
+ ret = bcmgenet_init_dma(priv, false);
if (ret) {
netdev_err(dev, "failed to initialize DMA\n");
goto out_clk_disable;
}
- /* Always enable ring 16 - descriptor ring */
- bcmgenet_enable_dma(priv, dma_ctrl);
-
if (!device_may_wakeup(d))
phy_resume(dev->phydev);
@@ -4287,19 +4153,52 @@ static int bcmgenet_suspend(struct device *d)
{
struct net_device *dev = dev_get_drvdata(d);
struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct bcmgenet_rxnfc_rule *rule;
+ u32 reg, hfb_enable = 0;
if (!netif_running(dev))
return 0;
netif_device_detach(dev);
- bcmgenet_netif_stop(dev, true);
+ if (device_may_wakeup(d) && priv->wolopts) {
+ netif_tx_disable(dev);
+
+ /* Suspend non-wake Rx data flows */
+ if (priv->wolopts & WAKE_FILTER)
+ list_for_each_entry(rule, &priv->rxnfc_list, list)
+ if (rule->fs.ring_cookie == RX_CLS_FLOW_WAKE &&
+ rule->state == BCMGENET_RXNFC_STATE_ENABLED)
+ hfb_enable |= 1 << rule->fs.location;
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) {
+ reg &= ~RBUF_HFB_FILTER_EN_MASK;
+ reg |= hfb_enable << (RBUF_HFB_FILTER_EN_SHIFT + 1);
+ } else {
+ bcmgenet_hfb_reg_writel(priv, hfb_enable << 1,
+ HFB_FLT_ENABLE_V3PLUS + 4);
+ }
+ if (!hfb_enable)
+ reg &= ~RBUF_HFB_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
- if (!device_may_wakeup(d))
- phy_suspend(dev->phydev);
+ /* Clear any old filter matches so only new matches wake */
+ bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
+ bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
- /* Disable filtering */
- bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL);
+ if (-ETIMEDOUT == bcmgenet_tdma_disable(priv))
+ netdev_warn(priv->dev,
+ "Timed out while disabling TX DMA\n");
+
+ bcmgenet_disable_tx_napi(priv);
+ bcmgenet_disable_rx_napi(priv);
+ disable_irq(priv->irq1);
+ bcmgenet_tx_reclaim_all(dev);
+ bcmgenet_fini_tx_napi(priv);
+ } else {
+ /* Teardown the interface */
+ bcmgenet_netif_stop(dev, true);
+ }
return 0;
}
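
Note the split above: GENET v1/v2 keep the per-filter enable bits inside HFB_CTRL itself (hence the RBUF_HFB_FILTER_EN_MASK manipulation), while v3+ parts expose them through the separate HFB_FLT_ENABLE_V3PLUS registers. In both paths the rule mask is shifted up by one because filter 0 is reserved — the resume path re-enables it separately and programs rules at fs.location + 1.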
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 43b923c48b14..10c631bbe964 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2014-2024 Broadcom
+ * Copyright (c) 2014-2025 Broadcom
*/
#ifndef __BCMGENET_H__
@@ -18,6 +18,9 @@
#include "../unimac.h"
+/* Maximum number of hardware queues, downsized if needed */
+#define GENET_MAX_MQ_CNT 4
+
/* total number of Buffer Descriptors, same for Rx/Tx */
#define TOTAL_DESC 256
@@ -271,6 +274,8 @@ struct bcmgenet_mib_counters {
/* Only valid for GENETv3+ */
#define UMAC_IRQ_MDIO_DONE (1 << 23)
#define UMAC_IRQ_MDIO_ERROR (1 << 24)
+#define UMAC_IRQ_MDIO_EVENT (UMAC_IRQ_MDIO_DONE | \
+ UMAC_IRQ_MDIO_ERROR)
/* INTRL2 instance 1 definitions */
#define UMAC_IRQ1_TX_INTR_MASK 0xFFFF
@@ -476,6 +481,7 @@ enum bcmgenet_version {
#define GENET_HAS_EXT (1 << 1)
#define GENET_HAS_MDIO_INTR (1 << 2)
#define GENET_HAS_MOCA_LINK_DET (1 << 3)
+#define GENET_HAS_EPHY_16NM (1 << 4)
/* BCMGENET hardware parameters, keep this structure nicely aligned
* since it is going to be used in hot paths
@@ -496,7 +502,6 @@ struct bcmgenet_hw_params {
u32 rdma_offset;
u32 tdma_offset;
u32 words_per_bd;
- u32 flags;
};
struct bcmgenet_skb_cb {
@@ -513,7 +518,6 @@ struct bcmgenet_tx_ring {
unsigned long packets;
unsigned long bytes;
unsigned int index; /* ring index */
- unsigned int queue; /* queue index */
struct enet_cb *cbs; /* tx ring buffer control block*/
unsigned int size; /* size of each tx ring */
unsigned int clean_ptr; /* Tx ring clean pointer */
@@ -523,8 +527,6 @@ struct bcmgenet_tx_ring {
unsigned int prod_index; /* Tx ring producer index SW copy */
unsigned int cb_ptr; /* Tx ring initial CB ptr */
unsigned int end_ptr; /* Tx ring end CB ptr */
- void (*int_enable)(struct bcmgenet_tx_ring *);
- void (*int_disable)(struct bcmgenet_tx_ring *);
struct bcmgenet_priv *priv;
};
@@ -553,8 +555,6 @@ struct bcmgenet_rx_ring {
struct bcmgenet_net_dim dim;
u32 rx_max_coalesced_frames;
u32 rx_coalesce_usecs;
- void (*int_enable)(struct bcmgenet_rx_ring *);
- void (*int_disable)(struct bcmgenet_rx_ring *);
struct bcmgenet_priv *priv;
};
@@ -583,7 +583,7 @@ struct bcmgenet_priv {
struct enet_cb *tx_cbs;
unsigned int num_tx_bds;
- struct bcmgenet_tx_ring tx_rings[DESC_INDEX + 1];
+ struct bcmgenet_tx_ring tx_rings[GENET_MAX_MQ_CNT + 1];
/* receive variables */
void __iomem *rx_bds;
@@ -593,10 +593,11 @@ struct bcmgenet_priv {
struct bcmgenet_rxnfc_rule rxnfc_rules[MAX_NUM_OF_FS_RULES];
struct list_head rxnfc_list;
- struct bcmgenet_rx_ring rx_rings[DESC_INDEX + 1];
+ struct bcmgenet_rx_ring rx_rings[GENET_MAX_MQ_CNT + 1];
/* other misc variables */
- struct bcmgenet_hw_params *hw_params;
+ const struct bcmgenet_hw_params *hw_params;
+ u32 flags;
unsigned autoneg_pause:1;
unsigned tx_pause:1;
unsigned rx_pause:1;
@@ -615,7 +616,6 @@ struct bcmgenet_priv {
phy_interface_t phy_interface;
int phy_addr;
int ext_phy;
- bool ephy_16nm;
/* Interrupt variables */
struct work_struct bcmgenet_irq_work;
@@ -643,13 +643,37 @@ struct bcmgenet_priv {
struct clk *clk_wol;
u32 wolopts;
u8 sopass[SOPASS_MAX];
- bool wol_active;
struct bcmgenet_mib_counters mib;
struct ethtool_keee eee;
};
+static inline bool bcmgenet_has_40bits(struct bcmgenet_priv *priv)
+{
+ return !!(priv->flags & GENET_HAS_40BITS);
+}
+
+static inline bool bcmgenet_has_ext(struct bcmgenet_priv *priv)
+{
+ return !!(priv->flags & GENET_HAS_EXT);
+}
+
+static inline bool bcmgenet_has_mdio_intr(struct bcmgenet_priv *priv)
+{
+ return !!(priv->flags & GENET_HAS_MDIO_INTR);
+}
+
+static inline bool bcmgenet_has_moca_link_det(struct bcmgenet_priv *priv)
+{
+ return !!(priv->flags & GENET_HAS_MOCA_LINK_DET);
+}
+
+static inline bool bcmgenet_has_ephy_16nm(struct bcmgenet_priv *priv)
+{
+ return !!(priv->flags & GENET_HAS_EPHY_16NM);
+}
+
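+/* These predicates replace open-coded flag tests against hw_params->flags;
+ * the conversion pattern used throughout this patch is:
+ *
+ *   Before: if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) ...
+ *   After:  if (bcmgenet_has_moca_link_det(priv)) ...
+ */
+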
#define GENET_IO_MACRO(name, offset) \
static inline u32 bcmgenet_##name##_readl(struct bcmgenet_priv *priv, \
u32 off) \
@@ -702,8 +726,8 @@ void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol);
int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol);
int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
enum bcmgenet_power_mode mode);
-void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
- enum bcmgenet_power_mode mode);
+int bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
+ enum bcmgenet_power_mode mode);
void bcmgenet_eee_enable_set(struct net_device *dev, bool enable,
bool tx_lpi_enabled);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index 3b082114f2e5..8fb551288298 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) Wake-on-LAN support
*
- * Copyright (c) 2014-2024 Broadcom
+ * Copyright (c) 2014-2025 Broadcom
*/
#define pr_fmt(fmt) "bcmgenet_wol: " fmt
@@ -145,8 +145,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
enum bcmgenet_power_mode mode)
{
struct net_device *dev = priv->dev;
- struct bcmgenet_rxnfc_rule *rule;
- u32 reg, hfb_ctrl_reg, hfb_enable = 0;
+ u32 reg, hfb_ctrl_reg;
int retries = 0;
if (mode != GENET_POWER_WOL_MAGIC) {
@@ -154,18 +153,6 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
return -EINVAL;
}
- /* Can't suspend with WoL if MAC is still in reset */
- spin_lock_bh(&priv->reg_lock);
- reg = bcmgenet_umac_readl(priv, UMAC_CMD);
- if (reg & CMD_SW_RESET)
- reg &= ~CMD_SW_RESET;
-
- /* disable RX */
- reg &= ~CMD_RX_EN;
- bcmgenet_umac_writel(priv, reg, UMAC_CMD);
- spin_unlock_bh(&priv->reg_lock);
- mdelay(10);
-
if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) {
reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
reg |= MPD_EN;
@@ -177,13 +164,8 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
}
hfb_ctrl_reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
- if (priv->wolopts & WAKE_FILTER) {
- list_for_each_entry(rule, &priv->rxnfc_list, list)
- if (rule->fs.ring_cookie == RX_CLS_FLOW_WAKE)
- hfb_enable |= (1 << rule->fs.location);
- reg = (hfb_ctrl_reg & ~RBUF_HFB_EN) | RBUF_ACPI_EN;
- bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
- }
+ reg = hfb_ctrl_reg | RBUF_ACPI_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
/* Do not leave UniMAC in MPD mode only */
retries = bcmgenet_poll_wol_status(priv);
@@ -198,15 +180,12 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
netif_dbg(priv, wol, dev, "MPD WOL-ready status set after %d msec\n",
retries);
- clk_prepare_enable(priv->clk_wol);
- priv->wol_active = 1;
+ /* Disable phy status updates while suspending */
+ mutex_lock(&dev->phydev->lock);
+ dev->phydev->state = PHY_READY;
+ mutex_unlock(&dev->phydev->lock);
- if (hfb_enable) {
- bcmgenet_hfb_reg_writel(priv, hfb_enable,
- HFB_FLT_ENABLE_V3PLUS + 4);
- hfb_ctrl_reg = RBUF_HFB_EN | RBUF_ACPI_EN;
- bcmgenet_hfb_reg_writel(priv, hfb_ctrl_reg, HFB_CTRL);
- }
+ clk_prepare_enable(priv->clk_wol);
/* Enable CRC forward */
spin_lock_bh(&priv->reg_lock);
@@ -214,13 +193,17 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
priv->crc_fwd_en = 1;
reg |= CMD_CRC_FWD;
+ /* Can't suspend with WoL if MAC is still in reset */
+ if (reg & CMD_SW_RESET)
+ reg &= ~CMD_SW_RESET;
+
/* Receiver must be enabled for WOL MP detection */
reg |= CMD_RX_EN;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
spin_unlock_bh(&priv->reg_lock);
reg = UMAC_IRQ_MPD_R;
- if (hfb_enable)
+ if (hfb_ctrl_reg & RBUF_HFB_EN)
reg |= UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM;
bcmgenet_intrl2_0_writel(priv, reg, INTRL2_CPU_MASK_CLEAR);
@@ -228,40 +211,42 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
return 0;
}
-void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
- enum bcmgenet_power_mode mode)
+int bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
+ enum bcmgenet_power_mode mode)
{
+ struct net_device *dev = priv->dev;
u32 reg;
if (mode != GENET_POWER_WOL_MAGIC) {
netif_err(priv, wol, priv->dev, "invalid mode: %d\n", mode);
- return;
+ return -EINVAL;
}
- if (!priv->wol_active)
- return; /* failed to suspend so skip the rest */
-
- priv->wol_active = 0;
clk_disable_unprepare(priv->clk_wol);
priv->crc_fwd_en = 0;
+ bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_WAKE_EVENT,
+ INTRL2_CPU_MASK_SET);
+ if (bcmgenet_has_mdio_intr(priv))
+ bcmgenet_intrl2_0_writel(priv,
+ UMAC_IRQ_MDIO_EVENT,
+ INTRL2_CPU_MASK_CLEAR);
+
/* Disable Magic Packet Detection */
if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) {
reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
if (!(reg & MPD_EN))
- return; /* already reset so skip the rest */
+ return -EPERM; /* already reset so skip the rest */
reg &= ~(MPD_EN | MPD_PW_EN);
bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
}
- /* Disable WAKE_FILTER Detection */
- if (priv->wolopts & WAKE_FILTER) {
- reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
- if (!(reg & RBUF_ACPI_EN))
- return; /* already reset so skip the rest */
- reg &= ~(RBUF_HFB_EN | RBUF_ACPI_EN);
- bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
- }
+ /* Disable ACPI mode */
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ if (!(reg & RBUF_ACPI_EN))
+ return -EPERM; /* already reset so skip the rest */
+ reg &= ~RBUF_ACPI_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
/* Disable CRC Forward */
spin_lock_bh(&priv->reg_lock);
@@ -269,4 +254,14 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
reg &= ~CMD_CRC_FWD;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
spin_unlock_bh(&priv->reg_lock);
+
+ /* Resume link status tracking */
+ mutex_lock(&dev->phydev->lock);
+ if (dev->phydev->link)
+ dev->phydev->state = PHY_RUNNING;
+ else
+ dev->phydev->state = PHY_NOLINK;
+ mutex_unlock(&dev->phydev->lock);
+
+ return 0;
}
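
Taken together with the power-down path, this forms a park/restore pattern for phylib: suspend forces the state machine to PHY_READY so link polling stops while WoL is armed, and the block above re-seeds PHY_RUNNING or PHY_NOLINK from the last known link so polling resumes cleanly; both transitions are made under phydev->lock.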
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index c4a3698cef66..71c619d2bea5 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET MDIO routines
*
- * Copyright (c) 2014-2024 Broadcom
+ * Copyright (c) 2014-2025 Broadcom
*/
#include <linux/acpi.h>
@@ -154,7 +154,7 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
u32 reg = 0;
/* EXT_GPHY_CTRL is only valid for GENETv4 and onward */
- if (GENET_IS_V4(priv) || priv->ephy_16nm) {
+ if (GENET_IS_V4(priv) || bcmgenet_has_ephy_16nm(priv)) {
reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL);
if (enable) {
reg &= ~EXT_CK25_DIS;
@@ -184,7 +184,7 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
{
- if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
+ if (bcmgenet_has_moca_link_det(priv))
fixed_phy_set_link_update(priv->dev->phydev,
bcmgenet_fixed_phy_link_update);
}
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index ece6f3b48327..3b9107003b00 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -19,6 +19,7 @@
#include <linux/ip.h>
#include <linux/prefetch.h>
#include <linux/module.h>
+#include <net/gro.h>
#include "bnad.h"
#include "bna.h"
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 2847278d9cd4..c9a5c8beb2fa 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -951,75 +951,73 @@ struct macb_tx_skb {
* device stats by a periodic timer.
*/
struct macb_stats {
- u32 rx_pause_frames;
- u32 tx_ok;
- u32 tx_single_cols;
- u32 tx_multiple_cols;
- u32 rx_ok;
- u32 rx_fcs_errors;
- u32 rx_align_errors;
- u32 tx_deferred;
- u32 tx_late_cols;
- u32 tx_excessive_cols;
- u32 tx_underruns;
- u32 tx_carrier_errors;
- u32 rx_resource_errors;
- u32 rx_overruns;
- u32 rx_symbol_errors;
- u32 rx_oversize_pkts;
- u32 rx_jabbers;
- u32 rx_undersize_pkts;
- u32 sqe_test_errors;
- u32 rx_length_mismatch;
- u32 tx_pause_frames;
+ u64 rx_pause_frames;
+ u64 tx_ok;
+ u64 tx_single_cols;
+ u64 tx_multiple_cols;
+ u64 rx_ok;
+ u64 rx_fcs_errors;
+ u64 rx_align_errors;
+ u64 tx_deferred;
+ u64 tx_late_cols;
+ u64 tx_excessive_cols;
+ u64 tx_underruns;
+ u64 tx_carrier_errors;
+ u64 rx_resource_errors;
+ u64 rx_overruns;
+ u64 rx_symbol_errors;
+ u64 rx_oversize_pkts;
+ u64 rx_jabbers;
+ u64 rx_undersize_pkts;
+ u64 sqe_test_errors;
+ u64 rx_length_mismatch;
+ u64 tx_pause_frames;
};
struct gem_stats {
- u32 tx_octets_31_0;
- u32 tx_octets_47_32;
- u32 tx_frames;
- u32 tx_broadcast_frames;
- u32 tx_multicast_frames;
- u32 tx_pause_frames;
- u32 tx_64_byte_frames;
- u32 tx_65_127_byte_frames;
- u32 tx_128_255_byte_frames;
- u32 tx_256_511_byte_frames;
- u32 tx_512_1023_byte_frames;
- u32 tx_1024_1518_byte_frames;
- u32 tx_greater_than_1518_byte_frames;
- u32 tx_underrun;
- u32 tx_single_collision_frames;
- u32 tx_multiple_collision_frames;
- u32 tx_excessive_collisions;
- u32 tx_late_collisions;
- u32 tx_deferred_frames;
- u32 tx_carrier_sense_errors;
- u32 rx_octets_31_0;
- u32 rx_octets_47_32;
- u32 rx_frames;
- u32 rx_broadcast_frames;
- u32 rx_multicast_frames;
- u32 rx_pause_frames;
- u32 rx_64_byte_frames;
- u32 rx_65_127_byte_frames;
- u32 rx_128_255_byte_frames;
- u32 rx_256_511_byte_frames;
- u32 rx_512_1023_byte_frames;
- u32 rx_1024_1518_byte_frames;
- u32 rx_greater_than_1518_byte_frames;
- u32 rx_undersized_frames;
- u32 rx_oversize_frames;
- u32 rx_jabbers;
- u32 rx_frame_check_sequence_errors;
- u32 rx_length_field_frame_errors;
- u32 rx_symbol_errors;
- u32 rx_alignment_errors;
- u32 rx_resource_errors;
- u32 rx_overruns;
- u32 rx_ip_header_checksum_errors;
- u32 rx_tcp_checksum_errors;
- u32 rx_udp_checksum_errors;
+ u64 tx_octets;
+ u64 tx_frames;
+ u64 tx_broadcast_frames;
+ u64 tx_multicast_frames;
+ u64 tx_pause_frames;
+ u64 tx_64_byte_frames;
+ u64 tx_65_127_byte_frames;
+ u64 tx_128_255_byte_frames;
+ u64 tx_256_511_byte_frames;
+ u64 tx_512_1023_byte_frames;
+ u64 tx_1024_1518_byte_frames;
+ u64 tx_greater_than_1518_byte_frames;
+ u64 tx_underrun;
+ u64 tx_single_collision_frames;
+ u64 tx_multiple_collision_frames;
+ u64 tx_excessive_collisions;
+ u64 tx_late_collisions;
+ u64 tx_deferred_frames;
+ u64 tx_carrier_sense_errors;
+ u64 rx_octets;
+ u64 rx_frames;
+ u64 rx_broadcast_frames;
+ u64 rx_multicast_frames;
+ u64 rx_pause_frames;
+ u64 rx_64_byte_frames;
+ u64 rx_65_127_byte_frames;
+ u64 rx_128_255_byte_frames;
+ u64 rx_256_511_byte_frames;
+ u64 rx_512_1023_byte_frames;
+ u64 rx_1024_1518_byte_frames;
+ u64 rx_greater_than_1518_byte_frames;
+ u64 rx_undersized_frames;
+ u64 rx_oversize_frames;
+ u64 rx_jabbers;
+ u64 rx_frame_check_sequence_errors;
+ u64 rx_length_field_frame_errors;
+ u64 rx_symbol_errors;
+ u64 rx_alignment_errors;
+ u64 rx_resource_errors;
+ u64 rx_overruns;
+ u64 rx_ip_header_checksum_errors;
+ u64 rx_tcp_checksum_errors;
+ u64 rx_udp_checksum_errors;
};
/* Describes the name and offset of an individual statistic register, as
@@ -1027,7 +1025,7 @@ struct gem_stats {
* this register should contribute to.
*/
struct gem_statistic {
- char stat_string[ETH_GSTRING_LEN];
+ char stat_string[ETH_GSTRING_LEN] __nonstring;
int offset;
u32 stat_bits;
};
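
The __nonstring annotation marks a fixed-size char array that is not required to be NUL-terminated, which keeps newer compilers from warning when a stat name exactly fills the ETH_GSTRING_LEN buffer.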
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index c1f57d96e63f..1fe8ec37491b 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -17,8 +17,6 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/io.h>
-#include <linux/gpio.h>
-#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -26,7 +24,6 @@
#include <linux/platform_device.h>
#include <linux/phylink.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/ip.h>
@@ -853,9 +850,7 @@ static int macb_mii_probe(struct net_device *dev)
struct macb *bp = netdev_priv(dev);
bp->phylink_sgmii_pcs.ops = &macb_phylink_pcs_ops;
- bp->phylink_sgmii_pcs.neg_mode = true;
bp->phylink_usx_pcs.ops = &macb_phylink_usx_pcs_ops;
- bp->phylink_usx_pcs.neg_mode = true;
bp->phylink_config.dev = &dev->dev;
bp->phylink_config.type = PHYLINK_NETDEV;
@@ -990,8 +985,8 @@ err_out:
static void macb_update_stats(struct macb *bp)
{
- u32 *p = &bp->hw_stats.macb.rx_pause_frames;
- u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
+ u64 *p = &bp->hw_stats.macb.rx_pause_frames;
+ u64 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
int offset = MACB_PFR;
WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
@@ -1081,15 +1076,18 @@ static void macb_tx_error_task(struct work_struct *work)
tx_error_task);
bool halt_timeout = false;
struct macb *bp = queue->bp;
+ u32 queue_index;
+ u32 packets = 0;
+ u32 bytes = 0;
struct macb_tx_skb *tx_skb;
struct macb_dma_desc *desc;
struct sk_buff *skb;
unsigned int tail;
unsigned long flags;
+ queue_index = queue - bp->queues;
netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
- (unsigned int)(queue - bp->queues),
- queue->tx_tail, queue->tx_head);
+ queue_index, queue->tx_tail, queue->tx_head);
/* Prevent the queue NAPI TX poll from running, as it calls
* macb_tx_complete(), which in turn may call netif_wake_subqueue().
@@ -1142,8 +1140,10 @@ static void macb_tx_error_task(struct work_struct *work)
skb->data);
bp->dev->stats.tx_packets++;
queue->stats.tx_packets++;
+ packets++;
bp->dev->stats.tx_bytes += skb->len;
queue->stats.tx_bytes += skb->len;
+ bytes += skb->len;
}
} else {
/* "Buffers exhausted mid-frame" errors may only happen
@@ -1160,6 +1160,9 @@ static void macb_tx_error_task(struct work_struct *work)
macb_tx_unmap(bp, tx_skb, 0);
}
+ netdev_tx_completed_queue(netdev_get_tx_queue(bp->dev, queue_index),
+ packets, bytes);
+
/* Set end of TX queue */
desc = macb_tx_desc(queue, 0);
macb_set_addr(bp, desc, 0);
@@ -1230,6 +1233,7 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
unsigned int tail;
unsigned int head;
int packets = 0;
+ u32 bytes = 0;
spin_lock(&queue->tx_ptr_lock);
head = queue->tx_head;
@@ -1271,6 +1275,7 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
bp->dev->stats.tx_bytes += skb->len;
queue->stats.tx_bytes += skb->len;
packets++;
+ bytes += skb->len;
}
/* Now we can safely release resources */
@@ -1285,6 +1290,9 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
}
}
+ netdev_tx_completed_queue(netdev_get_tx_queue(bp->dev, queue_index),
+ packets, bytes);
+
queue->tx_tail = tail;
if (__netif_subqueue_stopped(bp->dev, queue_index) &&
CIRC_CNT(queue->tx_head, queue->tx_tail,
@@ -2388,6 +2396,8 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Make newly initialized descriptor visible to hardware */
wmb();
skb_tx_timestamp(skb);
+ netdev_tx_sent_queue(netdev_get_tx_queue(bp->dev, queue_index),
+ skb->len);
spin_lock_irq(&bp->lock);
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
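
Together with the netdev_tx_completed_queue() and netdev_tx_reset_queue() calls added earlier in this file, this gives macb the standard byte-queue-limits (BQL) accounting; in outline:

/*
 * xmit path:      netdev_tx_sent_queue(txq, skb->len);
 * tx completion:  netdev_tx_completed_queue(txq, packets, bytes);
 * queue teardown: netdev_tx_reset_queue(txq);
 * The stack uses these byte counts to bound in-flight data per queue.
 */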
@@ -3023,6 +3033,7 @@ static int macb_close(struct net_device *dev)
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
napi_disable(&queue->napi_rx);
napi_disable(&queue->napi_tx);
+ netdev_tx_reset_queue(netdev_get_tx_queue(dev, q));
}
phylink_stop(bp->phylink);
@@ -3073,7 +3084,7 @@ static void gem_update_stats(struct macb *bp)
unsigned int i, q, idx;
unsigned long *stat;
- u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
+ u64 *p = &bp->hw_stats.gem.tx_octets;
for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
u32 offset = gem_statistics[i].offset;
@@ -3086,7 +3097,7 @@ static void gem_update_stats(struct macb *bp)
/* Add GEM_OCTTXH, GEM_OCTRXH */
val = bp->macb_reg_readl(bp, offset + 4);
bp->ethtool_stats[i] += ((u64)val) << 32;
- *(++p) += val;
+		*p += ((u64)val) << 32;
}
}
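
A minimal sketch of the split-counter combine performed above, with a hypothetical read_reg() standing in for bp->macb_reg_readl() (low word at offset, high word at offset + 4, per the GEM_OCTTXH/GEM_OCTRXH comment):

	u32 lo = read_reg(offset);	/* e.g. GEM_OCTTXL */
	u32 hi = read_reg(offset + 4);	/* e.g. GEM_OCTTXH */
	u64 total = (u64)hi << 32 | lo;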
@@ -3096,16 +3107,13 @@ static void gem_update_stats(struct macb *bp)
bp->ethtool_stats[idx++] = *stat;
}
-static struct net_device_stats *gem_get_stats(struct macb *bp)
+static void gem_get_stats(struct macb *bp, struct rtnl_link_stats64 *nstat)
{
struct gem_stats *hwstat = &bp->hw_stats.gem;
- struct net_device_stats *nstat = &bp->dev->stats;
-
- if (!netif_running(bp->dev))
- return nstat;
spin_lock_irq(&bp->stats_lock);
- gem_update_stats(bp);
+ if (netif_running(bp->dev))
+ gem_update_stats(bp);
nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
hwstat->rx_alignment_errors +
@@ -3135,8 +3143,6 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
nstat->tx_fifo_errors = hwstat->tx_underrun;
spin_unlock_irq(&bp->stats_lock);
-
- return nstat;
}
static void gem_get_ethtool_stats(struct net_device *dev,
@@ -3188,14 +3194,17 @@ static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
}
}
-static struct net_device_stats *macb_get_stats(struct net_device *dev)
+static void macb_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *nstat)
{
struct macb *bp = netdev_priv(dev);
- struct net_device_stats *nstat = &bp->dev->stats;
struct macb_stats *hwstat = &bp->hw_stats.macb;
- if (macb_is_gem(bp))
- return gem_get_stats(bp);
+ netdev_stats_to_stats64(nstat, &bp->dev->stats);
+ if (macb_is_gem(bp)) {
+ gem_get_stats(bp, nstat);
+ return;
+ }
/* read stats from hardware */
spin_lock_irq(&bp->stats_lock);
@@ -3233,8 +3242,170 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev)
nstat->tx_fifo_errors = hwstat->tx_underruns;
/* Don't know about heartbeat or window errors... */
spin_unlock_irq(&bp->stats_lock);
+}
+
+static void macb_get_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct macb_stats *hwstat = &bp->hw_stats.macb;
+
+ spin_lock_irq(&bp->stats_lock);
+ macb_update_stats(bp);
+ pause_stats->tx_pause_frames = hwstat->tx_pause_frames;
+ pause_stats->rx_pause_frames = hwstat->rx_pause_frames;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+static void gem_get_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct gem_stats *hwstat = &bp->hw_stats.gem;
+
+ spin_lock_irq(&bp->stats_lock);
+ gem_update_stats(bp);
+ pause_stats->tx_pause_frames = hwstat->tx_pause_frames;
+ pause_stats->rx_pause_frames = hwstat->rx_pause_frames;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+static void macb_get_eth_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct macb_stats *hwstat = &bp->hw_stats.macb;
+
+ spin_lock_irq(&bp->stats_lock);
+ macb_update_stats(bp);
+ mac_stats->FramesTransmittedOK = hwstat->tx_ok;
+ mac_stats->SingleCollisionFrames = hwstat->tx_single_cols;
+ mac_stats->MultipleCollisionFrames = hwstat->tx_multiple_cols;
+ mac_stats->FramesReceivedOK = hwstat->rx_ok;
+ mac_stats->FrameCheckSequenceErrors = hwstat->rx_fcs_errors;
+ mac_stats->AlignmentErrors = hwstat->rx_align_errors;
+ mac_stats->FramesWithDeferredXmissions = hwstat->tx_deferred;
+ mac_stats->LateCollisions = hwstat->tx_late_cols;
+ mac_stats->FramesAbortedDueToXSColls = hwstat->tx_excessive_cols;
+ mac_stats->FramesLostDueToIntMACXmitError = hwstat->tx_underruns;
+ mac_stats->CarrierSenseErrors = hwstat->tx_carrier_errors;
+ mac_stats->FramesLostDueToIntMACRcvError = hwstat->rx_overruns;
+ mac_stats->InRangeLengthErrors = hwstat->rx_length_mismatch;
+ mac_stats->FrameTooLongErrors = hwstat->rx_oversize_pkts;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+static void gem_get_eth_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct gem_stats *hwstat = &bp->hw_stats.gem;
- return nstat;
+ spin_lock_irq(&bp->stats_lock);
+ gem_update_stats(bp);
+ mac_stats->FramesTransmittedOK = hwstat->tx_frames;
+ mac_stats->SingleCollisionFrames = hwstat->tx_single_collision_frames;
+ mac_stats->MultipleCollisionFrames =
+ hwstat->tx_multiple_collision_frames;
+ mac_stats->FramesReceivedOK = hwstat->rx_frames;
+ mac_stats->FrameCheckSequenceErrors =
+ hwstat->rx_frame_check_sequence_errors;
+ mac_stats->AlignmentErrors = hwstat->rx_alignment_errors;
+ mac_stats->OctetsTransmittedOK = hwstat->tx_octets;
+ mac_stats->FramesWithDeferredXmissions = hwstat->tx_deferred_frames;
+ mac_stats->LateCollisions = hwstat->tx_late_collisions;
+ mac_stats->FramesAbortedDueToXSColls = hwstat->tx_excessive_collisions;
+ mac_stats->FramesLostDueToIntMACXmitError = hwstat->tx_underrun;
+ mac_stats->CarrierSenseErrors = hwstat->tx_carrier_sense_errors;
+ mac_stats->OctetsReceivedOK = hwstat->rx_octets;
+ mac_stats->MulticastFramesXmittedOK = hwstat->tx_multicast_frames;
+ mac_stats->BroadcastFramesXmittedOK = hwstat->tx_broadcast_frames;
+ mac_stats->MulticastFramesReceivedOK = hwstat->rx_multicast_frames;
+ mac_stats->BroadcastFramesReceivedOK = hwstat->rx_broadcast_frames;
+ mac_stats->InRangeLengthErrors = hwstat->rx_length_field_frame_errors;
+ mac_stats->FrameTooLongErrors = hwstat->rx_oversize_frames;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+/* TODO: Report SQE test errors when added to phy_stats */
+static void macb_get_eth_phy_stats(struct net_device *dev,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct macb_stats *hwstat = &bp->hw_stats.macb;
+
+ spin_lock_irq(&bp->stats_lock);
+ macb_update_stats(bp);
+ phy_stats->SymbolErrorDuringCarrier = hwstat->rx_symbol_errors;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+static void gem_get_eth_phy_stats(struct net_device *dev,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct gem_stats *hwstat = &bp->hw_stats.gem;
+
+ spin_lock_irq(&bp->stats_lock);
+ gem_update_stats(bp);
+ phy_stats->SymbolErrorDuringCarrier = hwstat->rx_symbol_errors;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+static void macb_get_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct macb_stats *hwstat = &bp->hw_stats.macb;
+
+ spin_lock_irq(&bp->stats_lock);
+ macb_update_stats(bp);
+ rmon_stats->undersize_pkts = hwstat->rx_undersize_pkts;
+ rmon_stats->oversize_pkts = hwstat->rx_oversize_pkts;
+ rmon_stats->jabbers = hwstat->rx_jabbers;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+static const struct ethtool_rmon_hist_range gem_rmon_ranges[] = {
+ { 64, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 16384 },
+ { },
+};
+
+static void gem_get_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct gem_stats *hwstat = &bp->hw_stats.gem;
+
+ spin_lock_irq(&bp->stats_lock);
+ gem_update_stats(bp);
+ rmon_stats->undersize_pkts = hwstat->rx_undersized_frames;
+ rmon_stats->oversize_pkts = hwstat->rx_oversize_frames;
+ rmon_stats->jabbers = hwstat->rx_jabbers;
+ rmon_stats->hist[0] = hwstat->rx_64_byte_frames;
+ rmon_stats->hist[1] = hwstat->rx_65_127_byte_frames;
+ rmon_stats->hist[2] = hwstat->rx_128_255_byte_frames;
+ rmon_stats->hist[3] = hwstat->rx_256_511_byte_frames;
+ rmon_stats->hist[4] = hwstat->rx_512_1023_byte_frames;
+ rmon_stats->hist[5] = hwstat->rx_1024_1518_byte_frames;
+ rmon_stats->hist[6] = hwstat->rx_greater_than_1518_byte_frames;
+ rmon_stats->hist_tx[0] = hwstat->tx_64_byte_frames;
+ rmon_stats->hist_tx[1] = hwstat->tx_65_127_byte_frames;
+ rmon_stats->hist_tx[2] = hwstat->tx_128_255_byte_frames;
+ rmon_stats->hist_tx[3] = hwstat->tx_256_511_byte_frames;
+ rmon_stats->hist_tx[4] = hwstat->tx_512_1023_byte_frames;
+ rmon_stats->hist_tx[5] = hwstat->tx_1024_1518_byte_frames;
+ rmon_stats->hist_tx[6] = hwstat->tx_greater_than_1518_byte_frames;
+ spin_unlock_irq(&bp->stats_lock);
+ *ranges = gem_rmon_ranges;
}
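
Each hist[i]/hist_tx[i] bucket filled above pairs positionally with gem_rmon_ranges[i] ({low, high} frame sizes in bytes), and the empty terminating entry marks the end of the table for the ethtool core.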
static int macb_get_regs_len(struct net_device *netdev)
@@ -3763,6 +3934,10 @@ static const struct ethtool_ops macb_ethtool_ops = {
.get_regs = macb_get_regs,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_pause_stats = macb_get_pause_stats,
+ .get_eth_mac_stats = macb_get_eth_mac_stats,
+ .get_eth_phy_stats = macb_get_eth_phy_stats,
+ .get_rmon_stats = macb_get_rmon_stats,
.get_wol = macb_get_wol,
.set_wol = macb_set_wol,
.get_link_ksettings = macb_get_link_ksettings,
@@ -3781,6 +3956,10 @@ static const struct ethtool_ops gem_ethtool_ops = {
.get_ethtool_stats = gem_get_ethtool_stats,
.get_strings = gem_get_ethtool_strings,
.get_sset_count = gem_get_sset_count,
+ .get_pause_stats = gem_get_pause_stats,
+ .get_eth_mac_stats = gem_get_eth_mac_stats,
+ .get_eth_phy_stats = gem_get_eth_phy_stats,
+ .get_rmon_stats = gem_get_rmon_stats,
.get_link_ksettings = macb_get_link_ksettings,
.set_link_ksettings = macb_set_link_ksettings,
.get_ringparam = macb_get_ringparam,
@@ -3917,7 +4096,7 @@ static const struct net_device_ops macb_netdev_ops = {
.ndo_stop = macb_close,
.ndo_start_xmit = macb_start_xmit,
.ndo_set_rx_mode = macb_set_rx_mode,
- .ndo_get_stats = macb_get_stats,
+ .ndo_get_stats64 = macb_get_stats,
.ndo_eth_ioctl = macb_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = macb_change_mtu,
@@ -4578,7 +4757,7 @@ static const struct net_device_ops at91ether_netdev_ops = {
.ndo_open = at91ether_open,
.ndo_stop = at91ether_close,
.ndo_start_xmit = at91ether_start_xmit,
- .ndo_get_stats = macb_get_stats,
+ .ndo_get_stats64 = macb_get_stats,
.ndo_set_rx_mode = macb_set_rx_mode,
.ndo_set_mac_address = eth_mac_addr,
.ndo_eth_ioctl = macb_ioctl,
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
index 9ad49aea2673..ff8f2f9f9cae 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
@@ -49,7 +49,7 @@ static int cn23xx_pf_soft_reset(struct octeon_device *oct)
lio_pci_readq(oct, CN23XX_RST_SOFT_RST);
lio_pci_writeq(oct, 1, CN23XX_RST_SOFT_RST);
- /* Wait for 100ms as Octeon resets. */
+ /* Wait for 100ms as Octeon resets */
mdelay(100);
if (octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1)) {
@@ -61,7 +61,7 @@ static int cn23xx_pf_soft_reset(struct octeon_device *oct)
dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: Reset completed\n",
oct->octeon_id);
- /* restore the reset value*/
+ /* Restore the reset value */
octeon_write_csr64(oct, CN23XX_WIN_WR_MASK_REG, 0xFF);
return 0;
@@ -121,7 +121,7 @@ u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us)
oqticks_per_us /= 1024;
/* time_intr is in microseconds. The next 2 steps gives the oq ticks
- * corressponding to time_intr.
+ * corresponding to time_intr.
*/
oqticks_per_us *= time_intr_in_us;
oqticks_per_us /= 1000;
@@ -136,11 +136,11 @@ static void cn23xx_setup_global_mac_regs(struct octeon_device *oct)
u64 reg_val;
u64 temp;
- /* programming SRN and TRS for each MAC(0..3) */
+ /* Programming SRN and TRS for each MAC(0..3) */
dev_dbg(&oct->pci_dev->dev, "%s:Using pcie port %d\n",
__func__, mac_no);
- /* By default, mapping all 64 IOQs to a single MACs */
+ /* By default, map all 64 IOQs to a single MAC */
reg_val =
octeon_read_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num));
@@ -164,7 +164,7 @@ static void cn23xx_setup_global_mac_regs(struct octeon_device *oct)
temp = oct->sriov_info.max_vfs & 0xff;
reg_val |= (temp << CN23XX_PKT_MAC_CTL_RINFO_NVFS_BIT_POS);
- /* write these settings to MAC register */
+ /* Write these settings to MAC register */
octeon_write_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num),
reg_val);
@@ -183,10 +183,10 @@ static int cn23xx_reset_io_queues(struct octeon_device *oct)
srn = oct->sriov_info.pf_srn;
ern = srn + oct->sriov_info.num_pf_rings;
- /*As per HRM reg description, s/w cant write 0 to ENB. */
- /*to make the queue off, need to set the RST bit. */
+ /* As per HRM reg description, s/w can't write 0 to ENB. */
+ /* We need to set the RST bit, to turn the queue off. */
- /* Reset the Enable bit for all the 64 IQs. */
+ /* Reset the enable bit for all the 64 IQs. */
for (q_no = srn; q_no < ern; q_no++) {
/* set RST bit to 1. This bit applies to both IQ and OQ */
d64 = octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
@@ -194,7 +194,7 @@ static int cn23xx_reset_io_queues(struct octeon_device *oct)
octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), d64);
}
- /*wait until the RST bit is clear or the RST and quite bits are set*/
+ /* Wait until the RST bit is clear or the RST and quiet bits are set */
for (q_no = srn; q_no < ern; q_no++) {
u64 reg_val = octeon_read_csr64(oct,
CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
@@ -245,15 +245,15 @@ static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct)
if (cn23xx_reset_io_queues(oct))
return -1;
- /** Set the MAC_NUM and PVF_NUM in IQ_PKT_CONTROL reg
- * for all queues.Only PF can set these bits.
+ /* Set the MAC_NUM and PVF_NUM in IQ_PKT_CONTROL reg
+ * for all queues. Only PF can set these bits.
* bits 29:30 indicate the MAC num.
* bits 32:47 indicate the PVF num.
*/
for (q_no = 0; q_no < ern; q_no++) {
reg_val = (u64)oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS;
- /* for VF assigned queues. */
+ /* For VF assigned queues. */
if (q_no < oct->sriov_info.pf_srn) {
vf_num = q_no / oct->sriov_info.rings_per_vf;
vf_num += 1; /* VF1, VF2,........ */
@@ -268,7 +268,7 @@ static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct)
reg_val);
}
- /* Select ES, RO, NS, RDSIZE,DPTR Fomat#0 for
+ /* Select ES, RO, NS, RDSIZE, DPTR Format#0 for
* pf queues
*/
for (q_no = srn; q_no < ern; q_no++) {
@@ -289,7 +289,7 @@ static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct)
octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
reg_val);
- /* Set WMARK level for triggering PI_INT */
+ /* Set WMARK level to trigger PI_INT */
/* intr_threshold = CN23XX_DEF_IQ_INTR_THRESHOLD & */
intr_threshold = CFG_GET_IQ_INTR_PKT(cn23xx->conf) &
CN23XX_PKT_IN_DONE_WMARK_MASK;
@@ -354,7 +354,7 @@ static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct)
/* set the ES bit */
reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES);
- /* write all the selected settings */
+ /* Write all the selected settings */
octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no), reg_val);
/* Enabling these interrupt in oct->fn_list.enable_interrupt()
@@ -373,7 +373,7 @@ static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct)
/** Setting the water mark level for pko back pressure **/
writeq(0x40, (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_WMARK);
- /** Disabling setting OQs in reset when ring has no dorebells
+ /* Disabling setting OQs in reset when ring has no doorbells
* enabling this will cause of head of line blocking
*/
/* Do it only for pass1.1. and pass1.2 */
@@ -383,7 +383,7 @@ static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct)
CN23XX_SLI_GBL_CONTROL) | 0x2,
(u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_GBL_CONTROL);
- /** Enable channel-level backpressure */
+ /* Enable channel-level backpressure */
if (oct->pf_num)
writeq(0xffffffffffffffffULL,
(u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OUT_BP_EN2_W1S);
@@ -396,7 +396,7 @@ static int cn23xx_setup_pf_device_regs(struct octeon_device *oct)
{
cn23xx_enable_error_reporting(oct);
- /* program the MAC(0..3)_RINFO before setting up input/output regs */
+ /* Program the MAC(0..3)_RINFO before setting up input/output regs */
cn23xx_setup_global_mac_regs(oct);
if (cn23xx_pf_setup_global_input_regs(oct))
@@ -410,7 +410,7 @@ static int cn23xx_setup_pf_device_regs(struct octeon_device *oct)
octeon_write_csr64(oct, CN23XX_SLI_WINDOW_CTL,
CN23XX_SLI_WINDOW_CTL_DEFAULT);
- /* set SLI_PKT_IN_JABBER to handle large VXLAN packets */
+ /* Set SLI_PKT_IN_JABBER to handle large VXLAN packets */
octeon_write_csr64(oct, CN23XX_SLI_PKT_IN_JABBER, CN23XX_INPUT_JABBER);
return 0;
}
@@ -574,7 +574,7 @@ static int cn23xx_setup_pf_mbox(struct octeon_device *oct)
mbox->mbox_read_reg = (u8 *)oct->mmio[0].hw_addr +
CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q_no, 1);
- /*Mail Box Thread creation*/
+ /* Mail Box Thread creation */
INIT_DELAYED_WORK(&mbox->mbox_poll_wk.work,
cn23xx_pf_mbox_thread);
mbox->mbox_poll_wk.ctxptr = (void *)mbox;
@@ -626,7 +626,7 @@ static int cn23xx_enable_io_queues(struct octeon_device *oct)
ern = srn + oct->num_iqs;
for (q_no = srn; q_no < ern; q_no++) {
- /* set the corresponding IQ IS_64B bit */
+ /* Set the corresponding IQ IS_64B bit */
if (oct->io_qmask.iq64B & BIT_ULL(q_no - srn)) {
reg_val = octeon_read_csr64(
oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
@@ -635,7 +635,7 @@ static int cn23xx_enable_io_queues(struct octeon_device *oct)
oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), reg_val);
}
- /* set the corresponding IQ ENB bit */
+ /* Set the corresponding IQ ENB bit */
if (oct->io_qmask.iq & BIT_ULL(q_no - srn)) {
/* IOQs are in reset by default in PEM2 mode,
* clearing reset bit
@@ -681,7 +681,7 @@ static int cn23xx_enable_io_queues(struct octeon_device *oct)
}
for (q_no = srn; q_no < ern; q_no++) {
u32 reg_val;
- /* set the corresponding OQ ENB bit */
+ /* Set the corresponding OQ ENB bit */
if (oct->io_qmask.oq & BIT_ULL(q_no - srn)) {
reg_val = octeon_read_csr(
oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no));
@@ -707,7 +707,7 @@ static void cn23xx_disable_io_queues(struct octeon_device *oct)
for (q_no = srn; q_no < ern; q_no++) {
loop = HZ;
- /* start the Reset for a particular ring */
+ /* Start the Reset for a particular ring */
WRITE_ONCE(d64, octeon_read_csr64(
oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)));
WRITE_ONCE(d64, READ_ONCE(d64) &
@@ -740,7 +740,7 @@ static void cn23xx_disable_io_queues(struct octeon_device *oct)
loop = HZ;
/* Wait until hardware indicates that the particular IQ
- * is out of reset.It given that SLI_PKT_RING_RST is
+ * is out of reset. Note that SLI_PKT_RING_RST is
* common for both IQs and OQs
*/
WRITE_ONCE(d64, octeon_read_csr64(
@@ -760,7 +760,7 @@ static void cn23xx_disable_io_queues(struct octeon_device *oct)
schedule_timeout_uninterruptible(1);
}
- /* clear the SLI_PKT(0..63)_CNTS[CNT] reg value */
+ /* Clear the SLI_PKT(0..63)_CNTS[CNT] reg value */
WRITE_ONCE(d32, octeon_read_csr(
oct, CN23XX_SLI_OQ_PKTS_SENT(q_no)));
octeon_write_csr(oct, CN23XX_SLI_OQ_PKTS_SENT(q_no),
@@ -793,7 +793,7 @@ static u64 cn23xx_pf_msix_interrupt_handler(void *dev)
if (!pkts_sent || (pkts_sent == 0xFFFFFFFFFFFFFFFFULL))
return ret;
- /* Write count reg in sli_pkt_cnts to clear these int.*/
+ /* Write count reg in sli_pkt_cnts to clear these int. */
if ((pkts_sent & CN23XX_INTR_PO_INT) ||
(pkts_sent & CN23XX_INTR_PI_INT)) {
if (pkts_sent & CN23XX_INTR_PO_INT)
@@ -908,7 +908,7 @@ static u32 cn23xx_bar1_idx_read(struct octeon_device *oct, u32 idx)
oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
}
-/* always call with lock held */
+/* Always call with lock held */
static u32 cn23xx_update_read_index(struct octeon_instr_queue *iq)
{
u32 new_idx;
@@ -919,7 +919,7 @@ static u32 cn23xx_update_read_index(struct octeon_instr_queue *iq)
iq->pkt_in_done = pkt_in_done;
/* Modulo of the new index with the IQ size will give us
- * the new index. The iq->reset_instr_cnt is always zero for
+ * the new index. The iq->reset_instr_cnt is always zero for
* cn23xx, so no extra adjustments are needed.
*/
new_idx = (iq->octeon_read_index +
@@ -934,8 +934,8 @@ static void cn23xx_enable_pf_interrupt(struct octeon_device *oct, u8 intr_flag)
struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
u64 intr_val = 0;
- /* Divide the single write to multiple writes based on the flag. */
- /* Enable Interrupt */
+ /* Divide the single write into multiple writes based on the flag. */
+ /* Enable Interrupts */
if (intr_flag == OCTEON_ALL_INTR) {
writeq(cn23xx->intr_mask64, cn23xx->intr_enb_reg64);
} else if (intr_flag & OCTEON_OUTPUT_INTR) {
@@ -990,7 +990,7 @@ static int cn23xx_get_pf_num(struct octeon_device *oct)
ret = 0;
- /** Read Function Dependency Link reg to get the function number */
+ /* Read Function Dependency Link reg to get the function number */
if (pci_read_config_dword(oct->pci_dev, CN23XX_PCIE_SRIOV_FDL,
&fdl_bit) == 0) {
oct->pf_num = ((fdl_bit >> CN23XX_PCIE_SRIOV_FDL_BIT_POS) &
@@ -1003,13 +1003,13 @@ static int cn23xx_get_pf_num(struct octeon_device *oct)
* In this case, read the PF number from the
* SLI_PKT0_INPUT_CONTROL reg (written by f/w)
*/
- pkt0_in_ctl = octeon_read_csr64(oct,
- CN23XX_SLI_IQ_PKT_CONTROL64(0));
+ pkt0_in_ctl =
+ octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(0));
pfnum = (pkt0_in_ctl >> CN23XX_PKT_INPUT_CTL_PF_NUM_POS) &
CN23XX_PKT_INPUT_CTL_PF_NUM_MASK;
mac = (octeon_read_csr(oct, CN23XX_SLI_MAC_NUMBER)) & 0xff;
- /* validate PF num by reading RINFO; f/w writes RINFO.trs == 1*/
+ /* Validate PF num by reading RINFO; f/w writes RINFO.trs == 1 */
d64 = octeon_read_csr64(oct,
CN23XX_SLI_PKT_MAC_RINFO64(mac, pfnum));
trs = (int)(d64 >> CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS) & 0xff;
@@ -1252,9 +1252,9 @@ int cn23xx_fw_loaded(struct octeon_device *oct)
u64 val;
/* If there's more than one active PF on this NIC, then that
- * implies that the NIC firmware is loaded and running. This check
+ * implies that the NIC firmware is loaded and running. This check
* prevents a rare false negative that might occur if we only relied
- * on checking the SCR2_BIT_FW_LOADED flag. The false negative would
+ * on checking the SCR2_BIT_FW_LOADED flag. The false negative would
* happen if the PF driver sees SCR2_BIT_FW_LOADED as cleared even
* though the firmware was already loaded but still booting and has yet
* to set SCR2_BIT_FW_LOADED.
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index 6b6cb73482d7..1753bb87dfbd 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -1433,22 +1433,6 @@ int octeon_wait_for_ddr_init(struct octeon_device *oct, u32 *timeout)
}
EXPORT_SYMBOL_GPL(octeon_wait_for_ddr_init);
-/* Get the octeon id assigned to the octeon device passed as argument.
- * This function is exported to other modules.
- * @param dev - octeon device pointer passed as a void *.
- * @return octeon device id
- */
-int lio_get_device_id(void *dev)
-{
- struct octeon_device *octeon_dev = (struct octeon_device *)dev;
- u32 i;
-
- for (i = 0; i < MAX_OCTEON_DEVICES; i++)
- if (octeon_device[i] == octeon_dev)
- return octeon_dev->octeon_id;
- return -1;
-}
-
void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq)
{
u64 instr_cnt;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
index d26364c2ac81..19344b21f8fb 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -705,13 +705,6 @@ octeon_get_dispatch(struct octeon_device *octeon_dev, u16 opcode,
*/
struct octeon_device *lio_get_device(u32 octeon_id);
-/** Get the octeon id assigned to the octeon device passed as argument.
- * This function is exported to other modules.
- * @param dev - octeon device pointer passed as a void *.
- * @return octeon device id
- */
-int lio_get_device_id(void *dev);
-
/** Read windowed register.
* @param oct - pointer to the Octeon device.
* @param addr - Address of the register to read.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index c7c2c15a1815..95e6f015a6af 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -1211,9 +1211,6 @@ struct adapter {
struct timer_list flower_stats_timer;
struct work_struct flower_stats_work;
- /* Ethtool Dump */
- struct ethtool_dump eth_dump;
-
/* HMA */
struct hma_data hma;
@@ -1233,6 +1230,10 @@ struct adapter {
/* Ethtool n-tuple */
struct cxgb4_ethtool_filter *ethtool_filters;
+
+ /* Ethtool Dump */
+ /* Must be last - ends in a flex-array member. */
+ struct ethtool_dump eth_dump;
};
/* Support for "sched-class" command to allow a TX Scheduling Class to be
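For context on the "Must be last" comment above: struct ethtool_dump ends in a flexible array member (its data[] payload). ISO C forbids embedding a struct with a flexible array member inside another struct; GCC and Clang accept it as an extension only when it is the final member, which is why eth_dump moves to the tail of struct adapter. A minimal illustration with hypothetical names:

/* Stand-in mirroring a header struct that ends in a flex array */
struct dump_hdr {
	unsigned int len;
	unsigned char data[];	/* flexible array member: no fixed size */
};

struct adapter_like {
	int other_state;	/* everything else goes first */
	struct dump_hdr dump;	/* GNU extension: valid only as the last member */
};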
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 2f0b3e389e62..551c279dc14b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -6538,26 +6538,6 @@ out_unlock:
mutex_unlock(&uld_mutex);
}
-static bool cxgb4_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
-{
- struct adapter *adap = netdev2adap(x->xso.dev);
- bool ret = false;
-
- if (!mutex_trylock(&uld_mutex)) {
- dev_dbg(adap->pdev_dev,
- "crypto uld critical resource is under use\n");
- return ret;
- }
- if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
- goto out_unlock;
-
- ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_offload_ok(skb, x);
-
-out_unlock:
- mutex_unlock(&uld_mutex);
- return ret;
-}
-
static void cxgb4_advance_esn_state(struct xfrm_state *x)
{
struct adapter *adap = netdev2adap(x->xso.dev);
@@ -6583,7 +6563,6 @@ static const struct xfrmdev_ops cxgb4_xfrmdev_ops = {
.xdo_dev_state_add = cxgb4_xfrm_add_state,
.xdo_dev_state_delete = cxgb4_xfrm_del_state,
.xdo_dev_state_free = cxgb4_xfrm_free_state,
- .xdo_dev_offload_ok = cxgb4_ipsec_offload_ok,
.xdo_dev_state_advance_esn = cxgb4_advance_esn_state,
};
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
index c7338ac6a5bb..baba96883f48 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
@@ -71,7 +71,6 @@
static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);
-static bool ch_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
static int ch_ipsec_uld_state_change(void *handle, enum cxgb4_state new_state);
static int ch_ipsec_xmit(struct sk_buff *skb, struct net_device *dev);
static void *ch_ipsec_uld_add(const struct cxgb4_lld_info *infop);
@@ -85,7 +84,6 @@ static const struct xfrmdev_ops ch_ipsec_xfrmdev_ops = {
.xdo_dev_state_add = ch_ipsec_xfrm_add_state,
.xdo_dev_state_delete = ch_ipsec_xfrm_del_state,
.xdo_dev_state_free = ch_ipsec_xfrm_free_state,
- .xdo_dev_offload_ok = ch_ipsec_offload_ok,
.xdo_dev_state_advance_esn = ch_ipsec_advance_esn_state,
};
@@ -323,20 +321,6 @@ static void ch_ipsec_xfrm_free_state(struct xfrm_state *x)
module_put(THIS_MODULE);
}
-static bool ch_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
-{
- if (x->props.family == AF_INET) {
- /* Offload with IP options is not supported yet */
- if (ip_hdr(skb)->ihl > 5)
- return false;
- } else {
- /* Offload with IPv6 extension headers is not support yet */
- if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
- return false;
- }
- return true;
-}
-
static void ch_ipsec_advance_esn_state(struct xfrm_state *x)
{
/* do nothing */
diff --git a/drivers/net/ethernet/cisco/enic/Kconfig b/drivers/net/ethernet/cisco/enic/Kconfig
index ad80c0fa96a6..96709875fe4f 100644
--- a/drivers/net/ethernet/cisco/enic/Kconfig
+++ b/drivers/net/ethernet/cisco/enic/Kconfig
@@ -6,5 +6,6 @@
config ENIC
tristate "Cisco VIC Ethernet NIC Support"
depends on PCI
+ select PAGE_POOL
help
This enables the support for the Cisco VIC Ethernet card.
diff --git a/drivers/net/ethernet/cisco/enic/Makefile b/drivers/net/ethernet/cisco/enic/Makefile
index c3b6febfdbe4..a96b8332e6e2 100644
--- a/drivers/net/ethernet/cisco/enic/Makefile
+++ b/drivers/net/ethernet/cisco/enic/Makefile
@@ -3,5 +3,5 @@ obj-$(CONFIG_ENIC) := enic.o
enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
enic_res.o enic_dev.o enic_pp.o vnic_dev.o vnic_rq.o vnic_vic.o \
- enic_ethtool.o enic_api.o enic_clsf.o
+ enic_ethtool.o enic_api.o enic_clsf.o enic_rq.o enic_wq.o
diff --git a/drivers/net/ethernet/cisco/enic/cq_desc.h b/drivers/net/ethernet/cisco/enic/cq_desc.h
index 462c5435a206..bfb3f14e89f5 100644
--- a/drivers/net/ethernet/cisco/enic/cq_desc.h
+++ b/drivers/net/ethernet/cisco/enic/cq_desc.h
@@ -40,28 +40,7 @@ struct cq_desc {
#define CQ_DESC_COMP_NDX_BITS 12
#define CQ_DESC_COMP_NDX_MASK ((1 << CQ_DESC_COMP_NDX_BITS) - 1)
-static inline void cq_desc_dec(const struct cq_desc *desc_arg,
- u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
-{
- const struct cq_desc *desc = desc_arg;
- const u8 type_color = desc->type_color;
-
- *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
-
- /*
- * Make sure color bit is read from desc *before* other fields
- * are read from desc. Hardware guarantees color bit is last
- * bit (byte) written. Adding the rmb() prevents the compiler
- * and/or CPU from reordering the reads which would potentially
- * result in reading stale values.
- */
-
- rmb();
-
- *type = type_color & CQ_DESC_TYPE_MASK;
- *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
- *completed_index = le16_to_cpu(desc->completed_index) &
- CQ_DESC_COMP_NDX_MASK;
-}
+#define CQ_DESC_32_FI_MASK (BIT(0) | BIT(1))
+#define CQ_DESC_64_FI_MASK (BIT(0) | BIT(1))
#endif /* _CQ_DESC_H_ */
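The two FI masks added above carry completed-index extension bits for the wider RQ completion descriptors: the classic field is 12 bits (CQ_DESC_COMP_NDX_BITS), and the low two bits of fetch_index_flags extend it to 14 bits so the new 16K-entry rings can be addressed. A sketch of the reassembly, mirroring the decode this series adds in enic_rq.c (constants copied from this header; the helper itself is illustrative):

#include <stdint.h>

#define CQ_DESC_COMP_NDX_BITS	12
#define CQ_DESC_COMP_NDX_MASK	((1 << CQ_DESC_COMP_NDX_BITS) - 1)
#define CQ_DESC_32_FI_MASK	0x3	/* BIT(0) | BIT(1) */

/* Rebuild the 14-bit completed index from the 12-bit descriptor field
 * plus the two extension bits stashed in fetch_index_flags.
 */
static uint16_t cq_completed_index(uint16_t completed_index_flags,
				   uint8_t fetch_index_flags)
{
	uint16_t idx = completed_index_flags & CQ_DESC_COMP_NDX_MASK;

	return idx | ((fetch_index_flags & CQ_DESC_32_FI_MASK) <<
		      CQ_DESC_COMP_NDX_BITS);	/* 0..16383 */
}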
diff --git a/drivers/net/ethernet/cisco/enic/cq_enet_desc.h b/drivers/net/ethernet/cisco/enic/cq_enet_desc.h
index d25426470a29..50787cff29db 100644
--- a/drivers/net/ethernet/cisco/enic/cq_enet_desc.h
+++ b/drivers/net/ethernet/cisco/enic/cq_enet_desc.h
@@ -17,12 +17,22 @@ struct cq_enet_wq_desc {
u8 type_color;
};
-static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc,
- u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
-{
- cq_desc_dec((struct cq_desc *)desc, type,
- color, q_number, completed_index);
-}
+/*
+ * Defines and Capabilities for CMD_CQ_ENTRY_SIZE_SET
+ */
+#define VNIC_RQ_ALL (~0ULL)
+
+#define VNIC_RQ_CQ_ENTRY_SIZE_16 0
+#define VNIC_RQ_CQ_ENTRY_SIZE_32 1
+#define VNIC_RQ_CQ_ENTRY_SIZE_64 2
+
+#define VNIC_RQ_CQ_ENTRY_SIZE_16_CAPABLE BIT(VNIC_RQ_CQ_ENTRY_SIZE_16)
+#define VNIC_RQ_CQ_ENTRY_SIZE_32_CAPABLE BIT(VNIC_RQ_CQ_ENTRY_SIZE_32)
+#define VNIC_RQ_CQ_ENTRY_SIZE_64_CAPABLE BIT(VNIC_RQ_CQ_ENTRY_SIZE_64)
+
+#define VNIC_RQ_CQ_ENTRY_SIZE_ALL_BIT (VNIC_RQ_CQ_ENTRY_SIZE_16_CAPABLE | \
+ VNIC_RQ_CQ_ENTRY_SIZE_32_CAPABLE | \
+ VNIC_RQ_CQ_ENTRY_SIZE_64_CAPABLE)
/* Completion queue descriptor: Ethernet receive queue, 16B */
struct cq_enet_rq_desc {
@@ -36,6 +46,45 @@ struct cq_enet_rq_desc {
u8 type_color;
};
+/* Completion queue descriptor: Ethernet receive queue, 32B */
+struct cq_enet_rq_desc_32 {
+ __le16 completed_index_flags;
+ __le16 q_number_rss_type_flags;
+ __le32 rss_hash;
+ __le16 bytes_written_flags;
+ __le16 vlan;
+ __le16 checksum_fcoe;
+ u8 flags;
+ u8 fetch_index_flags;
+ __le32 time_stamp;
+ __le16 time_stamp2;
+ __le16 pie_info;
+ __le32 pie_info2;
+ __le16 pie_info3;
+ u8 pie_info4;
+ u8 type_color;
+};
+
+/* Completion queue descriptor: Ethernet receive queue, 64B */
+struct cq_enet_rq_desc_64 {
+ __le16 completed_index_flags;
+ __le16 q_number_rss_type_flags;
+ __le32 rss_hash;
+ __le16 bytes_written_flags;
+ __le16 vlan;
+ __le16 checksum_fcoe;
+ u8 flags;
+ u8 fetch_index_flags;
+ __le32 time_stamp;
+ __le16 time_stamp2;
+ __le16 pie_info;
+ __le32 pie_info2;
+ __le16 pie_info3;
+ u8 pie_info4;
+ u8 reserved[32];
+ u8 type_color;
+};
+
#define CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT (0x1 << 12)
#define CQ_ENET_RQ_DESC_FLAGS_FCOE (0x1 << 13)
#define CQ_ENET_RQ_DESC_FLAGS_EOP (0x1 << 14)
@@ -88,85 +137,4 @@ struct cq_enet_rq_desc {
#define CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT (0x1 << 6)
#define CQ_ENET_RQ_DESC_FLAGS_FCS_OK (0x1 << 7)
-static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc,
- u8 *type, u8 *color, u16 *q_number, u16 *completed_index,
- u8 *ingress_port, u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type,
- u8 *csum_not_calc, u32 *rss_hash, u16 *bytes_written, u8 *packet_error,
- u8 *vlan_stripped, u16 *vlan_tci, u16 *checksum, u8 *fcoe_sof,
- u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error, u8 *fcoe_eof,
- u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok,
- u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok)
-{
- u16 completed_index_flags;
- u16 q_number_rss_type_flags;
- u16 bytes_written_flags;
-
- cq_desc_dec((struct cq_desc *)desc, type,
- color, q_number, completed_index);
-
- completed_index_flags = le16_to_cpu(desc->completed_index_flags);
- q_number_rss_type_flags =
- le16_to_cpu(desc->q_number_rss_type_flags);
- bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
-
- *ingress_port = (completed_index_flags &
- CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
- *fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
- 1 : 0;
- *eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ?
- 1 : 0;
- *sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ?
- 1 : 0;
-
- *rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) &
- CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
- *csum_not_calc = (q_number_rss_type_flags &
- CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0;
-
- *rss_hash = le32_to_cpu(desc->rss_hash);
-
- *bytes_written = bytes_written_flags &
- CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
- *packet_error = (bytes_written_flags &
- CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0;
- *vlan_stripped = (bytes_written_flags &
- CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0;
-
- /*
- * Tag Control Information(16) = user_priority(3) + cfi(1) + vlan(12)
- */
- *vlan_tci = le16_to_cpu(desc->vlan);
-
- if (*fcoe) {
- *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) &
- CQ_ENET_RQ_DESC_FCOE_SOF_MASK);
- *fcoe_fc_crc_ok = (desc->flags &
- CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0;
- *fcoe_enc_error = (desc->flags &
- CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0;
- *fcoe_eof = (u8)((le16_to_cpu(desc->checksum_fcoe) >>
- CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) &
- CQ_ENET_RQ_DESC_FCOE_EOF_MASK);
- *checksum = 0;
- } else {
- *fcoe_sof = 0;
- *fcoe_fc_crc_ok = 0;
- *fcoe_enc_error = 0;
- *fcoe_eof = 0;
- *checksum = le16_to_cpu(desc->checksum_fcoe);
- }
-
- *tcp_udp_csum_ok =
- (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0;
- *udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0;
- *tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0;
- *ipv4_csum_ok =
- (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0;
- *ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0;
- *ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0;
- *ipv4_fragment =
- (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0;
- *fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0;
-}
-
#endif /* _CQ_ENET_DESC_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 10b7e02ba4d0..9c12e967e9f1 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -17,6 +17,7 @@
#include "vnic_nic.h"
#include "vnic_rss.h"
#include <linux/irq.h>
+#include <net/page_pool/helpers.h>
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
@@ -30,6 +31,13 @@
#define ENIC_AIC_LARGE_PKT_DIFF 3
+enum ext_cq {
+ ENIC_RQ_CQ_ENTRY_SIZE_16,
+ ENIC_RQ_CQ_ENTRY_SIZE_32,
+ ENIC_RQ_CQ_ENTRY_SIZE_64,
+ ENIC_RQ_CQ_ENTRY_SIZE_MAX,
+};
+
struct enic_msix_entry {
int requested;
char devname[IFNAMSIZ + 8];
@@ -75,6 +83,10 @@ struct enic_rx_coal {
#define ENIC_SET_INSTANCE (1 << 3)
#define ENIC_SET_HOST (1 << 4)
+#define MAX_TSO BIT(16)
+#define WQ_ENET_MAX_DESC_LEN BIT(WQ_ENET_LEN_BITS)
+#define ENIC_DESC_MAX_SPLITS (MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
+
struct enic_port_profile {
u32 set;
u8 request;
@@ -158,6 +170,7 @@ struct enic_rq_stats {
u64 pkt_truncated; /* truncated pkts */
u64 no_skb; /* out of skbs */
u64 desc_skip; /* Rx pkt went into later buffer */
+ u64 pp_alloc_fail; /* page pool alloc failure */
};
struct enic_wq {
@@ -169,6 +182,7 @@ struct enic_wq {
struct enic_rq {
struct vnic_rq vrq;
struct enic_rq_stats stats;
+ struct page_pool *pool;
} ____cacheline_aligned;
/* Per-instance private data structure */
@@ -223,9 +237,9 @@ struct enic {
unsigned int cq_avail;
unsigned int cq_count;
struct enic_rfs_flw_tbl rfs_h;
- u32 rx_copybreak;
u8 rss_key[ENIC_RSS_LEN];
struct vnic_gen_stats gen_stats;
+ enum ext_cq ext_cq;
};
static inline struct net_device *vnic_get_netdev(struct vnic_dev *vdev)
@@ -347,5 +361,6 @@ int enic_is_valid_vf(struct enic *enic, int vf);
int enic_is_dynamic(struct enic *enic);
void enic_set_ethtool_ops(struct net_device *netdev);
int __enic_set_rsskey(struct enic *enic);
+void enic_ext_cq(struct enic *enic);
#endif /* _ENIC_H_ */
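A worked example for the ENIC_DESC_MAX_SPLITS macro moved into this header: it bounds how many WQ descriptors one TSO skb can consume, a 64 KiB TSO payload divided by the per-descriptor length limit, plus one for the remainder. Assuming WQ_ENET_LEN_BITS is 14 (a 16 KiB limit; check wq_enet_desc.h), restated in stand-alone C:

#include <assert.h>

#define WQ_ENET_LEN_BITS	14	/* assumed value; see wq_enet_desc.h */
#define WQ_ENET_MAX_DESC_LEN	(1 << WQ_ENET_LEN_BITS)	/* 16384 */
#define MAX_TSO			(1 << 16)		/* 65536 */
#define ENIC_DESC_MAX_SPLITS	(MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)

static_assert(ENIC_DESC_MAX_SPLITS == 5, "4 full descriptors + 1 remainder");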
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index d607b4f0542c..529160926a96 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -222,9 +222,9 @@ static void enic_get_ringparam(struct net_device *netdev,
struct enic *enic = netdev_priv(netdev);
struct vnic_enet_config *c = &enic->config;
- ring->rx_max_pending = ENIC_MAX_RQ_DESCS;
+ ring->rx_max_pending = c->max_rq_ring;
ring->rx_pending = c->rq_desc_count;
- ring->tx_max_pending = ENIC_MAX_WQ_DESCS;
+ ring->tx_max_pending = c->max_wq_ring;
ring->tx_pending = c->wq_desc_count;
}
@@ -252,18 +252,18 @@ static int enic_set_ringparam(struct net_device *netdev,
}
rx_pending = c->rq_desc_count;
tx_pending = c->wq_desc_count;
- if (ring->rx_pending > ENIC_MAX_RQ_DESCS ||
+ if (ring->rx_pending > c->max_rq_ring ||
ring->rx_pending < ENIC_MIN_RQ_DESCS) {
netdev_info(netdev, "rx pending (%u) not in range [%u,%u]",
ring->rx_pending, ENIC_MIN_RQ_DESCS,
- ENIC_MAX_RQ_DESCS);
+ c->max_rq_ring);
return -EINVAL;
}
- if (ring->tx_pending > ENIC_MAX_WQ_DESCS ||
+ if (ring->tx_pending > c->max_wq_ring ||
ring->tx_pending < ENIC_MIN_WQ_DESCS) {
netdev_info(netdev, "tx pending (%u) not in range [%u,%u]",
ring->tx_pending, ENIC_MIN_WQ_DESCS,
- ENIC_MAX_WQ_DESCS);
+ c->max_wq_ring);
return -EINVAL;
}
if (running)
@@ -608,43 +608,6 @@ static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
return ret;
}
-static int enic_get_tunable(struct net_device *dev,
- const struct ethtool_tunable *tuna, void *data)
-{
- struct enic *enic = netdev_priv(dev);
- int ret = 0;
-
- switch (tuna->id) {
- case ETHTOOL_RX_COPYBREAK:
- *(u32 *)data = enic->rx_copybreak;
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-static int enic_set_tunable(struct net_device *dev,
- const struct ethtool_tunable *tuna,
- const void *data)
-{
- struct enic *enic = netdev_priv(dev);
- int ret = 0;
-
- switch (tuna->id) {
- case ETHTOOL_RX_COPYBREAK:
- enic->rx_copybreak = *(u32 *)data;
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
static u32 enic_get_rxfh_key_size(struct net_device *netdev)
{
return ENIC_RSS_LEN;
@@ -727,8 +690,6 @@ static const struct ethtool_ops enic_ethtool_ops = {
.get_coalesce = enic_get_coalesce,
.set_coalesce = enic_set_coalesce,
.get_rxnfc = enic_get_rxnfc,
- .get_tunable = enic_get_tunable,
- .set_tunable = enic_set_tunable,
.get_rxfh_key_size = enic_get_rxfh_key_size,
.get_rxfh = enic_get_rxfh,
.set_rxfh = enic_set_rxfh,
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 49f6cab01ed5..54aa3953bf7b 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -58,18 +58,15 @@
#include "enic_dev.h"
#include "enic_pp.h"
#include "enic_clsf.h"
+#include "enic_rq.h"
+#include "enic_wq.h"
#define ENIC_NOTIFY_TIMER_PERIOD (2 * HZ)
-#define WQ_ENET_MAX_DESC_LEN (1 << WQ_ENET_LEN_BITS)
-#define MAX_TSO (1 << 16)
-#define ENIC_DESC_MAX_SPLITS (MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
#define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN 0x0044 /* enet dynamic vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF 0x0071 /* enet SRIOV VF */
-#define RX_COPYBREAK_DEFAULT 256
-
/* Supported devices */
static const struct pci_device_id enic_id_table[] = {
{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
@@ -322,54 +319,6 @@ int enic_is_valid_vf(struct enic *enic, int vf)
#endif
}
-static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
-{
- struct enic *enic = vnic_dev_priv(wq->vdev);
-
- if (buf->sop)
- dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
- DMA_TO_DEVICE);
- else
- dma_unmap_page(&enic->pdev->dev, buf->dma_addr, buf->len,
- DMA_TO_DEVICE);
-
- if (buf->os_buf)
- dev_kfree_skb_any(buf->os_buf);
-}
-
-static void enic_wq_free_buf(struct vnic_wq *wq,
- struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
-{
- struct enic *enic = vnic_dev_priv(wq->vdev);
-
- enic->wq[wq->index].stats.cq_work++;
- enic->wq[wq->index].stats.cq_bytes += buf->len;
- enic_free_wq_buf(wq, buf);
-}
-
-static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
- u8 type, u16 q_number, u16 completed_index, void *opaque)
-{
- struct enic *enic = vnic_dev_priv(vdev);
-
- spin_lock(&enic->wq[q_number].lock);
-
- vnic_wq_service(&enic->wq[q_number].vwq, cq_desc,
- completed_index, enic_wq_free_buf,
- opaque);
-
- if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
- vnic_wq_desc_avail(&enic->wq[q_number].vwq) >=
- (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)) {
- netif_wake_subqueue(enic->netdev, q_number);
- enic->wq[q_number].stats.wake++;
- }
-
- spin_unlock(&enic->wq[q_number].lock);
-
- return 0;
-}
-
static bool enic_log_q_error(struct enic *enic)
{
unsigned int i;
@@ -1313,243 +1262,6 @@ nla_put_failure:
return -EMSGSIZE;
}
-static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
-{
- struct enic *enic = vnic_dev_priv(rq->vdev);
-
- if (!buf->os_buf)
- return;
-
- dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(buf->os_buf);
- buf->os_buf = NULL;
-}
-
-static int enic_rq_alloc_buf(struct vnic_rq *rq)
-{
- struct enic *enic = vnic_dev_priv(rq->vdev);
- struct net_device *netdev = enic->netdev;
- struct sk_buff *skb;
- unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
- unsigned int os_buf_index = 0;
- dma_addr_t dma_addr;
- struct vnic_rq_buf *buf = rq->to_use;
-
- if (buf->os_buf) {
- enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
- buf->len);
-
- return 0;
- }
- skb = netdev_alloc_skb_ip_align(netdev, len);
- if (!skb) {
- enic->rq[rq->index].stats.no_skb++;
- return -ENOMEM;
- }
-
- dma_addr = dma_map_single(&enic->pdev->dev, skb->data, len,
- DMA_FROM_DEVICE);
- if (unlikely(enic_dma_map_check(enic, dma_addr))) {
- dev_kfree_skb(skb);
- return -ENOMEM;
- }
-
- enic_queue_rq_desc(rq, skb, os_buf_index,
- dma_addr, len);
-
- return 0;
-}
-
-static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
- u32 pkt_len)
-{
- if (ENIC_LARGE_PKT_THRESHOLD <= pkt_len)
- pkt_size->large_pkt_bytes_cnt += pkt_len;
- else
- pkt_size->small_pkt_bytes_cnt += pkt_len;
-}
-
-static bool enic_rxcopybreak(struct net_device *netdev, struct sk_buff **skb,
- struct vnic_rq_buf *buf, u16 len)
-{
- struct enic *enic = netdev_priv(netdev);
- struct sk_buff *new_skb;
-
- if (len > enic->rx_copybreak)
- return false;
- new_skb = netdev_alloc_skb_ip_align(netdev, len);
- if (!new_skb)
- return false;
- dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr, len,
- DMA_FROM_DEVICE);
- memcpy(new_skb->data, (*skb)->data, len);
- *skb = new_skb;
-
- return true;
-}
-
-static void enic_rq_indicate_buf(struct vnic_rq *rq,
- struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
- int skipped, void *opaque)
-{
- struct enic *enic = vnic_dev_priv(rq->vdev);
- struct net_device *netdev = enic->netdev;
- struct sk_buff *skb;
- struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
- struct enic_rq_stats *rqstats = &enic->rq[rq->index].stats;
-
- u8 type, color, eop, sop, ingress_port, vlan_stripped;
- u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
- u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
- u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
- u8 packet_error;
- u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
- u32 rss_hash;
- bool outer_csum_ok = true, encap = false;
-
- rqstats->packets++;
- if (skipped) {
- rqstats->desc_skip++;
- return;
- }
-
- skb = buf->os_buf;
-
- cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
- &type, &color, &q_number, &completed_index,
- &ingress_port, &fcoe, &eop, &sop, &rss_type,
- &csum_not_calc, &rss_hash, &bytes_written,
- &packet_error, &vlan_stripped, &vlan_tci, &checksum,
- &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
- &fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
- &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
- &fcs_ok);
-
- if (packet_error) {
-
- if (!fcs_ok) {
- if (bytes_written > 0)
- rqstats->bad_fcs++;
- else if (bytes_written == 0)
- rqstats->pkt_truncated++;
- }
-
- dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(skb);
- buf->os_buf = NULL;
-
- return;
- }
-
- if (eop && bytes_written > 0) {
-
- /* Good receive
- */
- rqstats->bytes += bytes_written;
- if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) {
- buf->os_buf = NULL;
- dma_unmap_single(&enic->pdev->dev, buf->dma_addr,
- buf->len, DMA_FROM_DEVICE);
- }
- prefetch(skb->data - NET_IP_ALIGN);
-
- skb_put(skb, bytes_written);
- skb->protocol = eth_type_trans(skb, netdev);
- skb_record_rx_queue(skb, q_number);
- if ((netdev->features & NETIF_F_RXHASH) && rss_hash &&
- (type == 3)) {
- switch (rss_type) {
- case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4:
- case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6:
- case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX:
- skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L4);
- rqstats->l4_rss_hash++;
- break;
- case CQ_ENET_RQ_DESC_RSS_TYPE_IPv4:
- case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6:
- case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX:
- skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L3);
- rqstats->l3_rss_hash++;
- break;
- }
- }
- if (enic->vxlan.vxlan_udp_port_number) {
- switch (enic->vxlan.patch_level) {
- case 0:
- if (fcoe) {
- encap = true;
- outer_csum_ok = fcoe_fc_crc_ok;
- }
- break;
- case 2:
- if ((type == 7) &&
- (rss_hash & BIT(0))) {
- encap = true;
- outer_csum_ok = (rss_hash & BIT(1)) &&
- (rss_hash & BIT(2));
- }
- break;
- }
- }
-
- /* Hardware does not provide whole packet checksum. It only
- * provides pseudo checksum. Since hw validates the packet
- * checksum but not provide us the checksum value. use
- * CHECSUM_UNNECESSARY.
- *
- * In case of encap pkt tcp_udp_csum_ok/tcp_udp_csum_ok is
- * inner csum_ok. outer_csum_ok is set by hw when outer udp
- * csum is correct or is zero.
- */
- if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc &&
- tcp_udp_csum_ok && outer_csum_ok &&
- (ipv4_csum_ok || ipv6)) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb->csum_level = encap;
- if (encap)
- rqstats->csum_unnecessary_encap++;
- else
- rqstats->csum_unnecessary++;
- }
-
- if (vlan_stripped) {
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
- rqstats->vlan_stripped++;
- }
- skb_mark_napi_id(skb, &enic->napi[rq->index]);
- if (!(netdev->features & NETIF_F_GRO))
- netif_receive_skb(skb);
- else
- napi_gro_receive(&enic->napi[q_number], skb);
- if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
- enic_intr_update_pkt_size(&cq->pkt_size_counter,
- bytes_written);
- } else {
-
- /* Buffer overflow
- */
- rqstats->pkt_truncated++;
- dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(skb);
- buf->os_buf = NULL;
- }
-}
-
-static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
- u8 type, u16 q_number, u16 completed_index, void *opaque)
-{
- struct enic *enic = vnic_dev_priv(vdev);
-
- vnic_rq_service(&enic->rq[q_number].vrq, cq_desc,
- completed_index, VNIC_RQ_RETURN_DESC,
- enic_rq_indicate_buf, opaque);
-
- return 0;
-}
-
static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
{
unsigned int intr = enic_msix_rq_intr(enic, rq->index);
@@ -1620,12 +1332,10 @@ static int enic_poll(struct napi_struct *napi, int budget)
unsigned int work_done, rq_work_done = 0, wq_work_done;
int err;
- wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do,
- enic_wq_service, NULL);
+ wq_work_done = enic_wq_cq_service(enic, cq_wq, wq_work_to_do);
if (budget > 0)
- rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
- rq_work_to_do, enic_rq_service, NULL);
+ rq_work_done = enic_rq_cq_service(enic, cq_rq, rq_work_to_do);
/* Accumulate intr event credits for this polling
* cycle. An intr event is the completion of a
@@ -1724,8 +1434,8 @@ static int enic_poll_msix_wq(struct napi_struct *napi, int budget)
wq_irq = wq->index;
cq = enic_cq_wq(enic, wq_irq);
intr = enic_msix_wq_intr(enic, wq_irq);
- wq_work_done = vnic_cq_service(&enic->cq[cq], wq_work_to_do,
- enic_wq_service, NULL);
+
+ wq_work_done = enic_wq_cq_service(enic, cq, wq_work_to_do);
vnic_intr_return_credits(&enic->intr[intr], wq_work_done,
0 /* don't unmask intr */,
@@ -1754,8 +1464,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
*/
if (budget > 0)
- work_done = vnic_cq_service(&enic->cq[cq],
- work_to_do, enic_rq_service, NULL);
+ work_done = enic_rq_cq_service(enic, cq, work_to_do);
/* Return intr event credits for this polling
* cycle. An intr event is the completion of a
@@ -1972,6 +1681,17 @@ static int enic_open(struct net_device *netdev)
struct enic *enic = netdev_priv(netdev);
unsigned int i;
int err, ret;
+ unsigned int max_pkt_len = netdev->mtu + VLAN_ETH_HLEN;
+ struct page_pool_params pp_params = {
+ .order = get_order(max_pkt_len),
+ .pool_size = enic->config.rq_desc_count,
+ .nid = dev_to_node(&enic->pdev->dev),
+ .dev = &enic->pdev->dev,
+ .dma_dir = DMA_FROM_DEVICE,
+ .max_len = (max_pkt_len > PAGE_SIZE) ? max_pkt_len : PAGE_SIZE,
+ .netdev = netdev,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ };
err = enic_request_intr(enic);
if (err) {
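Sizing note for the page_pool_params block above: .order comes from the worst-case frame, and .max_len (the length the pool may DMA-sync toward the device) is clamped up to at least PAGE_SIZE. Assuming 4 KiB pages: at MTU 1500, max_pkt_len = 1500 + VLAN_ETH_HLEN (18) = 1518, giving order 0 and max_len = PAGE_SIZE; at a 9000-byte jumbo MTU, max_pkt_len = 9018 needs order 2 (16 KiB compound pages) and max_len = 9018. The order computation, restated for userspace:

#include <stdio.h>

/* get_order() restated for 4 KiB pages: smallest order such that
 * (4096 << order) >= size.
 */
static unsigned int get_order_4k(unsigned long size)
{
	unsigned int order = 0;

	while ((4096UL << order) < size)
		order++;
	return order;
}

int main(void)
{
	printf("mtu 1500: order %u\n", get_order_4k(1500 + 18));	/* 0 */
	printf("mtu 9000: order %u\n", get_order_4k(9000 + 18));	/* 2 */
	return 0;
}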
@@ -1989,6 +1709,16 @@ static int enic_open(struct net_device *netdev)
}
for (i = 0; i < enic->rq_count; i++) {
+ /* create a page pool for each RQ */
+ pp_params.napi = &enic->napi[i];
+ pp_params.queue_idx = i;
+ enic->rq[i].pool = page_pool_create(&pp_params);
+ if (IS_ERR(enic->rq[i].pool)) {
+ err = PTR_ERR(enic->rq[i].pool);
+ enic->rq[i].pool = NULL;
+ goto err_out_free_rq;
+ }
+
/* enable rq before updating rq desc */
vnic_rq_enable(&enic->rq[i].vrq);
vnic_rq_fill(&enic->rq[i].vrq, enic_rq_alloc_buf);
@@ -2029,8 +1759,11 @@ static int enic_open(struct net_device *netdev)
err_out_free_rq:
for (i = 0; i < enic->rq_count; i++) {
ret = vnic_rq_disable(&enic->rq[i].vrq);
- if (!ret)
+ if (!ret) {
vnic_rq_clean(&enic->rq[i].vrq, enic_free_rq_buf);
+ page_pool_destroy(enic->rq[i].pool);
+ enic->rq[i].pool = NULL;
+ }
}
enic_dev_notify_unset(enic);
err_out_free_intr:
@@ -2088,8 +1821,11 @@ static int enic_stop(struct net_device *netdev)
for (i = 0; i < enic->wq_count; i++)
vnic_wq_clean(&enic->wq[i].vwq, enic_free_wq_buf);
- for (i = 0; i < enic->rq_count; i++)
+ for (i = 0; i < enic->rq_count; i++) {
vnic_rq_clean(&enic->rq[i].vrq, enic_free_rq_buf);
+ page_pool_destroy(enic->rq[i].pool);
+ enic->rq[i].pool = NULL;
+ }
for (i = 0; i < enic->cq_count; i++)
vnic_cq_clean(&enic->cq[i]);
for (i = 0; i < enic->intr_count; i++)
@@ -2405,6 +2141,7 @@ static void enic_reset(struct work_struct *work)
enic_init_vnic_resources(enic);
enic_set_rss_nic_cfg(enic);
enic_dev_set_ig_vlan_rewrite_mode(enic);
+ enic_ext_cq(enic);
enic_open(enic->netdev);
/* Allow infiniband to fiddle with the device again */
@@ -2431,6 +2168,7 @@ static void enic_tx_hang_reset(struct work_struct *work)
enic_init_vnic_resources(enic);
enic_set_rss_nic_cfg(enic);
enic_dev_set_ig_vlan_rewrite_mode(enic);
+ enic_ext_cq(enic);
enic_open(enic->netdev);
/* Allow infiniband to fiddle with the device again */
@@ -2599,6 +2337,7 @@ static void enic_get_queue_stats_rx(struct net_device *dev, int idx,
rxs->hw_drop_overruns = rqstats->pkt_truncated;
rxs->csum_unnecessary = rqstats->csum_unnecessary +
rqstats->csum_unnecessary_encap;
+ rxs->alloc_fail = rqstats->pp_alloc_fail;
}
static void enic_get_queue_stats_tx(struct net_device *dev, int idx,
@@ -2626,6 +2365,7 @@ static void enic_get_base_stats(struct net_device *dev,
rxs->hw_drops = 0;
rxs->hw_drop_overruns = 0;
rxs->csum_unnecessary = 0;
+ rxs->alloc_fail = 0;
txs->bytes = 0;
txs->packets = 0;
txs->csum_none = 0;
@@ -2803,6 +2543,8 @@ static int enic_dev_init(struct enic *enic)
enic_get_res_counts(enic);
+ enic_ext_cq(enic);
+
err = enic_alloc_enic_resources(enic);
if (err) {
dev_err(dev, "Failed to allocate enic resources\n");
@@ -3179,7 +2921,6 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(dev, "Cannot register net device, aborting\n");
goto err_out_dev_deinit;
}
- enic->rx_copybreak = RX_COPYBREAK_DEFAULT;
return 0;
diff --git a/drivers/net/ethernet/cisco/enic/enic_res.c b/drivers/net/ethernet/cisco/enic/enic_res.c
index 126125199833..bbd3143ed73e 100644
--- a/drivers/net/ethernet/cisco/enic/enic_res.c
+++ b/drivers/net/ethernet/cisco/enic/enic_res.c
@@ -59,31 +59,38 @@ int enic_get_vnic_config(struct enic *enic)
GET_CONFIG(intr_timer_usec);
GET_CONFIG(loop_tag);
GET_CONFIG(num_arfs);
+ GET_CONFIG(max_rq_ring);
+ GET_CONFIG(max_wq_ring);
+ GET_CONFIG(max_cq_ring);
+
+ if (!c->max_wq_ring)
+ c->max_wq_ring = ENIC_MAX_WQ_DESCS_DEFAULT;
+ if (!c->max_rq_ring)
+ c->max_rq_ring = ENIC_MAX_RQ_DESCS_DEFAULT;
+ if (!c->max_cq_ring)
+ c->max_cq_ring = ENIC_MAX_CQ_DESCS_DEFAULT;
c->wq_desc_count =
- min_t(u32, ENIC_MAX_WQ_DESCS,
- max_t(u32, ENIC_MIN_WQ_DESCS,
- c->wq_desc_count));
+ min_t(u32, c->max_wq_ring,
+ max_t(u32, ENIC_MIN_WQ_DESCS, c->wq_desc_count));
c->wq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
c->rq_desc_count =
- min_t(u32, ENIC_MAX_RQ_DESCS,
- max_t(u32, ENIC_MIN_RQ_DESCS,
- c->rq_desc_count));
+ min_t(u32, c->max_rq_ring,
+ max_t(u32, ENIC_MIN_RQ_DESCS, c->rq_desc_count));
c->rq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
if (c->mtu == 0)
c->mtu = 1500;
- c->mtu = min_t(u16, ENIC_MAX_MTU,
- max_t(u16, ENIC_MIN_MTU,
- c->mtu));
+ c->mtu = min_t(u16, ENIC_MAX_MTU, max_t(u16, ENIC_MIN_MTU, c->mtu));
c->intr_timer_usec = min_t(u32, c->intr_timer_usec,
vnic_dev_get_intr_coal_timer_max(enic->vdev));
dev_info(enic_get_dev(enic),
- "vNIC MAC addr %pM wq/rq %d/%d mtu %d\n",
- enic->mac_addr, c->wq_desc_count, c->rq_desc_count, c->mtu);
+ "vNIC MAC addr %pM wq/rq %d/%d max wq/rq/cq %d/%d/%d mtu %d\n",
+ enic->mac_addr, c->wq_desc_count, c->rq_desc_count,
+ c->max_wq_ring, c->max_rq_ring, c->max_cq_ring, c->mtu);
dev_info(enic_get_dev(enic), "vNIC csum tx/rx %s/%s "
"tso/lro %s/%s rss %s intr mode %s type %s timer %d usec "
@@ -312,6 +319,7 @@ void enic_init_vnic_resources(struct enic *enic)
int enic_alloc_vnic_resources(struct enic *enic)
{
enum vnic_dev_intr_mode intr_mode;
+ int rq_cq_desc_size;
unsigned int i;
int err;
@@ -326,6 +334,24 @@ int enic_alloc_vnic_resources(struct enic *enic)
intr_mode == VNIC_DEV_INTR_MODE_MSIX ? "MSI-X" :
"unknown");
+ switch (enic->ext_cq) {
+ case ENIC_RQ_CQ_ENTRY_SIZE_16:
+ rq_cq_desc_size = 16;
+ break;
+ case ENIC_RQ_CQ_ENTRY_SIZE_32:
+ rq_cq_desc_size = 32;
+ break;
+ case ENIC_RQ_CQ_ENTRY_SIZE_64:
+ rq_cq_desc_size = 64;
+ break;
+ default:
+ dev_err(enic_get_dev(enic),
+ "Unable to determine rq cq desc size: %d",
+ enic->ext_cq);
+ err = -ENODEV;
+ goto err_out;
+ }
+
/* Allocate queue resources
*/
@@ -348,8 +374,8 @@ int enic_alloc_vnic_resources(struct enic *enic)
for (i = 0; i < enic->cq_count; i++) {
if (i < enic->rq_count)
err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
- enic->config.rq_desc_count,
- sizeof(struct cq_enet_rq_desc));
+ enic->config.rq_desc_count,
+ rq_cq_desc_size);
else
err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
enic->config.wq_desc_count,
@@ -380,6 +406,39 @@ int enic_alloc_vnic_resources(struct enic *enic)
err_out_cleanup:
enic_free_vnic_resources(enic);
-
+err_out:
return err;
}
+
+/*
+ * CMD_CQ_ENTRY_SIZE_SET can fail on older hw generations that don't support
+ * that command.
+ */
+void enic_ext_cq(struct enic *enic)
+{
+ u64 a0 = CMD_CQ_ENTRY_SIZE_SET, a1 = 0;
+ int wait = 1000;
+ int ret;
+
+ spin_lock_bh(&enic->devcmd_lock);
+ ret = vnic_dev_cmd(enic->vdev, CMD_CAPABILITY, &a0, &a1, wait);
+ if (ret || a0) {
+ dev_info(&enic->pdev->dev,
+ "CMD_CQ_ENTRY_SIZE_SET not supported.");
+ enic->ext_cq = ENIC_RQ_CQ_ENTRY_SIZE_16;
+ goto out;
+ }
+ a1 &= VNIC_RQ_CQ_ENTRY_SIZE_ALL_BIT;
+ enic->ext_cq = fls(a1) - 1;
+ a0 = VNIC_RQ_ALL;
+ a1 = enic->ext_cq;
+ ret = vnic_dev_cmd(enic->vdev, CMD_CQ_ENTRY_SIZE_SET, &a0, &a1, wait);
+ if (ret) {
+ dev_info(&enic->pdev->dev, "CMD_CQ_ENTRY_SIZE_SET failed.");
+ enic->ext_cq = ENIC_RQ_CQ_ENTRY_SIZE_16;
+ }
+out:
+ spin_unlock_bh(&enic->devcmd_lock);
+ dev_info(&enic->pdev->dev, "CQ entry size set to %d bytes",
+ 16 << enic->ext_cq);
+}
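A worked example for the negotiation above: CMD_CAPABILITY reports the supported CQ entry sizes as a bitmap in a1; after masking with VNIC_RQ_CQ_ENTRY_SIZE_ALL_BIT, fls(a1) - 1 selects the largest supported size. With a1 = 0x7 (16, 32 and 64 bytes all supported), fls = 3 and ext_cq = 2 (ENIC_RQ_CQ_ENTRY_SIZE_64), so the closing dev_info prints 16 << 2 = 64 bytes; a device reporting only bit 0 stays at 16-byte entries. Restated stand-alone (fls_u32 is a stand-in for the kernel's fls()):

#include <stdio.h>

/* 1-based index of the highest set bit; 0 when no bits are set */
static int fls_u32(unsigned int x)
{
	int i = 0;

	for (; x; x >>= 1)
		i++;
	return i;
}

int main(void)
{
	unsigned int a1 = 0x7;		/* capability bitmap from CMD_CAPABILITY */
	int ext_cq = fls_u32(a1) - 1;	/* largest supported size: 2 */

	printf("CQ entry size set to %d bytes\n", 16 << ext_cq);	/* 64 */
	return 0;
}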
diff --git a/drivers/net/ethernet/cisco/enic/enic_res.h b/drivers/net/ethernet/cisco/enic/enic_res.h
index b8ee42d297aa..02dca1ae4a22 100644
--- a/drivers/net/ethernet/cisco/enic/enic_res.h
+++ b/drivers/net/ethernet/cisco/enic/enic_res.h
@@ -12,10 +12,13 @@
#include "vnic_wq.h"
#include "vnic_rq.h"
-#define ENIC_MIN_WQ_DESCS 64
-#define ENIC_MAX_WQ_DESCS 4096
-#define ENIC_MIN_RQ_DESCS 64
-#define ENIC_MAX_RQ_DESCS 4096
+#define ENIC_MIN_WQ_DESCS 64
+#define ENIC_MAX_WQ_DESCS_DEFAULT 4096
+#define ENIC_MAX_WQ_DESCS 16384
+#define ENIC_MIN_RQ_DESCS 64
+#define ENIC_MAX_RQ_DESCS 16384
+#define ENIC_MAX_RQ_DESCS_DEFAULT 4096
+#define ENIC_MAX_CQ_DESCS_DEFAULT (64 * 1024)
#define ENIC_MIN_MTU ETH_MIN_MTU
#define ENIC_MAX_MTU 9000
diff --git a/drivers/net/ethernet/cisco/enic/enic_rq.c b/drivers/net/ethernet/cisco/enic/enic_rq.c
new file mode 100644
index 000000000000..ccbf5c9a21d0
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_rq.c
@@ -0,0 +1,436 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2024 Cisco Systems, Inc. All rights reserved.
+
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <net/busy_poll.h>
+#include "enic.h"
+#include "enic_res.h"
+#include "enic_rq.h"
+#include "vnic_rq.h"
+#include "cq_enet_desc.h"
+
+#define ENIC_LARGE_PKT_THRESHOLD 1000
+
+static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
+ u32 pkt_len)
+{
+ if (pkt_len > ENIC_LARGE_PKT_THRESHOLD)
+ pkt_size->large_pkt_bytes_cnt += pkt_len;
+ else
+ pkt_size->small_pkt_bytes_cnt += pkt_len;
+}
+
+static void enic_rq_cq_desc_dec(void *cq_desc, u8 cq_desc_size, u8 *type,
+ u8 *color, u16 *q_number, u16 *completed_index)
+{
+ /* type_color is the last field for all cq structs */
+ u8 type_color;
+
+ switch (cq_desc_size) {
+ case VNIC_RQ_CQ_ENTRY_SIZE_16: {
+ struct cq_enet_rq_desc *desc =
+ (struct cq_enet_rq_desc *)cq_desc;
+ type_color = desc->type_color;
+
+ /* Make sure color bit is read from desc *before* other fields
+ * are read from desc. Hardware guarantees color bit is last
+ * bit (byte) written. Adding the rmb() prevents the compiler
+ * and/or CPU from reordering the reads which would potentially
+ * result in reading stale values.
+ */
+ rmb();
+
+ *q_number = le16_to_cpu(desc->q_number_rss_type_flags) &
+ CQ_DESC_Q_NUM_MASK;
+ *completed_index = le16_to_cpu(desc->completed_index_flags) &
+ CQ_DESC_COMP_NDX_MASK;
+ break;
+ }
+ case VNIC_RQ_CQ_ENTRY_SIZE_32: {
+ struct cq_enet_rq_desc_32 *desc =
+ (struct cq_enet_rq_desc_32 *)cq_desc;
+ type_color = desc->type_color;
+
+ /* Make sure color bit is read from desc *before* other fields
+ * are read from desc. Hardware guarantees color bit is last
+ * bit (byte) written. Adding the rmb() prevents the compiler
+ * and/or CPU from reordering the reads which would potentially
+ * result in reading stale values.
+ */
+ rmb();
+
+ *q_number = le16_to_cpu(desc->q_number_rss_type_flags) &
+ CQ_DESC_Q_NUM_MASK;
+ *completed_index = le16_to_cpu(desc->completed_index_flags) &
+ CQ_DESC_COMP_NDX_MASK;
+ *completed_index |= (desc->fetch_index_flags & CQ_DESC_32_FI_MASK) <<
+ CQ_DESC_COMP_NDX_BITS;
+ break;
+ }
+ case VNIC_RQ_CQ_ENTRY_SIZE_64: {
+ struct cq_enet_rq_desc_64 *desc =
+ (struct cq_enet_rq_desc_64 *)cq_desc;
+ type_color = desc->type_color;
+
+ /* Make sure color bit is read from desc *before* other fields
+ * are read from desc. Hardware guarantees color bit is last
+ * bit (byte) written. Adding the rmb() prevents the compiler
+ * and/or CPU from reordering the reads which would potentially
+ * result in reading stale values.
+ */
+ rmb();
+
+ *q_number = le16_to_cpu(desc->q_number_rss_type_flags) &
+ CQ_DESC_Q_NUM_MASK;
+ *completed_index = le16_to_cpu(desc->completed_index_flags) &
+ CQ_DESC_COMP_NDX_MASK;
+ *completed_index |= (desc->fetch_index_flags & CQ_DESC_64_FI_MASK) <<
+ CQ_DESC_COMP_NDX_BITS;
+ break;
+ }
+ }
+
+ *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
+ *type = type_color & CQ_DESC_TYPE_MASK;
+}
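The decode above applies one ordering rule to every descriptor size: type_color is loaded first because hardware guarantees the color byte is written last, and the rmb() keeps the compiler and CPU from hoisting the other field loads above that check. A userspace analogue of the same protocol, using a C11 acquire load where the kernel uses rmb() (illustrative only; layout simplified, color assumed in the top bit as in cq_desc.h):

#include <stdatomic.h>
#include <stdint.h>

struct cq_entry {
	uint16_t q_number;
	uint16_t completed_index;
	_Atomic uint8_t type_color;	/* producer writes this byte last */
};

/* Consume an entry only once its color bit says it is new; the
 * acquire load orders the following field reads after it.
 */
static int cq_try_read(struct cq_entry *e, uint8_t expect_color,
		       uint16_t *q, uint16_t *idx)
{
	uint8_t tc = atomic_load_explicit(&e->type_color,
					  memory_order_acquire);

	if (((tc >> 7) & 1) != expect_color)
		return 0;	/* not written yet */
	*q = e->q_number;
	*idx = e->completed_index;
	return 1;
}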
+
+static void enic_rq_set_skb_flags(struct vnic_rq *vrq, u8 type, u32 rss_hash,
+ u8 rss_type, u8 fcoe, u8 fcoe_fc_crc_ok,
+ u8 vlan_stripped, u8 csum_not_calc,
+ u8 tcp_udp_csum_ok, u8 ipv6, u8 ipv4_csum_ok,
+ u16 vlan_tci, struct sk_buff *skb)
+{
+ struct enic *enic = vnic_dev_priv(vrq->vdev);
+ struct net_device *netdev = enic->netdev;
+ struct enic_rq_stats *rqstats = &enic->rq[vrq->index].stats;
+ bool outer_csum_ok = true, encap = false;
+
+ if ((netdev->features & NETIF_F_RXHASH) && rss_hash && type == 3) {
+ switch (rss_type) {
+ case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4:
+ case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6:
+ case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX:
+ skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L4);
+ rqstats->l4_rss_hash++;
+ break;
+ case CQ_ENET_RQ_DESC_RSS_TYPE_IPv4:
+ case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6:
+ case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX:
+ skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L3);
+ rqstats->l3_rss_hash++;
+ break;
+ }
+ }
+ if (enic->vxlan.vxlan_udp_port_number) {
+ switch (enic->vxlan.patch_level) {
+ case 0:
+ if (fcoe) {
+ encap = true;
+ outer_csum_ok = fcoe_fc_crc_ok;
+ }
+ break;
+ case 2:
+ if (type == 7 && (rss_hash & BIT(0))) {
+ encap = true;
+ outer_csum_ok = (rss_hash & BIT(1)) &&
+ (rss_hash & BIT(2));
+ }
+ break;
+ }
+ }
+
+ /* Hardware does not provide the whole packet checksum. It only
+ * provides a pseudo checksum. Since hw validates the packet
+ * checksum but does not provide us the checksum value, use
+ * CHECKSUM_UNNECESSARY.
+ *
+ * In case of an encap pkt, tcp_udp_csum_ok is the
+ * inner csum_ok. outer_csum_ok is set by hw when the outer udp
+ * csum is correct or is zero.
+ */
+ if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc &&
+ tcp_udp_csum_ok && outer_csum_ok && (ipv4_csum_ok || ipv6)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum_level = encap;
+ if (encap)
+ rqstats->csum_unnecessary_encap++;
+ else
+ rqstats->csum_unnecessary++;
+ }
+
+ if (vlan_stripped) {
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
+ rqstats->vlan_stripped++;
+ }
+}
+
+/*
+ * cq_enet_rq_desc accesses only the first 15 bytes of the cq descriptor,
+ * which are identical for all types (16, 32 and 64 byte) of cqs.
+ */
+static void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc, u8 *ingress_port,
+ u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type,
+ u8 *csum_not_calc, u32 *rss_hash,
+ u16 *bytes_written, u8 *packet_error,
+ u8 *vlan_stripped, u16 *vlan_tci,
+ u16 *checksum, u8 *fcoe_sof,
+ u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error,
+ u8 *fcoe_eof, u8 *tcp_udp_csum_ok, u8 *udp,
+ u8 *tcp, u8 *ipv4_csum_ok, u8 *ipv6, u8 *ipv4,
+ u8 *ipv4_fragment, u8 *fcs_ok)
+{
+ u16 completed_index_flags;
+ u16 q_number_rss_type_flags;
+ u16 bytes_written_flags;
+
+ completed_index_flags = le16_to_cpu(desc->completed_index_flags);
+ q_number_rss_type_flags =
+ le16_to_cpu(desc->q_number_rss_type_flags);
+ bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
+
+ *ingress_port = (completed_index_flags &
+ CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
+ *fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
+ 1 : 0;
+ *eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ?
+ 1 : 0;
+ *sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ?
+ 1 : 0;
+
+ *rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) &
+ CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
+ *csum_not_calc = (q_number_rss_type_flags &
+ CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0;
+
+ *rss_hash = le32_to_cpu(desc->rss_hash);
+
+ *bytes_written = bytes_written_flags &
+ CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
+ *packet_error = (bytes_written_flags &
+ CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0;
+ *vlan_stripped = (bytes_written_flags &
+ CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0;
+
+ /*
+ * Tag Control Information(16) = user_priority(3) + cfi(1) + vlan(12)
+ */
+ *vlan_tci = le16_to_cpu(desc->vlan);
+
+ if (*fcoe) {
+ *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) &
+ CQ_ENET_RQ_DESC_FCOE_SOF_MASK);
+ *fcoe_fc_crc_ok = (desc->flags &
+ CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0;
+ *fcoe_enc_error = (desc->flags &
+ CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0;
+ *fcoe_eof = (u8)((le16_to_cpu(desc->checksum_fcoe) >>
+ CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) &
+ CQ_ENET_RQ_DESC_FCOE_EOF_MASK);
+ *checksum = 0;
+ } else {
+ *fcoe_sof = 0;
+ *fcoe_fc_crc_ok = 0;
+ *fcoe_enc_error = 0;
+ *fcoe_eof = 0;
+ *checksum = le16_to_cpu(desc->checksum_fcoe);
+ }
+
+ *tcp_udp_csum_ok =
+ (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0;
+ *udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0;
+ *tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0;
+ *ipv4_csum_ok =
+ (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0;
+ *ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0;
+ *ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0;
+ *ipv4_fragment =
+ (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0;
+ *fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0;
+}
+
+static bool enic_rq_pkt_error(struct vnic_rq *vrq, u8 packet_error, u8 fcs_ok,
+ u16 bytes_written)
+{
+ struct enic *enic = vnic_dev_priv(vrq->vdev);
+ struct enic_rq_stats *rqstats = &enic->rq[vrq->index].stats;
+
+ if (packet_error) {
+ if (!fcs_ok) {
+ if (bytes_written > 0)
+ rqstats->bad_fcs++;
+ else if (bytes_written == 0)
+ rqstats->pkt_truncated++;
+ }
+ return true;
+ }
+ return false;
+}
+
+int enic_rq_alloc_buf(struct vnic_rq *rq)
+{
+ struct enic *enic = vnic_dev_priv(rq->vdev);
+ struct net_device *netdev = enic->netdev;
+ struct enic_rq *erq = &enic->rq[rq->index];
+ struct enic_rq_stats *rqstats = &erq->stats;
+ unsigned int offset = 0;
+ unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
+ unsigned int os_buf_index = 0;
+ dma_addr_t dma_addr;
+ struct vnic_rq_buf *buf = rq->to_use;
+ struct page *page;
+ unsigned int truesize = len;
+
+ if (buf->os_buf) {
+ enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
+ buf->len);
+
+ return 0;
+ }
+
+ page = page_pool_dev_alloc(erq->pool, &offset, &truesize);
+ if (unlikely(!page)) {
+ rqstats->pp_alloc_fail++;
+ return -ENOMEM;
+ }
+ buf->offset = offset;
+ buf->truesize = truesize;
+ dma_addr = page_pool_get_dma_addr(page) + offset;
+ enic_queue_rq_desc(rq, (void *)page, os_buf_index, dma_addr, len);
+
+ return 0;
+}
+
+void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
+{
+ struct enic *enic = vnic_dev_priv(rq->vdev);
+ struct enic_rq *erq = &enic->rq[rq->index];
+
+ if (!buf->os_buf)
+ return;
+
+ page_pool_put_full_page(erq->pool, (struct page *)buf->os_buf, true);
+ buf->os_buf = NULL;
+}
+
+static void enic_rq_indicate_buf(struct enic *enic, struct vnic_rq *rq,
+ struct vnic_rq_buf *buf, void *cq_desc,
+ u8 type, u16 q_number, u16 completed_index)
+{
+ struct sk_buff *skb;
+ struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+ struct enic_rq_stats *rqstats = &enic->rq[rq->index].stats;
+ struct napi_struct *napi;
+
+ u8 eop, sop, ingress_port, vlan_stripped;
+ u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
+ u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
+ u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
+ u8 packet_error;
+ u16 bytes_written, vlan_tci, checksum;
+ u32 rss_hash;
+
+ rqstats->packets++;
+
+ cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, &ingress_port,
+ &fcoe, &eop, &sop, &rss_type, &csum_not_calc,
+ &rss_hash, &bytes_written, &packet_error,
+ &vlan_stripped, &vlan_tci, &checksum, &fcoe_sof,
+ &fcoe_fc_crc_ok, &fcoe_enc_error, &fcoe_eof,
+ &tcp_udp_csum_ok, &udp, &tcp, &ipv4_csum_ok, &ipv6,
+ &ipv4, &ipv4_fragment, &fcs_ok);
+
+ if (enic_rq_pkt_error(rq, packet_error, fcs_ok, bytes_written))
+ return;
+
+ if (eop && bytes_written > 0) {
+ /* Good receive
+ */
+ rqstats->bytes += bytes_written;
+ napi = &enic->napi[rq->index];
+ skb = napi_get_frags(napi);
+ if (unlikely(!skb)) {
+ net_warn_ratelimited("%s: skb alloc error rq[%d], desc[%d]\n",
+ enic->netdev->name, rq->index,
+ completed_index);
+ rqstats->no_skb++;
+ return;
+ }
+
+ prefetch(skb->data - NET_IP_ALIGN);
+
+ dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr,
+ bytes_written, DMA_FROM_DEVICE);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ (struct page *)buf->os_buf, buf->offset,
+ bytes_written, buf->truesize);
+ skb_record_rx_queue(skb, q_number);
+ enic_rq_set_skb_flags(rq, type, rss_hash, rss_type, fcoe,
+ fcoe_fc_crc_ok, vlan_stripped,
+ csum_not_calc, tcp_udp_csum_ok, ipv6,
+ ipv4_csum_ok, vlan_tci, skb);
+ skb_mark_for_recycle(skb);
+ napi_gro_frags(napi);
+ if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+ enic_intr_update_pkt_size(&cq->pkt_size_counter,
+ bytes_written);
+ buf->os_buf = NULL;
+ buf->dma_addr = 0;
+ buf = buf->next;
+ } else {
+ /* Buffer overflow
+ */
+ rqstats->pkt_truncated++;
+ }
+}
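Design note on the receive path above: instead of the old copybreak scheme (a private skb per buffer, with small frames memcpy'd into a fresh skb), the page-pool path takes an skb shell from napi_get_frags(), attaches the received page directly as a frag via skb_add_rx_frag(), and hands it to GRO with napi_gro_frags(); skb_mark_for_recycle() returns the page to the pool when the skb is freed instead of releasing it to the page allocator. This is also why the rx_copybreak tunable and RX_COPYBREAK_DEFAULT are deleted earlier in the series: there is no copy left to tune.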
+
+static void enic_rq_service(struct enic *enic, void *cq_desc, u8 type,
+ u16 q_number, u16 completed_index)
+{
+ struct enic_rq_stats *rqstats = &enic->rq[q_number].stats;
+ struct vnic_rq *vrq = &enic->rq[q_number].vrq;
+ struct vnic_rq_buf *vrq_buf = vrq->to_clean;
+ int skipped;
+
+ while (1) {
+ skipped = (vrq_buf->index != completed_index);
+ if (!skipped)
+ enic_rq_indicate_buf(enic, vrq, vrq_buf, cq_desc, type,
+ q_number, completed_index);
+ else
+ rqstats->desc_skip++;
+
+ vrq->ring.desc_avail++;
+ vrq->to_clean = vrq_buf->next;
+ vrq_buf = vrq_buf->next;
+ if (!skipped)
+ break;
+ }
+}
+
+unsigned int enic_rq_cq_service(struct enic *enic, unsigned int cq_index,
+ unsigned int work_to_do)
+{
+ struct vnic_cq *cq = &enic->cq[cq_index];
+ void *cq_desc = vnic_cq_to_clean(cq);
+ u16 q_number, completed_index;
+ unsigned int work_done = 0;
+ u8 type, color;
+
+ enic_rq_cq_desc_dec(cq_desc, enic->ext_cq, &type, &color, &q_number,
+ &completed_index);
+
+ while (color != cq->last_color) {
+ enic_rq_service(enic, cq_desc, type, q_number, completed_index);
+ vnic_cq_inc_to_clean(cq);
+
+ if (++work_done >= work_to_do)
+ break;
+
+ cq_desc = vnic_cq_to_clean(cq);
+ enic_rq_cq_desc_dec(cq_desc, enic->ext_cq, &type, &color,
+ &q_number, &completed_index);
+ }
+
+ return work_done;
+}
diff --git a/drivers/net/ethernet/cisco/enic/enic_rq.h b/drivers/net/ethernet/cisco/enic/enic_rq.h
new file mode 100644
index 000000000000..98476a7297af
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_rq.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright 2024 Cisco Systems, Inc. All rights reserved.
+ */
+
+unsigned int enic_rq_cq_service(struct enic *enic, unsigned int cq_index,
+ unsigned int work_to_do);
+int enic_rq_alloc_buf(struct vnic_rq *rq);
+void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
diff --git a/drivers/net/ethernet/cisco/enic/enic_wq.c b/drivers/net/ethernet/cisco/enic/enic_wq.c
new file mode 100644
index 000000000000..07936f8b4231
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_wq.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2025 Cisco Systems, Inc. All rights reserved.
+
+#include <net/netdev_queues.h>
+#include "enic_res.h"
+#include "enic.h"
+#include "enic_wq.h"
+
+#define ENET_CQ_DESC_COMP_NDX_BITS 14
+#define ENET_CQ_DESC_COMP_NDX_MASK GENMASK(ENET_CQ_DESC_COMP_NDX_BITS - 1, 0)
+
+static void enic_wq_cq_desc_dec(const struct cq_desc *desc_arg, bool ext_wq,
+ u8 *type, u8 *color, u16 *q_number,
+ u16 *completed_index)
+{
+ const struct cq_desc *desc = desc_arg;
+ const u8 type_color = desc->type_color;
+
+ *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
+
+	/* Make sure color bit is read from desc *before* other fields
+ * are read from desc. Hardware guarantees color bit is last
+ * bit (byte) written. Adding the rmb() prevents the compiler
+ * and/or CPU from reordering the reads which would potentially
+ * result in reading stale values.
+ */
+ rmb();
+
+ *type = type_color & CQ_DESC_TYPE_MASK;
+ *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
+
+ if (ext_wq)
+ *completed_index = le16_to_cpu(desc->completed_index) &
+ ENET_CQ_DESC_COMP_NDX_MASK;
+ else
+ *completed_index = le16_to_cpu(desc->completed_index) &
+ CQ_DESC_COMP_NDX_MASK;
+}
+
+void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
+{
+ struct enic *enic = vnic_dev_priv(wq->vdev);
+
+ if (buf->sop)
+ dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_page(&enic->pdev->dev, buf->dma_addr, buf->len,
+ DMA_TO_DEVICE);
+
+ if (buf->os_buf)
+ dev_kfree_skb_any(buf->os_buf);
+}
+
+static void enic_wq_free_buf(struct vnic_wq *wq, struct cq_desc *cq_desc,
+ struct vnic_wq_buf *buf, void *opaque)
+{
+ struct enic *enic = vnic_dev_priv(wq->vdev);
+
+ enic->wq[wq->index].stats.cq_work++;
+ enic->wq[wq->index].stats.cq_bytes += buf->len;
+ enic_free_wq_buf(wq, buf);
+}
+
+static void enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
+ u8 type, u16 q_number, u16 completed_index)
+{
+ struct enic *enic = vnic_dev_priv(vdev);
+
+ spin_lock(&enic->wq[q_number].lock);
+
+ vnic_wq_service(&enic->wq[q_number].vwq, cq_desc,
+ completed_index, enic_wq_free_buf, NULL);
+
+	if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
+	    vnic_wq_desc_avail(&enic->wq[q_number].vwq) >=
+ (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)) {
+ netif_wake_subqueue(enic->netdev, q_number);
+ enic->wq[q_number].stats.wake++;
+ }
+
+ spin_unlock(&enic->wq[q_number].lock);
+}
+
+unsigned int enic_wq_cq_service(struct enic *enic, unsigned int cq_index,
+ unsigned int work_to_do)
+{
+ struct vnic_cq *cq = &enic->cq[cq_index];
+ u16 q_number, completed_index;
+ unsigned int work_done = 0;
+ struct cq_desc *cq_desc;
+ u8 type, color;
+ bool ext_wq;
+
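+	/* Rings bigger than the default need the wider 14-bit completed_index */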
+ ext_wq = cq->ring.size > ENIC_MAX_WQ_DESCS_DEFAULT;
+
+ cq_desc = (struct cq_desc *)vnic_cq_to_clean(cq);
+ enic_wq_cq_desc_dec(cq_desc, ext_wq, &type, &color,
+ &q_number, &completed_index);
+
+ while (color != cq->last_color) {
+ enic_wq_service(cq->vdev, cq_desc, type, q_number,
+ completed_index);
+
+ vnic_cq_inc_to_clean(cq);
+
+ if (++work_done >= work_to_do)
+ break;
+
+ cq_desc = (struct cq_desc *)vnic_cq_to_clean(cq);
+ enic_wq_cq_desc_dec(cq_desc, ext_wq, &type, &color,
+ &q_number, &completed_index);
+ }
+
+ return work_done;
+}
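
The rmb() in enic_wq_cq_desc_dec() above is the consumer half of the
color-bit handshake: the device writes the descriptor body first and flips
the color bit (bit 7 of type_color) last, so the body must not be read
before the color. A minimal user-space model of the same ordering, with a
C11 acquire fence standing in for the kernel barrier (illustrative sketch
only, not driver code; the field layout is simplified):

#include <stdatomic.h>
#include <stdint.h>

struct cq_desc_model {
	uint16_t completed_index;
	uint16_t q_number;
	uint8_t type_color;	/* bit 7: color, written last by the device */
};

/* Poll one slot: returns 1 and fills *completed_index once the device has
 * flipped the color bit, 0 while the slot still holds stale data.
 */
static int cq_poll_one(const volatile struct cq_desc_model *d,
		       uint8_t last_color, uint16_t *completed_index)
{
	uint8_t color = (d->type_color >> 7) & 1;

	if (color == last_color)
		return 0;

	/* Order the body reads after the color read -- the stand-in for
	 * the driver's rmb().
	 */
	atomic_thread_fence(memory_order_acquire);

	*completed_index = d->completed_index;
	return 1;
}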
diff --git a/drivers/net/ethernet/cisco/enic/enic_wq.h b/drivers/net/ethernet/cisco/enic/enic_wq.h
new file mode 100644
index 000000000000..12acb3f2fbc9
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_wq.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright 2025 Cisco Systems, Inc. All rights reserved.
+ */
+
+void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf);
+unsigned int enic_wq_cq_service(struct enic *enic, unsigned int cq_index,
+ unsigned int work_to_do);
diff --git a/drivers/net/ethernet/cisco/enic/vnic_cq.h b/drivers/net/ethernet/cisco/enic/vnic_cq.h
index eed5bf59e5d2..0e37f5d5e527 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_cq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_cq.h
@@ -56,45 +56,18 @@ struct vnic_cq {
ktime_t prev_ts;
};
-static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
- unsigned int work_to_do,
- int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc,
- u8 type, u16 q_number, u16 completed_index, void *opaque),
- void *opaque)
+static inline void *vnic_cq_to_clean(struct vnic_cq *cq)
{
- struct cq_desc *cq_desc;
- unsigned int work_done = 0;
- u16 q_number, completed_index;
- u8 type, color;
-
- cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
- cq->ring.desc_size * cq->to_clean);
- cq_desc_dec(cq_desc, &type, &color,
- &q_number, &completed_index);
-
- while (color != cq->last_color) {
-
- if ((*q_service)(cq->vdev, cq_desc, type,
- q_number, completed_index, opaque))
- break;
-
- cq->to_clean++;
- if (cq->to_clean == cq->ring.desc_count) {
- cq->to_clean = 0;
- cq->last_color = cq->last_color ? 0 : 1;
- }
-
- cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
- cq->ring.desc_size * cq->to_clean);
- cq_desc_dec(cq_desc, &type, &color,
- &q_number, &completed_index);
+ return ((u8 *)cq->ring.descs + cq->ring.desc_size * cq->to_clean);
+}
- work_done++;
- if (work_done >= work_to_do)
- break;
+static inline void vnic_cq_inc_to_clean(struct vnic_cq *cq)
+{
+ cq->to_clean++;
+ if (cq->to_clean == cq->ring.desc_count) {
+ cq->to_clean = 0;
+ cq->last_color = cq->last_color ? 0 : 1;
}
-
- return work_done;
}
void vnic_cq_free(struct vnic_cq *cq);
diff --git a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
index db56d778877a..605ef17f967e 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
@@ -436,6 +436,25 @@ enum vnic_devcmd_cmd {
* in: (u16) a2 = unsigned short int port information
*/
CMD_OVERLAY_OFFLOAD_CFG = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 73),
+
+ /*
+ * Set extended CQ field in MREGS of RQ (or all RQs)
+ * for given vNIC
+ * in: (u64) a0 = RQ selection (VNIC_RQ_ALL for all RQs)
+ * (u32) a1 = CQ entry size
+ * VNIC_RQ_CQ_ENTRY_SIZE_16 --> 16 bytes
+ * VNIC_RQ_CQ_ENTRY_SIZE_32 --> 32 bytes
+ * VNIC_RQ_CQ_ENTRY_SIZE_64 --> 64 bytes
+ *
+ * Capability query:
+ * out: (u32) a0 = errno, 0:valid cmd
+ * (u32) a1 = value consisting of supported entries
+ * bit 0: 16 bytes
+ * bit 1: 32 bytes
+ * bit 2: 64 bytes
+ */
+ CMD_CQ_ENTRY_SIZE_SET = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 90),
+
};
/* CMD_ENABLE2 flags */
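
The new command follows the usual devcmd convention of passing arguments in
the a0/a1 registers through vnic_dev_cmd(). A hypothetical helper showing
how a caller might request 32-byte CQ entries on every RQ -- the helper
name and the wait value are assumptions for illustration, not code from
this series; the constants are the ones named in the comment above:

static int enic_set_cq_entry_size(struct vnic_dev *vdev)
{
	u64 a0 = VNIC_RQ_ALL;			/* apply to all RQs */
	u64 a1 = VNIC_RQ_CQ_ENTRY_SIZE_32;	/* 32-byte CQ entries */
	int wait = 1000;			/* assumed timeout */

	return vnic_dev_cmd(vdev, CMD_CQ_ENTRY_SIZE_SET, &a0, &a1, wait);
}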
diff --git a/drivers/net/ethernet/cisco/enic/vnic_enet.h b/drivers/net/ethernet/cisco/enic/vnic_enet.h
index 5acc236069de..9e8e86262a3f 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_enet.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_enet.h
@@ -21,6 +21,11 @@ struct vnic_enet_config {
u16 loop_tag;
u16 vf_rq_count;
u16 num_arfs;
+ u8 reserved[66];
+	u32 max_rq_ring;	/* MAX RQ ring size */
+	u32 max_wq_ring;	/* MAX WQ ring size */
+	u32 max_cq_ring;	/* MAX CQ ring size */
+	u32 rdma_rsvd_lkey;	/* Reserved (privileged) LKey */
};
#define VENETF_TSO 0x1 /* TSO enabled */
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.h b/drivers/net/ethernet/cisco/enic/vnic_rq.h
index 0bc595abc03b..a1cdd729caec 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.h
@@ -50,7 +50,7 @@ struct vnic_rq_ctrl {
(VNIC_RQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_rq_buf))
#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES(entries))
-#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)
+#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(16384)
struct vnic_rq_buf {
struct vnic_rq_buf *next;
@@ -61,6 +61,8 @@ struct vnic_rq_buf {
unsigned int index;
void *desc;
uint64_t wr_id;
+ unsigned int offset;
+ unsigned int truesize;
};
enum enic_poll_state {
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.h b/drivers/net/ethernet/cisco/enic/vnic_wq.h
index 75c526911074..3bb4758100ba 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.h
@@ -62,7 +62,7 @@ struct vnic_wq_buf {
(VNIC_WQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_wq_buf))
#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES(entries))
-#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)
+#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(16384)
struct vnic_wq {
unsigned int index;
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 991e3839858b..517a15904fb0 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -40,6 +40,7 @@
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <net/gro.h>
#include "gemini.h"
@@ -1833,9 +1834,8 @@ static int gmac_open(struct net_device *netdev)
gmac_enable_tx_rx(netdev);
netif_tx_start_all_queues(netdev);
- hrtimer_init(&port->rx_coalesce_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- port->rx_coalesce_timer.function = &gmac_coalesce_delay_expired;
+ hrtimer_setup(&port->rx_coalesce_timer, &gmac_coalesce_delay_expired, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
netdev_dbg(netdev, "opened\n");
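
The same mechanical conversion recurs in ec_bhf and fec_ptp below:
hrtimer_setup() takes the callback at initialization time instead of leaving
a window where the timer exists with no handler assigned. Reduced to the
bare pattern (sketch with hypothetical names, not a specific driver):

static enum hrtimer_restart my_timer_fn(struct hrtimer *t);

static void my_open_old(struct my_priv *priv)
{
	/* before: two steps, .function briefly unset */
	hrtimer_init(&priv->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	priv->timer.function = my_timer_fn;
}

static void my_open_new(struct my_priv *priv)
{
	/* after: one call, handler wired up from the start */
	hrtimer_setup(&priv->timer, my_timer_fn, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL);
}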
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 27e01d780cd0..75eac18ff246 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1177,7 +1177,6 @@ static void set_rx_mode(struct net_device *dev)
iowrite32(csr6, ioaddr + CSR6);
}
-#ifdef CONFIG_TULIP_MWI
static void tulip_mwi_config(struct pci_dev *pdev, struct net_device *dev)
{
struct tulip_private *tp = netdev_priv(dev);
@@ -1251,7 +1250,6 @@ out:
netdev_dbg(dev, "MWI config cacheline=%d, csr0=%08x\n",
cache, csr0);
}
-#endif
/*
* Chips that have the MRM/reserved bit quirk and the burst quirk. That
@@ -1463,10 +1461,9 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task);
-#ifdef CONFIG_TULIP_MWI
- if (!force_csr0 && (tp->flags & HAS_PCI_MWI))
+ if (IS_ENABLED(CONFIG_TULIP_MWI) && !force_csr0 &&
+ (tp->flags & HAS_PCI_MWI))
tulip_mwi_config (pdev, dev);
-#endif
/* Stop the chip's Tx and Rx processes. */
tulip_stop_rxtx(tp);
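
Dropping the #ifdef pair works because IS_ENABLED() evaluates to a
compile-time 0 or 1: the call stays visible to the compiler, so
tulip_mwi_config() is still referenced and type-checked in every
configuration, but it is eliminated as dead code when the option is off. A
self-contained model of the effect -- the real kernel macro is more
elaborate (it detects CONFIG_ symbols being defined), this stub only mimics
its 0/1 result:

#include <stdio.h>

#define CONFIG_TULIP_MWI 0		/* assume the feature is compiled out */
#define IS_ENABLED(option) option	/* stand-in for the kernel macro */

static void tulip_mwi_config(void)
{
	/* Still parsed and type-checked, but unreachable when the option
	 * is 0, so the optimizer drops the call and the function body.
	 */
	puts("configuring MWI");
}

int main(void)
{
	if (IS_ENABLED(CONFIG_TULIP_MWI))
		tulip_mwi_config();
	return 0;
}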
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
index 44af1d13d931..67275aa4f65b 100644
--- a/drivers/net/ethernet/ec_bhf.c
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -416,8 +416,7 @@ static int ec_bhf_open(struct net_device *net_dev)
netif_start_queue(net_dev);
- hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- priv->hrtimer.function = ec_bhf_timer_fun;
+ hrtimer_setup(&priv->hrtimer, ec_bhf_timer_fun, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer_start(&priv->hrtimer, polling_frequency, HRTIMER_MODE_REL);
return 0;
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index 0d030cb0b21c..625245b0845c 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -221,20 +221,19 @@ static void tsnep_phy_link_status_change(struct net_device *netdev)
static int tsnep_phy_loopback(struct tsnep_adapter *adapter, bool enable)
{
- int retval;
-
- retval = phy_loopback(adapter->phydev, enable);
+ int speed;
- /* PHY link state change is not signaled if loopback is enabled, it
- * would delay a working loopback anyway, let's ensure that loopback
- * is working immediately by setting link mode directly
- */
- if (!retval && enable) {
- netif_carrier_on(adapter->netdev);
- tsnep_set_link_mode(adapter);
+ if (enable) {
+ if (adapter->phydev->autoneg == AUTONEG_DISABLE &&
+ adapter->phydev->speed == SPEED_100)
+ speed = SPEED_100;
+ else
+ speed = SPEED_1000;
+ } else {
+ speed = 0;
}
- return retval;
+ return phy_loopback(adapter->phydev, enable, speed);
}
static int tsnep_phy_open(struct tsnep_adapter *adapter)
@@ -852,8 +851,8 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
struct skb_shared_hwtstamps hwtstamps;
u64 timestamp;
- if (skb_shinfo(entry->skb)->tx_flags &
- SKBTX_HW_TSTAMP_USE_CYCLES)
+ if (entry->skb->sk &&
+ READ_ONCE(entry->skb->sk->sk_tsflags) & SOF_TIMESTAMPING_BIND_PHC)
timestamp =
__le64_to_cpu(entry->desc_wb->counter);
else
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index f7c4ce8e9a26..a86cfebedaa8 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1093,6 +1093,29 @@ static void fec_enet_enable_ring(struct net_device *ndev)
}
}
+/* Whack a reset. We should wait for this.
+ * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
+ * instead of reset MAC itself.
+ */
+static void fec_ctrl_reset(struct fec_enet_private *fep, bool allow_wol)
+{
+ u32 val;
+
+ if (!allow_wol || !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
+ if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES ||
+ ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) {
+ writel(0, fep->hwp + FEC_ECNTRL);
+ } else {
+ writel(FEC_ECR_RESET, fep->hwp + FEC_ECNTRL);
+ udelay(10);
+ }
+ } else {
+ val = readl(fep->hwp + FEC_ECNTRL);
+ val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
+ writel(val, fep->hwp + FEC_ECNTRL);
+ }
+}
+
/*
* This function is called to start or restart the FEC during a link
* change, transmit timeout, or to reconfigure the FEC. The network
@@ -1109,17 +1132,7 @@ fec_restart(struct net_device *ndev)
if (fep->bufdesc_ex)
fec_ptp_save_state(fep);
- /* Whack a reset. We should wait for this.
- * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
- * instead of reset MAC itself.
- */
- if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES ||
- ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) {
- writel(0, fep->hwp + FEC_ECNTRL);
- } else {
- writel(1, fep->hwp + FEC_ECNTRL);
- udelay(10);
- }
+ fec_ctrl_reset(fep, false);
/*
* enet-mac reset will reset mac address registers too,
@@ -1373,22 +1386,7 @@ fec_stop(struct net_device *ndev)
if (fep->bufdesc_ex)
fec_ptp_save_state(fep);
- /* Whack a reset. We should wait for this.
- * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
- * instead of reset MAC itself.
- */
- if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
- if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
- writel(0, fep->hwp + FEC_ECNTRL);
- } else {
- writel(FEC_ECR_RESET, fep->hwp + FEC_ECNTRL);
- udelay(10);
- }
- } else {
- val = readl(fep->hwp + FEC_ECNTRL);
- val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
- writel(val, fep->hwp + FEC_ECNTRL);
- }
+ fec_ctrl_reset(fep, true);
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 7f6b57432071..876d90832596 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -30,7 +30,6 @@
#include <linux/phy.h>
#include <linux/fec.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_net.h>
#include "fec.h"
@@ -739,8 +738,8 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep);
- hrtimer_init(&fep->perout_timer, CLOCK_REALTIME, HRTIMER_MODE_REL);
- fep->perout_timer.function = fec_ptp_pps_perout_handler;
+ hrtimer_setup(&fep->perout_timer, fec_ptp_pps_perout_handler, CLOCK_REALTIME,
+ HRTIMER_MODE_REL);
irq = platform_get_irq_byname_optional(pdev, "pps");
if (irq < 0)
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index b3e2a596ad2c..51402dff72c5 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -1446,7 +1446,6 @@ int dtsec_initialization(struct mac_device *mac_dev,
goto _return_fm_mac_free;
}
dtsec->pcs.ops = &dtsec_pcs_ops;
- dtsec->pcs.neg_mode = true;
dtsec->pcs.poll = true;
supported = mac_dev->phylink_config.supported_interfaces;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 435138f4699d..deb35b38c976 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1647,20 +1647,11 @@ static void gfar_configure_serdes(struct net_device *dev)
*/
static int init_phy(struct net_device *dev)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct gfar_private *priv = netdev_priv(dev);
phy_interface_t interface = priv->interface;
struct phy_device *phydev;
struct ethtool_keee edata;
- linkmode_set_bit_array(phy_10_100_features_array,
- ARRAY_SIZE(phy_10_100_features_array),
- mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask);
-
priv->oldlink = 0;
priv->oldspeed = 0;
priv->oldduplex = -1;
@@ -1675,9 +1666,8 @@ static int init_phy(struct net_device *dev)
if (interface == PHY_INTERFACE_MODE_SGMII)
gfar_configure_serdes(dev);
- /* Remove any features not supported by the controller */
- linkmode_and(phydev->supported, phydev->supported, mask);
- linkmode_copy(phydev->advertising, phydev->supported);
+ if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT))
+ phy_set_max_speed(phydev, SPEED_100);
/* Add support for flow control */
phy_support_asym_pause(phydev);
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 88510f822759..affd5a6c44e7 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3408,7 +3408,7 @@ static int ucc_geth_parse_clock(struct device_node *np, const char *which,
return 0;
}
-struct phylink_mac_ops ugeth_mac_ops = {
+static const struct phylink_mac_ops ugeth_mac_ops = {
.mac_link_up = ugeth_mac_link_up,
.mac_link_down = ugeth_mac_link_down,
.mac_config = ugeth_mac_config,
diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
index 38789faae706..84f92f6384e7 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.h
+++ b/drivers/net/ethernet/freescale/ucc_geth.h
@@ -890,8 +890,6 @@ struct ucc_geth_hardware_statistics {
addresses */
#define TX_TIMEOUT (1*HZ)
-#define PHY_INIT_TIMEOUT 100000
-#define PHY_CHANGE_TIME 2
/* Fast Ethernet (10/100 Mbps) */
#define UCC_GETH_URFS_INIT 512 /* Rx virtual FIFO size
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 78d2a19593d1..2fab38c8ee78 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -59,6 +59,8 @@
#define GVE_MAX_RX_BUFFER_SIZE 4096
+#define GVE_XDP_RX_BUFFER_SIZE_DQO 4096
+
#define GVE_DEFAULT_RX_BUFFER_OFFSET 2048
#define GVE_PAGE_POOL_SIZE_MULTIPLIER 4
@@ -68,6 +70,9 @@
#define GVE_FLOW_RULE_IDS_CACHE_SIZE \
(GVE_ADMINQ_BUFFER_SIZE / sizeof(((struct gve_adminq_queried_flow_rule *)0)->location))
+#define GVE_RSS_KEY_SIZE 40
+#define GVE_RSS_INDIR_SIZE 128
+
#define GVE_XDP_ACTIONS 5
#define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182
@@ -102,7 +107,13 @@ struct gve_rx_desc_queue {
/* The page info for a single slot in the RX data queue */
struct gve_rx_slot_page_info {
- struct page *page;
+ /* netmem is used for DQO RDA mode
+ * page is used in all other modes
+ */
+ union {
+ struct page *page;
+ netmem_ref netmem;
+ };
void *page_address;
u32 page_offset; /* offset to write to in page */
unsigned int buf_size;
@@ -218,6 +229,11 @@ struct gve_rx_cnts {
/* Contains datapath state used to represent an RX queue. */
struct gve_rx_ring {
struct gve_priv *gve;
+
+ u16 packet_buffer_size; /* Size of buffer posted to NIC */
+ u16 packet_buffer_truesize; /* Total size of RX buffer */
+ u16 rx_headroom;
+
union {
/* GQI fields */
struct {
@@ -226,7 +242,6 @@ struct gve_rx_ring {
/* threshold for posting new buffs and descs */
u32 db_threshold;
- u16 packet_buffer_size;
u32 qpl_copy_pool_mask;
u32 qpl_copy_pool_head;
@@ -604,8 +619,6 @@ struct gve_tx_ring {
dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
struct u64_stats_sync statss; /* sync stats for 32bit archs */
struct xsk_buff_pool *xsk_pool;
- u32 xdp_xsk_wakeup;
- u32 xdp_xsk_done;
u64 xdp_xsk_sent;
u64 xdp_xmit;
u64 xdp_xmit_errors;
@@ -624,10 +637,18 @@ struct gve_notify_block {
u32 irq;
};
-/* Tracks allowed and current queue settings */
-struct gve_queue_config {
+/* Tracks allowed and current rx queue settings */
+struct gve_rx_queue_config {
+ u16 max_queues;
+ u16 num_queues;
+ u16 packet_buffer_size;
+};
+
+/* Tracks allowed and current tx queue settings */
+struct gve_tx_queue_config {
u16 max_queues;
- u16 num_queues; /* current */
+ u16 num_queues; /* number of TX queues, excluding XDP queues */
+ u16 num_xdp_queues;
};
/* Tracks the available and used qpl IDs */
@@ -651,11 +672,11 @@ struct gve_ptype_lut {
/* Parameters for allocating resources for tx queues */
struct gve_tx_alloc_rings_cfg {
- struct gve_queue_config *qcfg;
+ struct gve_tx_queue_config *qcfg;
+
+ u16 num_xdp_rings;
u16 ring_size;
- u16 start_idx;
- u16 num_rings;
bool raw_addressing;
/* Allocated resources are returned here */
@@ -665,13 +686,15 @@ struct gve_tx_alloc_rings_cfg {
/* Parameters for allocating resources for rx queues */
struct gve_rx_alloc_rings_cfg {
/* tx config is also needed to determine QPL ids */
- struct gve_queue_config *qcfg;
- struct gve_queue_config *qcfg_tx;
+ struct gve_rx_queue_config *qcfg_rx;
+ struct gve_tx_queue_config *qcfg_tx;
u16 ring_size;
u16 packet_buffer_size;
bool raw_addressing;
bool enable_header_split;
+ bool reset_rss;
+ bool xdp;
/* Allocated resources are returned here */
struct gve_rx_ring *rx;
@@ -722,6 +745,11 @@ struct gve_flow_rules_cache {
u32 rule_ids_cache_num;
};
+struct gve_rss_config {
+ u8 *hash_key;
+ u32 *hash_lut;
+};
+
struct gve_priv {
struct net_device *dev;
struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
@@ -751,9 +779,8 @@ struct gve_priv {
u32 rx_copybreak; /* copy packets smaller than this */
u16 default_num_queues; /* default num queues to set up */
- u16 num_xdp_queues;
- struct gve_queue_config tx_cfg;
- struct gve_queue_config rx_cfg;
+ struct gve_tx_queue_config tx_cfg;
+ struct gve_rx_queue_config rx_cfg;
u32 num_ntfy_blks; /* spilt between TX and RX so must be even */
struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
@@ -823,7 +850,6 @@ struct gve_priv {
struct gve_ptype_lut *ptype_lut_dqo;
/* Must be a power of two. */
- u16 data_buffer_size_dqo;
u16 max_rx_buffer_size; /* device limit */
enum gve_queue_format queue_format;
@@ -842,6 +868,8 @@ struct gve_priv {
u16 rss_key_size;
u16 rss_lut_size;
+ bool cache_rss_config;
+ struct gve_rss_config rss_config;
};
enum gve_service_task_flags_bit {
@@ -1024,27 +1052,16 @@ static inline bool gve_is_qpl(struct gve_priv *priv)
}
/* Returns the number of tx queue page lists */
-static inline u32 gve_num_tx_qpls(const struct gve_queue_config *tx_cfg,
- int num_xdp_queues,
+static inline u32 gve_num_tx_qpls(const struct gve_tx_queue_config *tx_cfg,
bool is_qpl)
{
if (!is_qpl)
return 0;
- return tx_cfg->num_queues + num_xdp_queues;
-}
-
-/* Returns the number of XDP tx queue page lists
- */
-static inline u32 gve_num_xdp_qpls(struct gve_priv *priv)
-{
- if (priv->queue_format != GVE_GQI_QPL_FORMAT)
- return 0;
-
- return priv->num_xdp_queues;
+ return tx_cfg->num_queues + tx_cfg->num_xdp_queues;
}
/* Returns the number of rx queue page lists */
-static inline u32 gve_num_rx_qpls(const struct gve_queue_config *rx_cfg,
+static inline u32 gve_num_rx_qpls(const struct gve_rx_queue_config *rx_cfg,
bool is_qpl)
{
if (!is_qpl)
@@ -1062,7 +1079,8 @@ static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
return priv->tx_cfg.max_queues + rx_qid;
}
-static inline u32 gve_get_rx_qpl_id(const struct gve_queue_config *tx_cfg, int rx_qid)
+static inline u32 gve_get_rx_qpl_id(const struct gve_tx_queue_config *tx_cfg,
+ int rx_qid)
{
return tx_cfg->max_queues + rx_qid;
}
@@ -1072,7 +1090,7 @@ static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
return gve_tx_qpl_id(priv, 0);
}
-static inline u32 gve_rx_start_qpl_id(const struct gve_queue_config *tx_cfg)
+static inline u32 gve_rx_start_qpl_id(const struct gve_tx_queue_config *tx_cfg)
{
return gve_get_rx_qpl_id(tx_cfg, 0);
}
@@ -1103,7 +1121,7 @@ static inline bool gve_is_gqi(struct gve_priv *priv)
static inline u32 gve_num_tx_queues(struct gve_priv *priv)
{
- return priv->tx_cfg.num_queues + priv->num_xdp_queues;
+ return priv->tx_cfg.num_queues + priv->tx_cfg.num_xdp_queues;
}
static inline u32 gve_xdp_tx_queue_id(struct gve_priv *priv, u32 queue_id)
@@ -1207,7 +1225,8 @@ void gve_free_buffer(struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state);
int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc);
struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
- struct gve_rx_ring *rx);
+ struct gve_rx_ring *rx,
+ bool xdp);
/* Reset */
void gve_schedule_reset(struct gve_priv *priv);
@@ -1219,14 +1238,17 @@ int gve_adjust_config(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
int gve_adjust_queues(struct gve_priv *priv,
- struct gve_queue_config new_rx_config,
- struct gve_queue_config new_tx_config);
+ struct gve_rx_queue_config new_rx_config,
+ struct gve_tx_queue_config new_tx_config,
+ bool reset_rss);
/* flow steering rule */
int gve_get_flow_rule_entry(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
int gve_get_flow_rule_ids(struct gve_priv *priv, struct ethtool_rxnfc *cmd, u32 *rule_locs);
int gve_add_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
int gve_del_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
int gve_flow_rules_reset(struct gve_priv *priv);
+/* RSS config */
+int gve_init_rss_config(struct gve_priv *priv, u16 num_queues);
/* report stats handling */
void gve_handle_report_stats(struct gve_priv *priv);
/* exported by ethtool.c */
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index aa7d723011d0..3e8fc33cc11f 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -731,6 +731,7 @@ static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv,
.ntfy_id = cpu_to_be32(rx->ntfy_id),
.queue_resources_addr = cpu_to_be64(rx->q_resources_bus),
.rx_ring_size = cpu_to_be16(priv->rx_desc_cnt),
+ .packet_buffer_size = cpu_to_be16(rx->packet_buffer_size),
};
if (gve_is_gqi(priv)) {
@@ -743,7 +744,6 @@ static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv,
cpu_to_be64(rx->data.data_bus);
cmd->create_rx_queue.index = cpu_to_be32(queue_index);
cmd->create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
- cmd->create_rx_queue.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size);
} else {
u32 qpl_id = 0;
@@ -756,8 +756,6 @@ static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv,
cpu_to_be64(rx->dqo.complq.bus);
cmd->create_rx_queue.rx_data_ring_addr =
cpu_to_be64(rx->dqo.bufq.bus);
- cmd->create_rx_queue.packet_buffer_size =
- cpu_to_be16(priv->data_buffer_size_dqo);
cmd->create_rx_queue.rx_buff_ring_size =
cpu_to_be16(priv->rx_desc_cnt);
cmd->create_rx_queue.enable_rsc =
@@ -885,6 +883,15 @@ static void gve_set_default_desc_cnt(struct gve_priv *priv,
priv->min_rx_desc_cnt = priv->rx_desc_cnt;
}
+static void gve_set_default_rss_sizes(struct gve_priv *priv)
+{
+ if (!gve_is_gqi(priv)) {
+ priv->rss_key_size = GVE_RSS_KEY_SIZE;
+ priv->rss_lut_size = GVE_RSS_INDIR_SIZE;
+ priv->cache_rss_config = true;
+ }
+}
+
static void gve_enable_supported_features(struct gve_priv *priv,
u32 supported_features_mask,
const struct gve_device_option_jumbo_frames
@@ -968,6 +975,10 @@ static void gve_enable_supported_features(struct gve_priv *priv,
be16_to_cpu(dev_op_rss_config->hash_key_size);
priv->rss_lut_size =
be16_to_cpu(dev_op_rss_config->hash_lut_size);
+ priv->cache_rss_config = false;
+ dev_dbg(&priv->pdev->dev,
+ "RSS device option enabled with key size of %u, lut size of %u.\n",
+ priv->rss_key_size, priv->rss_lut_size);
}
}
@@ -1052,6 +1063,8 @@ int gve_adminq_describe_device(struct gve_priv *priv)
/* set default descriptor counts */
gve_set_default_desc_cnt(priv, descriptor);
+ gve_set_default_rss_sizes(priv);
+
/* DQO supports LRO. */
if (!gve_is_gqi(priv))
priv->dev->hw_features |= NETIF_F_LRO;
@@ -1276,8 +1289,9 @@ int gve_adminq_reset_flow_rules(struct gve_priv *priv)
int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *rxfh)
{
+ const u32 *hash_lut_to_config = NULL;
+ const u8 *hash_key_to_config = NULL;
dma_addr_t lut_bus = 0, key_bus = 0;
- u16 key_size = 0, lut_size = 0;
union gve_adminq_command cmd;
__be32 *lut = NULL;
u8 hash_alg = 0;
@@ -1287,7 +1301,7 @@ int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *r
switch (rxfh->hfunc) {
case ETH_RSS_HASH_NO_CHANGE:
- break;
+ fallthrough;
case ETH_RSS_HASH_TOP:
hash_alg = ETH_RSS_HASH_TOP;
break;
@@ -1296,27 +1310,46 @@ int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *r
}
if (rxfh->indir) {
- lut_size = priv->rss_lut_size;
+ if (rxfh->indir_size != priv->rss_lut_size)
+ return -EINVAL;
+
+ hash_lut_to_config = rxfh->indir;
+ } else if (priv->cache_rss_config) {
+ hash_lut_to_config = priv->rss_config.hash_lut;
+ }
+
+ if (hash_lut_to_config) {
lut = dma_alloc_coherent(&priv->pdev->dev,
- lut_size * sizeof(*lut),
+ priv->rss_lut_size * sizeof(*lut),
&lut_bus, GFP_KERNEL);
if (!lut)
return -ENOMEM;
for (i = 0; i < priv->rss_lut_size; i++)
- lut[i] = cpu_to_be32(rxfh->indir[i]);
+ lut[i] = cpu_to_be32(hash_lut_to_config[i]);
}
if (rxfh->key) {
- key_size = priv->rss_key_size;
+ if (rxfh->key_size != priv->rss_key_size) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ hash_key_to_config = rxfh->key;
+ } else if (priv->cache_rss_config) {
+ hash_key_to_config = priv->rss_config.hash_key;
+ }
+
+ if (hash_key_to_config) {
key = dma_alloc_coherent(&priv->pdev->dev,
- key_size, &key_bus, GFP_KERNEL);
+ priv->rss_key_size,
+ &key_bus, GFP_KERNEL);
if (!key) {
err = -ENOMEM;
goto out;
}
- memcpy(key, rxfh->key, key_size);
+ memcpy(key, hash_key_to_config, priv->rss_key_size);
}
/* Zero-valued fields in the cmd.configure_rss instruct the device to
@@ -1330,8 +1363,10 @@ int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *r
BIT(GVE_RSS_HASH_TCPV6) |
BIT(GVE_RSS_HASH_UDPV6)),
.hash_alg = hash_alg,
- .hash_key_size = cpu_to_be16(key_size),
- .hash_lut_size = cpu_to_be16(lut_size),
+ .hash_key_size =
+ cpu_to_be16((key_bus) ? priv->rss_key_size : 0),
+ .hash_lut_size =
+ cpu_to_be16((lut_bus) ? priv->rss_lut_size : 0),
.hash_key_addr = cpu_to_be64(key_bus),
.hash_lut_addr = cpu_to_be64(lut_bus),
};
@@ -1341,11 +1376,11 @@ int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *r
out:
if (lut)
dma_free_coherent(&priv->pdev->dev,
- lut_size * sizeof(*lut),
+ priv->rss_lut_size * sizeof(*lut),
lut, lut_bus);
if (key)
dma_free_coherent(&priv->pdev->dev,
- key_size, key, key_bus);
+ priv->rss_key_size, key, key_bus);
return err;
}
@@ -1449,12 +1484,15 @@ static int gve_adminq_process_rss_query(struct gve_priv *priv,
rxfh->hfunc = descriptor->hash_alg;
rss_info_addr = (void *)(descriptor + 1);
- if (rxfh->key)
+ if (rxfh->key) {
+ rxfh->key_size = priv->rss_key_size;
memcpy(rxfh->key, rss_info_addr, priv->rss_key_size);
+ }
rss_info_addr += priv->rss_key_size;
lut = (__be32 *)rss_info_addr;
if (rxfh->indir) {
+ rxfh->indir_size = priv->rss_lut_size;
for (i = 0; i < priv->rss_lut_size; i++)
rxfh->indir[i] = be32_to_cpu(lut[i]);
}
diff --git a/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
index 403f0f335ba6..a71883e1d920 100644
--- a/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
@@ -139,7 +139,8 @@ int gve_alloc_qpl_page_dqo(struct gve_rx_ring *rx,
buf_state->page_info.page_offset = 0;
buf_state->page_info.page_address =
page_address(buf_state->page_info.page);
- buf_state->page_info.buf_size = priv->data_buffer_size_dqo;
+ buf_state->page_info.buf_size = rx->packet_buffer_truesize;
+ buf_state->page_info.pad = rx->rx_headroom;
buf_state->last_single_ref_offset = 0;
/* The page already has 1 ref. */
@@ -162,7 +163,7 @@ void gve_free_qpl_page_dqo(struct gve_rx_buf_state_dqo *buf_state)
void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state)
{
- const u16 data_buffer_size = priv->data_buffer_size_dqo;
+ const u16 data_buffer_size = rx->packet_buffer_truesize;
int pagecount;
/* Can't reuse if we only fit one buffer per page */
@@ -205,38 +206,40 @@ void gve_free_to_page_pool(struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state,
bool allow_direct)
{
- struct page *page = buf_state->page_info.page;
+ netmem_ref netmem = buf_state->page_info.netmem;
- if (!page)
+ if (!netmem)
return;
- page_pool_put_full_page(page->pp, page, allow_direct);
- buf_state->page_info.page = NULL;
+ page_pool_put_full_netmem(netmem_get_pp(netmem), netmem, allow_direct);
+ buf_state->page_info.netmem = 0;
}
static int gve_alloc_from_page_pool(struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state)
{
- struct gve_priv *priv = rx->gve;
- struct page *page;
+ netmem_ref netmem;
- buf_state->page_info.buf_size = priv->data_buffer_size_dqo;
- page = page_pool_alloc(rx->dqo.page_pool,
- &buf_state->page_info.page_offset,
- &buf_state->page_info.buf_size, GFP_ATOMIC);
+ buf_state->page_info.buf_size = rx->packet_buffer_truesize;
+ netmem = page_pool_alloc_netmem(rx->dqo.page_pool,
+ &buf_state->page_info.page_offset,
+ &buf_state->page_info.buf_size,
+ GFP_ATOMIC);
- if (!page)
+ if (!netmem)
return -ENOMEM;
- buf_state->page_info.page = page;
- buf_state->page_info.page_address = page_address(page);
- buf_state->addr = page_pool_get_dma_addr(page);
+ buf_state->page_info.netmem = netmem;
+ buf_state->page_info.page_address = netmem_address(netmem);
+ buf_state->addr = page_pool_get_dma_addr_netmem(netmem);
+ buf_state->page_info.pad = rx->dqo.page_pool->p.offset;
return 0;
}
struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
- struct gve_rx_ring *rx)
+ struct gve_rx_ring *rx,
+ bool xdp)
{
u32 ntfy_id = gve_rx_idx_to_ntfy(priv, rx->q_num);
struct page_pool_params pp = {
@@ -247,7 +250,8 @@ struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
.netdev = priv->dev,
.napi = &priv->ntfy_blocks[ntfy_id].napi,
.max_len = PAGE_SIZE,
- .dma_dir = DMA_FROM_DEVICE,
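+		/* For XDP the device writes (RX) and reads (XDP_TX) the same
+		 * buffer, and the program needs headroom ahead of the frame.
+		 */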
+ .dma_dir = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
+ .offset = xdp ? XDP_PACKET_HEADROOM : 0,
};
return page_pool_create(&pp);
@@ -269,7 +273,7 @@ void gve_reuse_buffer(struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state)
{
if (rx->dqo.page_pool) {
- buf_state->page_info.page = NULL;
+ buf_state->page_info.netmem = 0;
gve_free_buf_state(rx, buf_state);
} else {
gve_dec_pagecnt_bias(&buf_state->page_info);
@@ -301,7 +305,8 @@ int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc)
}
desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states);
desc->buf_addr = cpu_to_le64(buf_state->addr +
- buf_state->page_info.page_offset);
+ buf_state->page_info.page_offset +
+ buf_state->page_info.pad);
return 0;
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index bdfc6e77b2af..31a21ccf4863 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -63,8 +63,8 @@ static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = {
"tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_consumed_desc[%u]", "tx_bytes[%u]",
"tx_wake[%u]", "tx_stop[%u]", "tx_event_counter[%u]",
- "tx_dma_mapping_error[%u]", "tx_xsk_wakeup[%u]",
- "tx_xsk_done[%u]", "tx_xsk_sent[%u]", "tx_xdp_xmit[%u]", "tx_xdp_xmit_errors[%u]"
+ "tx_dma_mapping_error[%u]",
+ "tx_xsk_sent[%u]", "tx_xdp_xmit[%u]", "tx_xdp_xmit_errors[%u]"
};
static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = {
@@ -417,9 +417,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = value;
}
}
- /* XDP xsk counters */
- data[i++] = tx->xdp_xsk_wakeup;
- data[i++] = tx->xdp_xsk_done;
+ /* XDP counters */
do {
start = u64_stats_fetch_begin(&priv->tx[ring].statss);
data[i] = tx->xdp_xsk_sent;
@@ -477,11 +475,12 @@ static int gve_set_channels(struct net_device *netdev,
struct ethtool_channels *cmd)
{
struct gve_priv *priv = netdev_priv(netdev);
- struct gve_queue_config new_tx_cfg = priv->tx_cfg;
- struct gve_queue_config new_rx_cfg = priv->rx_cfg;
+ struct gve_tx_queue_config new_tx_cfg = priv->tx_cfg;
+ struct gve_rx_queue_config new_rx_cfg = priv->rx_cfg;
struct ethtool_channels old_settings;
int new_tx = cmd->tx_count;
int new_rx = cmd->rx_count;
+ bool reset_rss = false;
gve_get_channels(netdev, &old_settings);
@@ -492,22 +491,27 @@ static int gve_set_channels(struct net_device *netdev,
if (!new_rx || !new_tx)
return -EINVAL;
- if (priv->num_xdp_queues &&
- (new_tx != new_rx || (2 * new_tx > priv->tx_cfg.max_queues))) {
- dev_err(&priv->pdev->dev, "XDP load failed: The number of configured RX queues should be equal to the number of configured TX queues and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues");
- return -EINVAL;
- }
+ if (priv->xdp_prog) {
+ if (new_tx != new_rx ||
+ (2 * new_tx > priv->tx_cfg.max_queues)) {
+ dev_err(&priv->pdev->dev, "The number of configured RX queues should be equal to the number of configured TX queues and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues when XDP program is installed");
+ return -EINVAL;
+ }
- if (!netif_running(netdev)) {
- priv->tx_cfg.num_queues = new_tx;
- priv->rx_cfg.num_queues = new_rx;
- return 0;
+ /* One XDP TX queue per RX queue. */
+ new_tx_cfg.num_xdp_queues = new_rx;
+ } else {
+ new_tx_cfg.num_xdp_queues = 0;
}
+ if (new_rx != priv->rx_cfg.num_queues &&
+ priv->cache_rss_config && !netif_is_rxfh_configured(netdev))
+ reset_rss = true;
+
new_tx_cfg.num_queues = new_tx;
new_rx_cfg.num_queues = new_rx;
- return gve_adjust_queues(priv, new_rx_cfg, new_tx_cfg);
+ return gve_adjust_queues(priv, new_rx_cfg, new_tx_cfg, reset_rss);
}
static void gve_get_ringparam(struct net_device *netdev,
@@ -643,8 +647,7 @@ static int gve_set_tunable(struct net_device *netdev,
switch (etuna->id) {
case ETHTOOL_RX_COPYBREAK:
{
- u32 max_copybreak = gve_is_gqi(priv) ?
- GVE_DEFAULT_RX_BUFFER_SIZE : priv->data_buffer_size_dqo;
+ u32 max_copybreak = priv->rx_cfg.packet_buffer_size;
len = *(u32 *)value;
if (len > max_copybreak)
@@ -855,6 +858,25 @@ static u32 gve_get_rxfh_indir_size(struct net_device *netdev)
return priv->rss_lut_size;
}
+static void gve_get_rss_config_cache(struct gve_priv *priv,
+ struct ethtool_rxfh_param *rxfh)
+{
+ struct gve_rss_config *rss_config = &priv->rss_config;
+
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+
+ if (rxfh->key) {
+ rxfh->key_size = priv->rss_key_size;
+ memcpy(rxfh->key, rss_config->hash_key, priv->rss_key_size);
+ }
+
+ if (rxfh->indir) {
+ rxfh->indir_size = priv->rss_lut_size;
+ memcpy(rxfh->indir, rss_config->hash_lut,
+ priv->rss_lut_size * sizeof(*rxfh->indir));
+ }
+}
+
static int gve_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
{
struct gve_priv *priv = netdev_priv(netdev);
@@ -862,18 +884,46 @@ static int gve_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rx
if (!priv->rss_key_size || !priv->rss_lut_size)
return -EOPNOTSUPP;
+ if (priv->cache_rss_config) {
+ gve_get_rss_config_cache(priv, rxfh);
+ return 0;
+ }
+
return gve_adminq_query_rss_config(priv, rxfh);
}
+static void gve_set_rss_config_cache(struct gve_priv *priv,
+ struct ethtool_rxfh_param *rxfh)
+{
+ struct gve_rss_config *rss_config = &priv->rss_config;
+
+ if (rxfh->key)
+ memcpy(rss_config->hash_key, rxfh->key, priv->rss_key_size);
+
+ if (rxfh->indir)
+ memcpy(rss_config->hash_lut, rxfh->indir,
+ priv->rss_lut_size * sizeof(*rxfh->indir));
+}
+
static int gve_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct gve_priv *priv = netdev_priv(netdev);
+ int err;
if (!priv->rss_key_size || !priv->rss_lut_size)
return -EOPNOTSUPP;
- return gve_adminq_configure_rss(priv, rxfh);
+ err = gve_adminq_configure_rss(priv, rxfh);
+ if (err) {
+		NL_SET_ERR_MSG_MOD(extack, "Failed to configure RSS");
+ return err;
+ }
+
+ if (priv->cache_rss_config)
+ gve_set_rss_config_cache(priv, rxfh);
+
+ return 0;
}
const struct ethtool_ops gve_ethtool_ops = {
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 92237fb0b60c..cb2f9978f45e 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -184,6 +184,43 @@ static void gve_free_flow_rule_caches(struct gve_priv *priv)
flow_rules_cache->rules_cache = NULL;
}
+static int gve_alloc_rss_config_cache(struct gve_priv *priv)
+{
+ struct gve_rss_config *rss_config = &priv->rss_config;
+
+ if (!priv->cache_rss_config)
+ return 0;
+
+ rss_config->hash_key = kcalloc(priv->rss_key_size,
+ sizeof(rss_config->hash_key[0]),
+ GFP_KERNEL);
+ if (!rss_config->hash_key)
+ return -ENOMEM;
+
+ rss_config->hash_lut = kcalloc(priv->rss_lut_size,
+ sizeof(rss_config->hash_lut[0]),
+ GFP_KERNEL);
+ if (!rss_config->hash_lut)
+ goto free_rss_key_cache;
+
+ return 0;
+
+free_rss_key_cache:
+ kfree(rss_config->hash_key);
+ rss_config->hash_key = NULL;
+ return -ENOMEM;
+}
+
+static void gve_free_rss_config_cache(struct gve_priv *priv)
+{
+ struct gve_rss_config *rss_config = &priv->rss_config;
+
+ kfree(rss_config->hash_key);
+ kfree(rss_config->hash_lut);
+
+ memset(rss_config, 0, sizeof(*rss_config));
+}
+
static int gve_alloc_counter_array(struct gve_priv *priv)
{
priv->counter_array =
@@ -575,9 +612,12 @@ static int gve_setup_device_resources(struct gve_priv *priv)
err = gve_alloc_flow_rule_caches(priv);
if (err)
return err;
- err = gve_alloc_counter_array(priv);
+ err = gve_alloc_rss_config_cache(priv);
if (err)
goto abort_with_flow_rule_caches;
+ err = gve_alloc_counter_array(priv);
+ if (err)
+ goto abort_with_rss_config_cache;
err = gve_alloc_notify_blocks(priv);
if (err)
goto abort_with_counter;
@@ -611,6 +651,12 @@ static int gve_setup_device_resources(struct gve_priv *priv)
}
}
+ err = gve_init_rss_config(priv, priv->rx_cfg.num_queues);
+ if (err) {
+		dev_err(&priv->pdev->dev, "Failed to init RSS config\n");
+ goto abort_with_ptype_lut;
+ }
+
err = gve_adminq_report_stats(priv, priv->stats_report_len,
priv->stats_report_bus,
GVE_STATS_REPORT_TIMER_PERIOD);
@@ -629,6 +675,8 @@ abort_with_ntfy_blocks:
gve_free_notify_blocks(priv);
abort_with_counter:
gve_free_counter_array(priv);
+abort_with_rss_config_cache:
+ gve_free_rss_config_cache(priv);
abort_with_flow_rule_caches:
gve_free_flow_rule_caches(priv);
@@ -669,6 +717,7 @@ static void gve_teardown_device_resources(struct gve_priv *priv)
priv->ptype_lut_dqo = NULL;
gve_free_flow_rule_caches(priv);
+ gve_free_rss_config_cache(priv);
gve_free_counter_array(priv);
gve_free_notify_blocks(priv);
gve_free_stats_report(priv);
@@ -746,30 +795,13 @@ static struct gve_queue_page_list *gve_rx_get_qpl(struct gve_priv *priv, int idx
return rx->dqo.qpl;
}
-static int gve_register_xdp_qpls(struct gve_priv *priv)
-{
- int start_id;
- int err;
- int i;
-
- start_id = gve_xdp_tx_start_queue_id(priv);
- for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
- err = gve_register_qpl(priv, gve_tx_get_qpl(priv, i));
- /* This failure will trigger a reset - no need to clean up */
- if (err)
- return err;
- }
- return 0;
-}
-
static int gve_register_qpls(struct gve_priv *priv)
{
int num_tx_qpls, num_rx_qpls;
int err;
int i;
- num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_num_xdp_qpls(priv),
- gve_is_qpl(priv));
+ num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_is_qpl(priv));
num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
for (i = 0; i < num_tx_qpls; i++) {
@@ -787,30 +819,13 @@ static int gve_register_qpls(struct gve_priv *priv)
return 0;
}
-static int gve_unregister_xdp_qpls(struct gve_priv *priv)
-{
- int start_id;
- int err;
- int i;
-
- start_id = gve_xdp_tx_start_queue_id(priv);
- for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
- err = gve_unregister_qpl(priv, gve_tx_get_qpl(priv, i));
- /* This failure will trigger a reset - no need to clean */
- if (err)
- return err;
- }
- return 0;
-}
-
static int gve_unregister_qpls(struct gve_priv *priv)
{
int num_tx_qpls, num_rx_qpls;
int err;
int i;
- num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_num_xdp_qpls(priv),
- gve_is_qpl(priv));
+ num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_is_qpl(priv));
num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
for (i = 0; i < num_tx_qpls; i++) {
@@ -829,27 +844,6 @@ static int gve_unregister_qpls(struct gve_priv *priv)
return 0;
}
-static int gve_create_xdp_rings(struct gve_priv *priv)
-{
- int err;
-
- err = gve_adminq_create_tx_queues(priv,
- gve_xdp_tx_start_queue_id(priv),
- priv->num_xdp_queues);
- if (err) {
- netif_err(priv, drv, priv->dev, "failed to create %d XDP tx queues\n",
- priv->num_xdp_queues);
- /* This failure will trigger a reset - no need to clean
- * up
- */
- return err;
- }
- netif_dbg(priv, drv, priv->dev, "created %d XDP tx queues\n",
- priv->num_xdp_queues);
-
- return 0;
-}
-
static int gve_create_rings(struct gve_priv *priv)
{
int num_tx_queues = gve_num_tx_queues(priv);
@@ -905,7 +899,7 @@ static void init_xdp_sync_stats(struct gve_priv *priv)
int i;
/* Init stats */
- for (i = start_id; i < start_id + priv->num_xdp_queues; i++) {
+ for (i = start_id; i < start_id + priv->tx_cfg.num_xdp_queues; i++) {
int ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
u64_stats_init(&priv->tx[i].statss);
@@ -930,24 +924,21 @@ static void gve_init_sync_stats(struct gve_priv *priv)
static void gve_tx_get_curr_alloc_cfg(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *cfg)
{
- int num_xdp_queues = priv->xdp_prog ? priv->rx_cfg.num_queues : 0;
-
cfg->qcfg = &priv->tx_cfg;
cfg->raw_addressing = !gve_is_qpl(priv);
cfg->ring_size = priv->tx_desc_cnt;
- cfg->start_idx = 0;
- cfg->num_rings = priv->tx_cfg.num_queues + num_xdp_queues;
+ cfg->num_xdp_rings = cfg->qcfg->num_xdp_queues;
cfg->tx = priv->tx;
}
-static void gve_tx_stop_rings(struct gve_priv *priv, int start_id, int num_rings)
+static void gve_tx_stop_rings(struct gve_priv *priv, int num_rings)
{
int i;
if (!priv->tx)
return;
- for (i = start_id; i < start_id + num_rings; i++) {
+ for (i = 0; i < num_rings; i++) {
if (gve_is_gqi(priv))
gve_tx_stop_ring_gqi(priv, i);
else
@@ -955,12 +946,11 @@ static void gve_tx_stop_rings(struct gve_priv *priv, int start_id, int num_rings
}
}
-static void gve_tx_start_rings(struct gve_priv *priv, int start_id,
- int num_rings)
+static void gve_tx_start_rings(struct gve_priv *priv, int num_rings)
{
int i;
- for (i = start_id; i < start_id + num_rings; i++) {
+ for (i = 0; i < num_rings; i++) {
if (gve_is_gqi(priv))
gve_tx_start_ring_gqi(priv, i);
else
@@ -968,28 +958,6 @@ static void gve_tx_start_rings(struct gve_priv *priv, int start_id,
}
}
-static int gve_alloc_xdp_rings(struct gve_priv *priv)
-{
- struct gve_tx_alloc_rings_cfg cfg = {0};
- int err = 0;
-
- if (!priv->num_xdp_queues)
- return 0;
-
- gve_tx_get_curr_alloc_cfg(priv, &cfg);
- cfg.start_idx = gve_xdp_tx_start_queue_id(priv);
- cfg.num_rings = priv->num_xdp_queues;
-
- err = gve_tx_alloc_rings_gqi(priv, &cfg);
- if (err)
- return err;
-
- gve_tx_start_rings(priv, cfg.start_idx, cfg.num_rings);
- init_xdp_sync_stats(priv);
-
- return 0;
-}
-
static int gve_queues_mem_alloc(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
@@ -1020,26 +988,6 @@ free_tx:
return err;
}
-static int gve_destroy_xdp_rings(struct gve_priv *priv)
-{
- int start_id;
- int err;
-
- start_id = gve_xdp_tx_start_queue_id(priv);
- err = gve_adminq_destroy_tx_queues(priv,
- start_id,
- priv->num_xdp_queues);
- if (err) {
- netif_err(priv, drv, priv->dev,
- "failed to destroy XDP queues\n");
- /* This failure will trigger a reset - no need to clean up */
- return err;
- }
- netif_dbg(priv, drv, priv->dev, "destroyed XDP queues\n");
-
- return 0;
-}
-
static int gve_destroy_rings(struct gve_priv *priv)
{
int num_tx_queues = gve_num_tx_queues(priv);
@@ -1064,20 +1012,6 @@ static int gve_destroy_rings(struct gve_priv *priv)
return 0;
}
-static void gve_free_xdp_rings(struct gve_priv *priv)
-{
- struct gve_tx_alloc_rings_cfg cfg = {0};
-
- gve_tx_get_curr_alloc_cfg(priv, &cfg);
- cfg.start_idx = gve_xdp_tx_start_queue_id(priv);
- cfg.num_rings = priv->num_xdp_queues;
-
- if (priv->tx) {
- gve_tx_stop_rings(priv, cfg.start_idx, cfg.num_rings);
- gve_tx_free_rings_gqi(priv, &cfg);
- }
-}
-
static void gve_queues_mem_free(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *tx_cfg,
struct gve_rx_alloc_rings_cfg *rx_cfg)
@@ -1204,7 +1138,7 @@ static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
int i, j;
u32 tx_qid;
- if (!priv->num_xdp_queues)
+ if (!priv->tx_cfg.num_xdp_queues)
return 0;
for (i = 0; i < priv->rx_cfg.num_queues; i++) {
@@ -1215,8 +1149,14 @@ static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
napi->napi_id);
if (err)
goto err;
- err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
- MEM_TYPE_PAGE_SHARED, NULL);
+ if (gve_is_qpl(priv))
+ err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED,
+ NULL);
+ else
+ err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
+ MEM_TYPE_PAGE_POOL,
+ rx->dqo.page_pool);
if (err)
goto err;
rx->xsk_pool = xsk_get_pool_from_qid(dev, i);
@@ -1234,7 +1174,7 @@ static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
}
}
- for (i = 0; i < priv->num_xdp_queues; i++) {
+ for (i = 0; i < priv->tx_cfg.num_xdp_queues; i++) {
tx_qid = gve_xdp_tx_queue_id(priv, i);
priv->tx[tx_qid].xsk_pool = xsk_get_pool_from_qid(dev, i);
}
@@ -1255,7 +1195,7 @@ static void gve_unreg_xdp_info(struct gve_priv *priv)
{
int i, tx_qid;
- if (!priv->num_xdp_queues)
+ if (!priv->tx_cfg.num_xdp_queues || !priv->rx || !priv->tx)
return;
for (i = 0; i < priv->rx_cfg.num_queues; i++) {
@@ -1268,7 +1208,7 @@ static void gve_unreg_xdp_info(struct gve_priv *priv)
}
}
- for (i = 0; i < priv->num_xdp_queues; i++) {
+ for (i = 0; i < priv->tx_cfg.num_xdp_queues; i++) {
tx_qid = gve_xdp_tx_queue_id(priv, i);
priv->tx[tx_qid].xsk_pool = NULL;
}
@@ -1285,15 +1225,14 @@ static void gve_drain_page_cache(struct gve_priv *priv)
static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv,
struct gve_rx_alloc_rings_cfg *cfg)
{
- cfg->qcfg = &priv->rx_cfg;
+ cfg->qcfg_rx = &priv->rx_cfg;
cfg->qcfg_tx = &priv->tx_cfg;
cfg->raw_addressing = !gve_is_qpl(priv);
cfg->enable_header_split = priv->header_split_enabled;
cfg->ring_size = priv->rx_desc_cnt;
- cfg->packet_buffer_size = gve_is_gqi(priv) ?
- GVE_DEFAULT_RX_BUFFER_SIZE :
- priv->data_buffer_size_dqo;
+ cfg->packet_buffer_size = priv->rx_cfg.packet_buffer_size;
cfg->rx = priv->rx;
+ cfg->xdp = !!cfg->qcfg_tx->num_xdp_queues;
}
void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
@@ -1366,17 +1305,13 @@ static int gve_queues_start(struct gve_priv *priv,
/* Record new configs into priv */
priv->tx_cfg = *tx_alloc_cfg->qcfg;
- priv->rx_cfg = *rx_alloc_cfg->qcfg;
+ priv->tx_cfg.num_xdp_queues = tx_alloc_cfg->num_xdp_rings;
+ priv->rx_cfg = *rx_alloc_cfg->qcfg_rx;
priv->tx_desc_cnt = tx_alloc_cfg->ring_size;
priv->rx_desc_cnt = rx_alloc_cfg->ring_size;
- if (priv->xdp_prog)
- priv->num_xdp_queues = priv->rx_cfg.num_queues;
- else
- priv->num_xdp_queues = 0;
-
- gve_tx_start_rings(priv, 0, tx_alloc_cfg->num_rings);
- gve_rx_start_rings(priv, rx_alloc_cfg->qcfg->num_queues);
+ gve_tx_start_rings(priv, gve_num_tx_queues(priv));
+ gve_rx_start_rings(priv, rx_alloc_cfg->qcfg_rx->num_queues);
gve_init_sync_stats(priv);
err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues);
@@ -1390,12 +1325,18 @@ static int gve_queues_start(struct gve_priv *priv,
if (err)
goto stop_and_free_rings;
+ if (rx_alloc_cfg->reset_rss) {
+ err = gve_init_rss_config(priv, priv->rx_cfg.num_queues);
+ if (err)
+ goto reset;
+ }
+
err = gve_register_qpls(priv);
if (err)
goto reset;
priv->header_split_enabled = rx_alloc_cfg->enable_header_split;
- priv->data_buffer_size_dqo = rx_alloc_cfg->packet_buffer_size;
+ priv->rx_cfg.packet_buffer_size = rx_alloc_cfg->packet_buffer_size;
err = gve_create_rings(priv);
if (err)
@@ -1422,7 +1363,7 @@ reset:
/* return the original error */
return err;
stop_and_free_rings:
- gve_tx_stop_rings(priv, 0, gve_num_tx_queues(priv));
+ gve_tx_stop_rings(priv, gve_num_tx_queues(priv));
gve_rx_stop_rings(priv, priv->rx_cfg.num_queues);
gve_queues_mem_remove(priv);
return err;
@@ -1471,7 +1412,7 @@ static int gve_queues_stop(struct gve_priv *priv)
gve_unreg_xdp_info(priv);
- gve_tx_stop_rings(priv, 0, gve_num_tx_queues(priv));
+ gve_tx_stop_rings(priv, gve_num_tx_queues(priv));
gve_rx_stop_rings(priv, priv->rx_cfg.num_queues);
priv->interface_down_cnt++;
@@ -1501,56 +1442,6 @@ static int gve_close(struct net_device *dev)
return 0;
}
-static int gve_remove_xdp_queues(struct gve_priv *priv)
-{
- int err;
-
- err = gve_destroy_xdp_rings(priv);
- if (err)
- return err;
-
- err = gve_unregister_xdp_qpls(priv);
- if (err)
- return err;
-
- gve_unreg_xdp_info(priv);
- gve_free_xdp_rings(priv);
-
- priv->num_xdp_queues = 0;
- return 0;
-}
-
-static int gve_add_xdp_queues(struct gve_priv *priv)
-{
- int err;
-
- priv->num_xdp_queues = priv->rx_cfg.num_queues;
-
- err = gve_alloc_xdp_rings(priv);
- if (err)
- goto err;
-
- err = gve_reg_xdp_info(priv, priv->dev);
- if (err)
- goto free_xdp_rings;
-
- err = gve_register_xdp_qpls(priv);
- if (err)
- goto free_xdp_rings;
-
- err = gve_create_xdp_rings(priv);
- if (err)
- goto free_xdp_rings;
-
- return 0;
-
-free_xdp_rings:
- gve_free_xdp_rings(priv);
-err:
- priv->num_xdp_queues = 0;
- return err;
-}
-
static void gve_handle_link_status(struct gve_priv *priv, bool link_status)
{
if (!gve_get_napi_enabled(priv))
@@ -1568,6 +1459,19 @@ static void gve_handle_link_status(struct gve_priv *priv, bool link_status)
}
}
+static int gve_configure_rings_xdp(struct gve_priv *priv,
+ u16 num_xdp_rings)
+{
+ struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
+ struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
+
+ gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
+ tx_alloc_cfg.num_xdp_rings = num_xdp_rings;
+
+ rx_alloc_cfg.xdp = !!num_xdp_rings;
+ return gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
+}
+
static int gve_set_xdp(struct gve_priv *priv, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
@@ -1580,29 +1484,26 @@ static int gve_set_xdp(struct gve_priv *priv, struct bpf_prog *prog,
WRITE_ONCE(priv->xdp_prog, prog);
if (old_prog)
bpf_prog_put(old_prog);
+
+ /* Update priv XDP queue configuration */
+ priv->tx_cfg.num_xdp_queues = priv->xdp_prog ?
+ priv->rx_cfg.num_queues : 0;
return 0;
}
- gve_turndown(priv);
- if (!old_prog && prog) {
- // Allocate XDP TX queues if an XDP program is
- // being installed
- err = gve_add_xdp_queues(priv);
- if (err)
- goto out;
- } else if (old_prog && !prog) {
- // Remove XDP TX queues if an XDP program is
- // being uninstalled
- err = gve_remove_xdp_queues(priv);
- if (err)
- goto out;
- }
+ if (!old_prog && prog)
+ err = gve_configure_rings_xdp(priv, priv->rx_cfg.num_queues);
+ else if (old_prog && !prog)
+ err = gve_configure_rings_xdp(priv, 0);
+
+ if (err)
+ goto out;
+
WRITE_ONCE(priv->xdp_prog, prog);
if (old_prog)
bpf_prog_put(old_prog);
out:
- gve_turnup(priv);
status = ioread32be(&priv->reg_bar0->device_status);
gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
return err;
@@ -1736,6 +1637,7 @@ static int gve_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
static int verify_xdp_configuration(struct net_device *dev)
{
struct gve_priv *priv = netdev_priv(dev);
+ u16 max_xdp_mtu;
if (dev->features & NETIF_F_LRO) {
netdev_warn(dev, "XDP is not supported when LRO is on.\n");
@@ -1748,7 +1650,11 @@ static int verify_xdp_configuration(struct net_device *dev)
return -EOPNOTSUPP;
}
- if (dev->mtu > GVE_DEFAULT_RX_BUFFER_SIZE - sizeof(struct ethhdr) - GVE_RX_PAD) {
+ max_xdp_mtu = priv->rx_cfg.packet_buffer_size - sizeof(struct ethhdr);
+ if (priv->queue_format == GVE_GQI_QPL_FORMAT)
+ max_xdp_mtu -= GVE_RX_PAD;
+
+ if (dev->mtu > max_xdp_mtu) {
netdev_warn(dev, "XDP is not supported for mtu %d.\n",
dev->mtu);
return -EOPNOTSUPP;
@@ -1786,6 +1692,26 @@ static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
}
+int gve_init_rss_config(struct gve_priv *priv, u16 num_queues)
+{
+ struct gve_rss_config *rss_config = &priv->rss_config;
+ struct ethtool_rxfh_param rxfh = {0};
+ u16 i;
+
+ if (!priv->cache_rss_config)
+ return 0;
+
+ for (i = 0; i < priv->rss_lut_size; i++)
+ rss_config->hash_lut[i] =
+ ethtool_rxfh_indir_default(i, num_queues);
+
+ netdev_rss_key_fill(rss_config->hash_key, priv->rss_key_size);
+
+ rxfh.hfunc = ETH_RSS_HASH_TOP;
+
+ return gve_adminq_configure_rss(priv, &rxfh);
+}
+
int gve_flow_rules_reset(struct gve_priv *priv)
{
if (!priv->max_flow_rules)
@@ -1833,12 +1759,12 @@ int gve_adjust_config(struct gve_priv *priv,
}
int gve_adjust_queues(struct gve_priv *priv,
- struct gve_queue_config new_rx_config,
- struct gve_queue_config new_tx_config)
+ struct gve_rx_queue_config new_rx_config,
+ struct gve_tx_queue_config new_tx_config,
+ bool reset_rss)
{
struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
- int num_xdp_queues;
int err;
gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
@@ -1846,18 +1772,19 @@ int gve_adjust_queues(struct gve_priv *priv,
/* Relay the new config from ethtool */
tx_alloc_cfg.qcfg = &new_tx_config;
rx_alloc_cfg.qcfg_tx = &new_tx_config;
- rx_alloc_cfg.qcfg = &new_rx_config;
- tx_alloc_cfg.num_rings = new_tx_config.num_queues;
-
- /* Add dedicated XDP TX queues if enabled. */
- num_xdp_queues = priv->xdp_prog ? new_rx_config.num_queues : 0;
- tx_alloc_cfg.num_rings += num_xdp_queues;
+ rx_alloc_cfg.qcfg_rx = &new_rx_config;
+ rx_alloc_cfg.reset_rss = reset_rss;
if (netif_running(priv->dev)) {
err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
return err;
}
/* Set the config for the next up. */
+ if (reset_rss) {
+ err = gve_init_rss_config(priv, new_rx_config.num_queues);
+ if (err)
+ return err;
+ }
priv->tx_cfg = new_tx_config;
priv->rx_cfg = new_rx_config;
@@ -1886,7 +1813,7 @@ static void gve_turndown(struct gve_priv *priv)
netif_queue_set_napi(priv->dev, idx,
NETDEV_QUEUE_TYPE_TX, NULL);
- napi_disable(&block->napi);
+ napi_disable_locked(&block->napi);
}
for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
@@ -1897,7 +1824,7 @@ static void gve_turndown(struct gve_priv *priv)
netif_queue_set_napi(priv->dev, idx, NETDEV_QUEUE_TYPE_RX,
NULL);
- napi_disable(&block->napi);
+ napi_disable_locked(&block->napi);
}
/* Stop tx queues */
@@ -1927,7 +1854,7 @@ static void gve_turnup(struct gve_priv *priv)
if (!gve_tx_was_added_to_block(priv, idx))
continue;
- napi_enable(&block->napi);
+ napi_enable_locked(&block->napi);
if (idx < priv->tx_cfg.num_queues)
netif_queue_set_napi(priv->dev, idx,
@@ -1955,7 +1882,7 @@ static void gve_turnup(struct gve_priv *priv)
if (!gve_rx_was_added_to_block(priv, idx))
continue;
- napi_enable(&block->napi);
+ napi_enable_locked(&block->napi);
netif_queue_set_napi(priv->dev, idx, NETDEV_QUEUE_TYPE_RX,
&block->napi);
@@ -1974,7 +1901,7 @@ static void gve_turnup(struct gve_priv *priv)
napi_schedule(&block->napi);
}
- if (priv->num_xdp_queues && gve_supports_xdp_xmit(priv))
+ if (priv->tx_cfg.num_xdp_queues && gve_supports_xdp_xmit(priv))
xdp_features_set_redirect_target(priv->dev, false);
gve_set_napi_enabled(priv);
@@ -2330,6 +2257,7 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
priv->rx_cfg.num_queues = min_t(int, priv->default_num_queues,
priv->rx_cfg.num_queues);
}
+ priv->tx_cfg.num_xdp_queues = 0;
dev_info(&priv->pdev->dev, "TX queues %d, RX queues %d\n",
priv->tx_cfg.num_queues, priv->rx_cfg.num_queues);
@@ -2710,7 +2638,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
priv->service_task_flags = 0x0;
priv->state_flags = 0x0;
priv->ethtool_flags = 0x0;
- priv->data_buffer_size_dqo = GVE_DEFAULT_RX_BUFFER_SIZE;
+ priv->rx_cfg.packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
priv->max_rx_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
gve_set_probe_in_progress(priv);
@@ -2805,6 +2733,7 @@ static int gve_suspend(struct pci_dev *pdev, pm_message_t state)
priv->suspend_cnt++;
rtnl_lock();
+ netdev_lock(netdev);
if (was_up && gve_close(priv->dev)) {
/* If the dev was up, attempt to close, if close fails, reset */
gve_reset_and_teardown(priv, was_up);
@@ -2813,6 +2742,7 @@ static int gve_suspend(struct pci_dev *pdev, pm_message_t state)
gve_teardown_priv_resources(priv);
}
priv->up_before_suspend = was_up;
+ netdev_unlock(netdev);
rtnl_unlock();
return 0;
}
@@ -2825,7 +2755,9 @@ static int gve_resume(struct pci_dev *pdev)
priv->resume_cnt++;
rtnl_lock();
+ netdev_lock(netdev);
err = gve_reset_recovery(priv, priv->up_before_suspend);
+ netdev_unlock(netdev);
rtnl_unlock();
return err;
}
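
For illustration only (editor's sketch, outside the patch): gve_init_rss_config() above rebuilds the RSS indirection table with ethtool_rxfh_indir_default(), which reduces to index % num_queues, i.e. a round-robin spread of LUT entries across the active RX queues. A minimal standalone sketch of that distribution, assuming a hypothetical 128-entry table:

/* Editor's sketch: default RSS indirection spread, as used by
 * gve_init_rss_config(). The 128-entry LUT size is an assumption
 * made for this example only.
 */
#include <stdio.h>

#define RSS_LUT_SIZE 128

static unsigned int rxfh_indir_default(unsigned int index,
				       unsigned int n_rx_rings)
{
	return index % n_rx_rings;	/* mirrors ethtool_rxfh_indir_default() */
}

int main(void)
{
	unsigned int lut[RSS_LUT_SIZE];
	unsigned int i, num_queues = 8;

	for (i = 0; i < RSS_LUT_SIZE; i++)
		lut[i] = rxfh_indir_default(i, num_queues);

	/* each of the 8 queues receives 128 / 8 = 16 LUT entries */
	printf("lut[0]=%u lut[9]=%u lut[127]=%u\n", lut[0], lut[9], lut[127]);
	return 0;
}
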
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index acb73d4d0de6..90e875c1832f 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -141,12 +141,15 @@ void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
}
-static void gve_setup_rx_buffer(struct gve_rx_slot_page_info *page_info,
- dma_addr_t addr, struct page *page, __be64 *slot_addr)
+static void gve_setup_rx_buffer(struct gve_rx_ring *rx,
+ struct gve_rx_slot_page_info *page_info,
+ dma_addr_t addr, struct page *page,
+ __be64 *slot_addr)
{
page_info->page = page;
page_info->page_offset = 0;
page_info->page_address = page_address(page);
+ page_info->buf_size = rx->packet_buffer_size;
*slot_addr = cpu_to_be64(addr);
/* The page already has 1 ref */
page_ref_add(page, INT_MAX - 1);
@@ -171,7 +174,7 @@ static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev,
return err;
}
- gve_setup_rx_buffer(page_info, dma, page, &data_slot->addr);
+ gve_setup_rx_buffer(rx, page_info, dma, page, &data_slot->addr);
return 0;
}
@@ -199,7 +202,8 @@ static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
struct page *page = rx->data.qpl->pages[i];
dma_addr_t addr = i * PAGE_SIZE;
- gve_setup_rx_buffer(&rx->data.page_info[i], addr, page,
+ gve_setup_rx_buffer(rx, &rx->data.page_info[i], addr,
+ page,
&rx->data.data_ring[i].qpl_offset);
continue;
}
@@ -222,6 +226,7 @@ static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
rx->qpl_copy_pool[j].page = page;
rx->qpl_copy_pool[j].page_offset = 0;
rx->qpl_copy_pool[j].page_address = page_address(page);
+ rx->qpl_copy_pool[j].buf_size = rx->packet_buffer_size;
/* The page already has 1 ref. */
page_ref_add(page, INT_MAX - 1);
@@ -283,6 +288,7 @@ int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
rx->gve = priv;
rx->q_num = idx;
+ rx->packet_buffer_size = cfg->packet_buffer_size;
rx->mask = slots - 1;
rx->data.raw_addressing = cfg->raw_addressing;
@@ -351,7 +357,6 @@ int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
rx->db_threshold = slots / 2;
gve_rx_init_ring_state_gqi(rx);
- rx->packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
gve_rx_ctx_clear(&rx->ctx);
return 0;
@@ -385,12 +390,12 @@ int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
int err = 0;
int i, j;
- rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring),
+ rx = kvcalloc(cfg->qcfg_rx->max_queues, sizeof(struct gve_rx_ring),
GFP_KERNEL);
if (!rx)
return -ENOMEM;
- for (i = 0; i < cfg->qcfg->num_queues; i++) {
+ for (i = 0; i < cfg->qcfg_rx->num_queues; i++) {
err = gve_rx_alloc_ring_gqi(priv, cfg, &rx[i], i);
if (err) {
netif_err(priv, drv, priv->dev,
@@ -419,7 +424,7 @@ void gve_rx_free_rings_gqi(struct gve_priv *priv,
if (!rx)
return;
- for (i = 0; i < cfg->qcfg->num_queues; i++)
+ for (i = 0; i < cfg->qcfg_rx->num_queues; i++)
gve_rx_free_ring_gqi(priv, &rx[i], cfg);
kvfree(rx);
@@ -590,7 +595,7 @@ static struct sk_buff *gve_rx_copy_to_pool(struct gve_rx_ring *rx,
copy_page_info->pad = page_info->pad;
skb = gve_rx_add_frags(napi, copy_page_info,
- rx->packet_buffer_size, len, ctx);
+ copy_page_info->buf_size, len, ctx);
if (unlikely(!skb))
return NULL;
@@ -630,7 +635,8 @@ gve_rx_qpl(struct device *dev, struct net_device *netdev,
* device.
*/
if (page_info->can_flip) {
- skb = gve_rx_add_frags(napi, page_info, rx->packet_buffer_size, len, ctx);
+ skb = gve_rx_add_frags(napi, page_info, page_info->buf_size,
+ len, ctx);
/* No point in recycling if we didn't get the skb */
if (skb) {
/* Make sure that the page isn't freed. */
@@ -680,7 +686,7 @@ static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx,
skb = gve_rx_raw_addressing(&priv->pdev->dev, netdev,
page_info, len, napi,
data_slot,
- rx->packet_buffer_size, ctx);
+ page_info->buf_size, ctx);
} else {
skb = gve_rx_qpl(&priv->pdev->dev, netdev, rx,
page_info, len, napi, data_slot);
@@ -855,7 +861,7 @@ static void gve_rx(struct gve_rx_ring *rx, netdev_features_t feat,
void *old_data;
int xdp_act;
- xdp_init_buff(&xdp, rx->packet_buffer_size, &rx->xdp_rxq);
+ xdp_init_buff(&xdp, page_info->buf_size, &rx->xdp_rxq);
xdp_prepare_buff(&xdp, page_info->page_address +
page_info->page_offset, GVE_RX_PAD,
len, false);
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index f0674a443567..dcb0545baa50 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -114,7 +114,8 @@ void gve_rx_stop_ring_dqo(struct gve_priv *priv, int idx)
if (!gve_rx_was_added_to_block(priv, idx))
return;
- page_pool_disable_direct_recycling(rx->dqo.page_pool);
+ if (rx->dqo.page_pool)
+ page_pool_disable_direct_recycling(rx->dqo.page_pool);
gve_remove_napi(priv, ntfy_idx);
gve_rx_remove_from_block(priv, idx);
gve_rx_reset_ring_dqo(priv, idx);
@@ -223,6 +224,15 @@ int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
memset(rx, 0, sizeof(*rx));
rx->gve = priv;
rx->q_num = idx;
+ rx->packet_buffer_size = cfg->packet_buffer_size;
+
+ if (cfg->xdp) {
+ rx->packet_buffer_truesize = GVE_XDP_RX_BUFFER_SIZE_DQO;
+ rx->rx_headroom = XDP_PACKET_HEADROOM;
+ } else {
+ rx->packet_buffer_truesize = rx->packet_buffer_size;
+ rx->rx_headroom = 0;
+ }
rx->dqo.num_buf_states = cfg->raw_addressing ? buffer_queue_slots :
gve_get_rx_pages_per_qpl_dqo(cfg->ring_size);
@@ -253,7 +263,7 @@ int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
goto err;
if (cfg->raw_addressing) {
- pool = gve_rx_create_page_pool(priv, rx);
+ pool = gve_rx_create_page_pool(priv, rx, cfg->xdp);
if (IS_ERR(pool))
goto err;
@@ -299,12 +309,12 @@ int gve_rx_alloc_rings_dqo(struct gve_priv *priv,
int err;
int i;
- rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring),
+ rx = kvcalloc(cfg->qcfg_rx->max_queues, sizeof(struct gve_rx_ring),
GFP_KERNEL);
if (!rx)
return -ENOMEM;
- for (i = 0; i < cfg->qcfg->num_queues; i++) {
+ for (i = 0; i < cfg->qcfg_rx->num_queues; i++) {
err = gve_rx_alloc_ring_dqo(priv, cfg, &rx[i], i);
if (err) {
netif_err(priv, drv, priv->dev,
@@ -333,7 +343,7 @@ void gve_rx_free_rings_dqo(struct gve_priv *priv,
if (!rx)
return;
- for (i = 0; i < cfg->qcfg->num_queues; i++)
+ for (i = 0; i < cfg->qcfg_rx->num_queues; i++)
gve_rx_free_ring_dqo(priv, &rx[i], cfg);
kvfree(rx);
@@ -476,6 +486,25 @@ static int gve_rx_copy_ondemand(struct gve_rx_ring *rx,
return 0;
}
+static void gve_skb_add_rx_frag(struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state,
+ int num_frags, u16 buf_len)
+{
+ if (rx->dqo.page_pool) {
+ skb_add_rx_frag_netmem(rx->ctx.skb_tail, num_frags,
+ buf_state->page_info.netmem,
+ buf_state->page_info.page_offset +
+ buf_state->page_info.pad, buf_len,
+ buf_state->page_info.buf_size);
+ } else {
+ skb_add_rx_frag(rx->ctx.skb_tail, num_frags,
+ buf_state->page_info.page,
+ buf_state->page_info.page_offset +
+ buf_state->page_info.pad, buf_len,
+ buf_state->page_info.buf_size);
+ }
+}
+
/* Chains multi skbs for single rx packet.
* Returns 0 if buffer is appended, -1 otherwise.
*/
@@ -513,14 +542,34 @@ static int gve_rx_append_frags(struct napi_struct *napi,
if (gve_rx_should_trigger_copy_ondemand(rx))
return gve_rx_copy_ondemand(rx, buf_state, buf_len);
- skb_add_rx_frag(rx->ctx.skb_tail, num_frags,
- buf_state->page_info.page,
- buf_state->page_info.page_offset,
- buf_len, buf_state->page_info.buf_size);
+ gve_skb_add_rx_frag(rx, buf_state, num_frags, buf_len);
gve_reuse_buffer(rx, buf_state);
return 0;
}
+static void gve_xdp_done_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct xdp_buff *xdp, struct bpf_prog *xprog,
+ int xdp_act,
+ struct gve_rx_buf_state_dqo *buf_state)
+{
+ u64_stats_update_begin(&rx->statss);
+ switch (xdp_act) {
+ case XDP_ABORTED:
+ case XDP_DROP:
+ default:
+ rx->xdp_actions[xdp_act]++;
+ break;
+ case XDP_TX:
+ rx->xdp_tx_errors++;
+ break;
+ case XDP_REDIRECT:
+ rx->xdp_redirect_errors++;
+ break;
+ }
+ u64_stats_update_end(&rx->statss);
+ gve_free_buffer(rx, buf_state);
+}
+
/* Returns 0 if descriptor is completed successfully.
* Returns -EINVAL if descriptor is invalid.
* Returns -ENOMEM if data cannot be copied to skb.
@@ -535,6 +584,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
const bool hsplit = compl_desc->split_header;
struct gve_rx_buf_state_dqo *buf_state;
struct gve_priv *priv = rx->gve;
+ struct bpf_prog *xprog;
u16 buf_len;
u16 hdr_len;
@@ -561,7 +611,12 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
/* Page might not have been used in a while and was likely last written
* by a different thread.
*/
- prefetch(buf_state->page_info.page);
+ if (rx->dqo.page_pool) {
+ if (!netmem_is_net_iov(buf_state->page_info.netmem))
+ prefetch(netmem_to_page(buf_state->page_info.netmem));
+ } else {
+ prefetch(buf_state->page_info.page);
+ }
/* Copy the header into the skb in the case of header split */
if (hsplit) {
@@ -590,7 +645,8 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
/* Sync the portion of dma buffer for CPU to read. */
dma_sync_single_range_for_cpu(&priv->pdev->dev, buf_state->addr,
- buf_state->page_info.page_offset,
+ buf_state->page_info.page_offset +
+ buf_state->page_info.pad,
buf_len, DMA_FROM_DEVICE);
/* Append to current skb if one exists. */
@@ -602,6 +658,34 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
return 0;
}
+ xprog = READ_ONCE(priv->xdp_prog);
+ if (xprog) {
+ struct xdp_buff xdp;
+ void *old_data;
+ int xdp_act;
+
+ xdp_init_buff(&xdp, buf_state->page_info.buf_size,
+ &rx->xdp_rxq);
+ xdp_prepare_buff(&xdp,
+ buf_state->page_info.page_address +
+ buf_state->page_info.page_offset,
+ buf_state->page_info.pad,
+ buf_len, false);
+ old_data = xdp.data;
+ xdp_act = bpf_prog_run_xdp(xprog, &xdp);
+ buf_state->page_info.pad += xdp.data - old_data;
+ buf_len = xdp.data_end - xdp.data;
+ if (xdp_act != XDP_PASS) {
+ gve_xdp_done_dqo(priv, rx, &xdp, xprog, xdp_act,
+ buf_state);
+ return 0;
+ }
+
+ u64_stats_update_begin(&rx->statss);
+ rx->xdp_actions[XDP_PASS]++;
+ u64_stats_update_end(&rx->statss);
+ }
+
if (eop && buf_len <= priv->rx_copybreak) {
rx->ctx.skb_head = gve_rx_copy(priv->dev, napi,
&buf_state->page_info, buf_len);
@@ -632,9 +716,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
if (rx->dqo.page_pool)
skb_mark_for_recycle(rx->ctx.skb_head);
- skb_add_rx_frag(rx->ctx.skb_head, 0, buf_state->page_info.page,
- buf_state->page_info.page_offset, buf_len,
- buf_state->page_info.buf_size);
+ gve_skb_add_rx_frag(rx, buf_state, 0, buf_len);
gve_reuse_buffer(rx, buf_state);
return 0;
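
For illustration only (editor's sketch, outside the patch): after bpf_prog_run_xdp() in the new DQO XDP path above, the driver re-derives the buffer pad and payload length from wherever the program left xdp.data, since a program may move the data pointer (e.g. to pull a header). The arithmetic, with invented sizes:

/* Editor's sketch: the pad/length bookkeeping gve_rx_dqo() performs
 * after bpf_prog_run_xdp(). All sizes here are invented for the demo.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t buf[2048];
	uint8_t *data = buf + 64;		/* initial pad/headroom */
	uint8_t *data_end = data + 1500;	/* 1500-byte frame */
	uint8_t *old_data = data;
	uint16_t pad = 64, buf_len;

	data += 14;	/* as if the program pulled the Ethernet header */

	pad += data - old_data;		/* 64 + 14 = 78 */
	buf_len = data_end - data;	/* 1500 - 14 = 1486 */
	printf("pad=%u len=%u\n", pad, buf_len);
	return 0;
}
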
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index 4350ebd9c2bd..1b40bf0c811a 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -334,27 +334,23 @@ int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *cfg)
{
struct gve_tx_ring *tx = cfg->tx;
+ int total_queues;
int err = 0;
int i, j;
- if (cfg->start_idx + cfg->num_rings > cfg->qcfg->max_queues) {
+ total_queues = cfg->qcfg->num_queues + cfg->num_xdp_rings;
+ if (total_queues > cfg->qcfg->max_queues) {
netif_err(priv, drv, priv->dev,
"Cannot alloc more than the max num of Tx rings\n");
return -EINVAL;
}
- if (cfg->start_idx == 0) {
- tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
- GFP_KERNEL);
- if (!tx)
- return -ENOMEM;
- } else if (!tx) {
- netif_err(priv, drv, priv->dev,
- "Cannot alloc tx rings from a nonzero start idx without tx array\n");
- return -EINVAL;
- }
+ tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
+ GFP_KERNEL);
+ if (!tx)
+ return -ENOMEM;
- for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++) {
+ for (i = 0; i < total_queues; i++) {
err = gve_tx_alloc_ring_gqi(priv, cfg, &tx[i], i);
if (err) {
netif_err(priv, drv, priv->dev,
@@ -370,8 +366,7 @@ int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
cleanup:
for (j = 0; j < i; j++)
gve_tx_free_ring_gqi(priv, &tx[j], cfg);
- if (cfg->start_idx == 0)
- kvfree(tx);
+ kvfree(tx);
return err;
}
@@ -384,13 +379,11 @@ void gve_tx_free_rings_gqi(struct gve_priv *priv,
if (!tx)
return;
- for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++)
+ for (i = 0; i < cfg->qcfg->num_queues + cfg->qcfg->num_xdp_queues; i++)
gve_tx_free_ring_gqi(priv, &tx[i], cfg);
- if (cfg->start_idx == 0) {
- kvfree(tx);
- cfg->tx = NULL;
- }
+ kvfree(tx);
+ cfg->tx = NULL;
}
/* gve_tx_avail - Calculates the number of slots available in the ring
@@ -844,7 +837,7 @@ int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
return -ENETDOWN;
qid = gve_xdp_tx_queue_id(priv,
- smp_processor_id() % priv->num_xdp_queues);
+ smp_processor_id() % priv->tx_cfg.num_xdp_queues);
tx = &priv->tx[qid];
@@ -959,13 +952,9 @@ static int gve_xsk_tx(struct gve_priv *priv, struct gve_tx_ring *tx,
spin_lock(&tx->xdp_lock);
while (sent < budget) {
- if (!gve_can_tx(tx, GVE_TX_START_THRESH))
- goto out;
-
- if (!xsk_tx_peek_desc(tx->xsk_pool, &desc)) {
- tx->xdp_xsk_done = tx->xdp_xsk_wakeup;
+ if (!gve_can_tx(tx, GVE_TX_START_THRESH) ||
+ !xsk_tx_peek_desc(tx->xsk_pool, &desc))
goto out;
- }
data = xsk_buff_raw_get_data(tx->xsk_pool, desc.addr);
nsegs = gve_tx_fill_xdp(priv, tx, data, desc.len, NULL, true);
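
For illustration only (editor's sketch, outside the patch): gve_xdp_xmit() above now keys the XDP TX ring off priv->tx_cfg.num_xdp_queues. Since the allocation hunks lay the XDP rings out after the stack TX rings, the ring id is the stack queue count plus cpu % num_xdp_queues; the queue counts below are assumptions for the example:

/* Editor's sketch: CPU-keyed XDP TX queue selection as in
 * gve_xdp_xmit(). XDP rings follow the stack TX rings, so the ring
 * id is the stack queue count plus cpu % num_xdp_queues.
 */
#include <stdio.h>

int main(void)
{
	unsigned int num_tx_queues = 16, num_xdp_queues = 16;
	unsigned int cpu;

	for (cpu = 0; cpu < 4; cpu++) {
		unsigned int qid = num_tx_queues + cpu % num_xdp_queues;

		printf("cpu %u -> tx ring %u\n", cpu, qid);
	}
	return 0;
}
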
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index 394debc62268..2eba868d8037 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -379,27 +379,23 @@ int gve_tx_alloc_rings_dqo(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *cfg)
{
struct gve_tx_ring *tx = cfg->tx;
+ int total_queues;
int err = 0;
int i, j;
- if (cfg->start_idx + cfg->num_rings > cfg->qcfg->max_queues) {
+ total_queues = cfg->qcfg->num_queues + cfg->num_xdp_rings;
+ if (total_queues > cfg->qcfg->max_queues) {
netif_err(priv, drv, priv->dev,
"Cannot alloc more than the max num of Tx rings\n");
return -EINVAL;
}
- if (cfg->start_idx == 0) {
- tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
- GFP_KERNEL);
- if (!tx)
- return -ENOMEM;
- } else if (!tx) {
- netif_err(priv, drv, priv->dev,
- "Cannot alloc tx rings from a nonzero start idx without tx array\n");
- return -EINVAL;
- }
+ tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
+ GFP_KERNEL);
+ if (!tx)
+ return -ENOMEM;
- for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++) {
+ for (i = 0; i < total_queues; i++) {
err = gve_tx_alloc_ring_dqo(priv, cfg, &tx[i], i);
if (err) {
netif_err(priv, drv, priv->dev,
@@ -415,8 +411,7 @@ int gve_tx_alloc_rings_dqo(struct gve_priv *priv,
err:
for (j = 0; j < i; j++)
gve_tx_free_ring_dqo(priv, &tx[j], cfg);
- if (cfg->start_idx == 0)
- kvfree(tx);
+ kvfree(tx);
return err;
}
@@ -429,13 +424,11 @@ void gve_tx_free_rings_dqo(struct gve_priv *priv,
if (!tx)
return;
- for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++)
+ for (i = 0; i < cfg->qcfg->num_queues + cfg->qcfg->num_xdp_queues; i++)
gve_tx_free_ring_dqo(priv, &tx[i], cfg);
- if (cfg->start_idx == 0) {
- kvfree(tx);
- cfg->tx = NULL;
- }
+ kvfree(tx);
+ cfg->tx = NULL;
}
/* Returns the number of slots available in the ring */
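
For illustration only (editor's sketch, outside the patch): with the start_idx plumbing removed, both gve_tx_alloc_rings_gqi() and gve_tx_alloc_rings_dqo() follow the same allocate-then-unwind shape: allocate the full array up front, initialize ring by ring, and on failure free only the rings that were set up. A reduced sketch, with ring_init()/ring_free() as hypothetical stand-ins:

/* Editor's sketch: the allocate-then-unwind pattern used by the TX
 * ring allocators after this patch.
 */
#include <stdlib.h>

struct ring { void *mem; };

static int ring_init(struct ring *r)
{
	r->mem = malloc(4096);		/* stand-in for descriptor memory */
	return r->mem ? 0 : -1;
}

static void ring_free(struct ring *r)
{
	free(r->mem);
}

static int alloc_rings(struct ring **out, int total)
{
	struct ring *rings = calloc(total, sizeof(*rings));
	int i, j;

	if (!rings)
		return -1;
	for (i = 0; i < total; i++)
		if (ring_init(&rings[i]) != 0)
			goto cleanup;
	*out = rings;
	return 0;

cleanup:
	for (j = 0; j < i; j++)		/* free only what succeeded */
		ring_free(&rings[j]);
	free(rings);
	return -1;
}

int main(void)
{
	struct ring *rings = NULL;

	return alloc_rings(&rings, 16);
}
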
diff --git a/drivers/net/ethernet/google/gve/gve_utils.c b/drivers/net/ethernet/google/gve/gve_utils.c
index 30fef100257e..ace9b8698021 100644
--- a/drivers/net/ethernet/google/gve/gve_utils.c
+++ b/drivers/net/ethernet/google/gve/gve_utils.c
@@ -110,13 +110,13 @@ void gve_add_napi(struct gve_priv *priv, int ntfy_idx,
{
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
- netif_napi_add(priv->dev, &block->napi, gve_poll);
- netif_napi_set_irq(&block->napi, block->irq);
+ netif_napi_add_locked(priv->dev, &block->napi, gve_poll);
+ netif_napi_set_irq_locked(&block->napi, block->irq);
}
void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
{
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
- netif_napi_del(&block->napi);
+ netif_napi_del_locked(&block->napi);
}
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/Makefile b/drivers/net/ethernet/hisilicon/hibmcge/Makefile
index 7ea15f9ef849..1a9da564b306 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/Makefile
+++ b/drivers/net/ethernet/hisilicon/hibmcge/Makefile
@@ -6,4 +6,4 @@
obj-$(CONFIG_HIBMCGE) += hibmcge.o
hibmcge-objs = hbg_main.o hbg_hw.o hbg_mdio.o hbg_irq.o hbg_txrx.o hbg_ethtool.o \
- hbg_debugfs.o hbg_err.o
+ hbg_debugfs.o hbg_err.o hbg_diagnose.o
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
index b4300d8ea4ad..f8cdab62bf85 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
@@ -36,6 +36,8 @@ enum hbg_nic_state {
HBG_NIC_STATE_EVENT_HANDLING = 0,
HBG_NIC_STATE_RESETTING,
HBG_NIC_STATE_RESET_FAIL,
+ HBG_NIC_STATE_NEED_RESET, /* trigger a reset in scheduled task */
+ HBG_NIC_STATE_NP_LINK_FAIL,
};
enum hbg_reset_type {
@@ -81,6 +83,7 @@ enum hbg_hw_event_type {
HBG_HW_EVENT_NONE = 0,
HBG_HW_EVENT_INIT, /* driver is loading */
HBG_HW_EVENT_RESET,
+ HBG_HW_EVENT_CORE_RESET,
};
struct hbg_dev_specs {
@@ -104,6 +107,7 @@ struct hbg_irq_info {
u32 mask;
bool re_enable;
bool need_print;
+ bool need_reset;
u64 count;
void (*irq_handle)(struct hbg_priv *priv, struct hbg_irq_info *info);
@@ -142,6 +146,118 @@ struct hbg_user_def {
struct ethtool_pauseparam pause_param;
};
+struct hbg_stats {
+ u64 rx_desc_drop;
+ u64 rx_desc_l2_err_cnt;
+ u64 rx_desc_pkt_len_err_cnt;
+ u64 rx_desc_l3l4_err_cnt;
+ u64 rx_desc_l3_wrong_head_cnt;
+ u64 rx_desc_l3_csum_err_cnt;
+ u64 rx_desc_l3_len_err_cnt;
+ u64 rx_desc_l3_zero_ttl_cnt;
+ u64 rx_desc_l3_other_cnt;
+ u64 rx_desc_l4_err_cnt;
+ u64 rx_desc_l4_wrong_head_cnt;
+ u64 rx_desc_l4_len_err_cnt;
+ u64 rx_desc_l4_csum_err_cnt;
+ u64 rx_desc_l4_zero_port_num_cnt;
+ u64 rx_desc_l4_other_cnt;
+ u64 rx_desc_frag_cnt;
+ u64 rx_desc_ip_ver_err_cnt;
+ u64 rx_desc_ipv4_pkt_cnt;
+ u64 rx_desc_ipv6_pkt_cnt;
+ u64 rx_desc_no_ip_pkt_cnt;
+ u64 rx_desc_ip_pkt_cnt;
+ u64 rx_desc_tcp_pkt_cnt;
+ u64 rx_desc_udp_pkt_cnt;
+ u64 rx_desc_vlan_pkt_cnt;
+ u64 rx_desc_icmp_pkt_cnt;
+ u64 rx_desc_arp_pkt_cnt;
+ u64 rx_desc_rarp_pkt_cnt;
+ u64 rx_desc_multicast_pkt_cnt;
+ u64 rx_desc_broadcast_pkt_cnt;
+ u64 rx_desc_ipsec_pkt_cnt;
+ u64 rx_desc_ip_opt_pkt_cnt;
+ u64 rx_desc_key_not_match_cnt;
+
+ u64 rx_octets_total_ok_cnt;
+ u64 rx_uc_pkt_cnt;
+ u64 rx_mc_pkt_cnt;
+ u64 rx_bc_pkt_cnt;
+ u64 rx_vlan_pkt_cnt;
+ u64 rx_octets_bad_cnt;
+ u64 rx_octets_total_filt_cnt;
+ u64 rx_filt_pkt_cnt;
+ u64 rx_trans_pkt_cnt;
+ u64 rx_framesize_64;
+ u64 rx_framesize_65_127;
+ u64 rx_framesize_128_255;
+ u64 rx_framesize_256_511;
+ u64 rx_framesize_512_1023;
+ u64 rx_framesize_1024_1518;
+ u64 rx_framesize_bt_1518;
+ u64 rx_fcs_error_cnt;
+ u64 rx_data_error_cnt;
+ u64 rx_align_error_cnt;
+ u64 rx_pause_macctl_frame_cnt;
+ u64 rx_unknown_macctl_frame_cnt;
+	/* CRC ok, > max_frm_size, < 2 * max_frm_size */
+	u64 rx_frame_long_err_cnt;
+	/* CRC fail, > max_frm_size, < 2 * max_frm_size */
+	u64 rx_jabber_err_cnt;
+	/* > 2 * max_frm_size */
+	u64 rx_frame_very_long_err_cnt;
+	/* < 64 bytes, >= short_runts_thr */
+	u64 rx_frame_runt_err_cnt;
+	/* < short_runts_thr */
+	u64 rx_frame_short_err_cnt;
+	/* PCU: dropped when the RX FIFO is full. */
+ u64 rx_overflow_cnt;
+ /* GMAC: the count of overflows of the RX FIFO */
+ u64 rx_overrun_cnt;
+ /* PCU: the count of buffer alloc errors in RX */
+ u64 rx_bufrq_err_cnt;
+ /* PCU: the count of write descriptor errors in RX */
+ u64 rx_we_err_cnt;
+ /* GMAC: the count of pkts that contain PAD but length is not 64 */
+ u64 rx_lengthfield_err_cnt;
+ u64 rx_fail_comma_cnt;
+
+ u64 rx_dma_err_cnt;
+ u64 rx_fifo_less_empty_thrsld_cnt;
+
+ u64 tx_octets_total_ok_cnt;
+ u64 tx_uc_pkt_cnt;
+ u64 tx_mc_pkt_cnt;
+ u64 tx_bc_pkt_cnt;
+ u64 tx_vlan_pkt_cnt;
+ u64 tx_octets_bad_cnt;
+ u64 tx_trans_pkt_cnt;
+ u64 tx_pause_frame_cnt;
+ u64 tx_framesize_64;
+ u64 tx_framesize_65_127;
+ u64 tx_framesize_128_255;
+ u64 tx_framesize_256_511;
+ u64 tx_framesize_512_1023;
+ u64 tx_framesize_1024_1518;
+ u64 tx_framesize_bt_1518;
+ /* GMAC: the count of times that frames fail to be transmitted
+ * due to internal errors.
+ */
+ u64 tx_underrun_err_cnt;
+ u64 tx_add_cs_fail_cnt;
+ /* PCU: the count of buffer free errors in TX */
+ u64 tx_bufrl_err_cnt;
+ u64 tx_crc_err_cnt;
+ u64 tx_drop_cnt;
+ u64 tx_excessive_length_drop_cnt;
+
+ u64 tx_timeout_cnt;
+ u64 tx_dma_err_cnt;
+
+ u64 np_link_fail_cnt;
+};
+
struct hbg_priv {
struct net_device *netdev;
struct pci_dev *pdev;
@@ -155,6 +271,12 @@ struct hbg_priv {
struct hbg_mac_filter filter;
enum hbg_reset_type reset_type;
struct hbg_user_def user_def;
+ struct hbg_stats stats;
+ unsigned long last_update_stats_time;
+ struct delayed_work service_task;
};
+void hbg_err_reset_task_schedule(struct hbg_priv *priv);
+void hbg_np_link_fail_task_schedule(struct hbg_priv *priv);
+
#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c
index 8473c43d171a..5e0ba4d5b08d 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c
@@ -67,10 +67,11 @@ static int hbg_dbg_irq_info(struct seq_file *s, void *unused)
for (i = 0; i < priv->vectors.info_array_len; i++) {
info = &priv->vectors.info_array[i];
seq_printf(s,
- "%-20s: enabled: %-5s, logged: %-5s, count: %llu\n",
+ "%-20s: enabled: %-5s, reset: %-5s, logged: %-5s, count: %llu\n",
info->name,
str_true_false(hbg_hw_irq_is_enabled(priv,
info->mask)),
+ str_true_false(info->need_reset),
str_true_false(info->need_print),
info->count);
}
@@ -114,6 +115,10 @@ static int hbg_dbg_nic_state(struct seq_file *s, void *unused)
state_str_true_false(priv, HBG_NIC_STATE_RESET_FAIL));
seq_printf(s, "last reset type: %s\n",
reset_type_str[priv->reset_type]);
+ seq_printf(s, "need reset state: %s\n",
+ state_str_true_false(priv, HBG_NIC_STATE_NEED_RESET));
+ seq_printf(s, "np_link fail state: %s\n",
+ state_str_true_false(priv, HBG_NIC_STATE_NP_LINK_FAIL));
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c
new file mode 100644
index 000000000000..d61c03f34ff0
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2025 Hisilicon Limited.
+
+#include <linux/iopoll.h>
+#include <linux/phy.h>
+#include "hbg_common.h"
+#include "hbg_ethtool.h"
+#include "hbg_hw.h"
+#include "hbg_diagnose.h"
+
+#define HBG_MSG_DATA_MAX_NUM 64
+
+struct hbg_diagnose_message {
+ u32 opcode;
+ u32 status;
+ u32 data_num;
+ struct hbg_priv *priv;
+
+ u32 data[HBG_MSG_DATA_MAX_NUM];
+};
+
+#define HBG_HW_PUSH_WAIT_TIMEOUT_US (2 * 1000 * 1000)
+#define HBG_HW_PUSH_WAIT_INTERVAL_US (1 * 1000)
+
+enum hbg_push_cmd {
+ HBG_PUSH_CMD_IRQ = 0,
+ HBG_PUSH_CMD_STATS,
+ HBG_PUSH_CMD_LINK,
+};
+
+struct hbg_push_stats_info {
+	/* id is used to match the name of the current stats item
+	 * and for pretty-printing on the BMC.
+	 */
+ u32 id;
+ u64 offset;
+};
+
+struct hbg_push_irq_info {
+	/* id is used to match the name of the current irq
+	 * and for pretty-printing on the BMC.
+	 */
+ u32 id;
+ u32 mask;
+};
+
+#define HBG_PUSH_IRQ_I(name, id) {id, HBG_INT_MSK_##name##_B}
+static const struct hbg_push_irq_info hbg_push_irq_list[] = {
+ HBG_PUSH_IRQ_I(RX, 0),
+ HBG_PUSH_IRQ_I(TX, 1),
+ HBG_PUSH_IRQ_I(TX_PKT_CPL, 2),
+ HBG_PUSH_IRQ_I(MAC_MII_FIFO_ERR, 3),
+ HBG_PUSH_IRQ_I(MAC_PCS_RX_FIFO_ERR, 4),
+ HBG_PUSH_IRQ_I(MAC_PCS_TX_FIFO_ERR, 5),
+ HBG_PUSH_IRQ_I(MAC_APP_RX_FIFO_ERR, 6),
+ HBG_PUSH_IRQ_I(MAC_APP_TX_FIFO_ERR, 7),
+ HBG_PUSH_IRQ_I(SRAM_PARITY_ERR, 8),
+ HBG_PUSH_IRQ_I(TX_AHB_ERR, 9),
+ HBG_PUSH_IRQ_I(RX_BUF_AVL, 10),
+ HBG_PUSH_IRQ_I(REL_BUF_ERR, 11),
+ HBG_PUSH_IRQ_I(TXCFG_AVL, 12),
+ HBG_PUSH_IRQ_I(TX_DROP, 13),
+ HBG_PUSH_IRQ_I(RX_DROP, 14),
+ HBG_PUSH_IRQ_I(RX_AHB_ERR, 15),
+ HBG_PUSH_IRQ_I(MAC_FIFO_ERR, 16),
+ HBG_PUSH_IRQ_I(RBREQ_ERR, 17),
+ HBG_PUSH_IRQ_I(WE_ERR, 18),
+};
+
+#define HBG_PUSH_STATS_I(name, id) {id, HBG_STATS_FIELD_OFF(name)}
+static const struct hbg_push_stats_info hbg_push_stats_list[] = {
+ HBG_PUSH_STATS_I(rx_desc_drop, 0),
+ HBG_PUSH_STATS_I(rx_desc_l2_err_cnt, 1),
+ HBG_PUSH_STATS_I(rx_desc_pkt_len_err_cnt, 2),
+ HBG_PUSH_STATS_I(rx_desc_l3_wrong_head_cnt, 3),
+ HBG_PUSH_STATS_I(rx_desc_l3_csum_err_cnt, 4),
+ HBG_PUSH_STATS_I(rx_desc_l3_len_err_cnt, 5),
+ HBG_PUSH_STATS_I(rx_desc_l3_zero_ttl_cnt, 6),
+ HBG_PUSH_STATS_I(rx_desc_l3_other_cnt, 7),
+ HBG_PUSH_STATS_I(rx_desc_l4_err_cnt, 8),
+ HBG_PUSH_STATS_I(rx_desc_l4_wrong_head_cnt, 9),
+ HBG_PUSH_STATS_I(rx_desc_l4_len_err_cnt, 10),
+ HBG_PUSH_STATS_I(rx_desc_l4_csum_err_cnt, 11),
+ HBG_PUSH_STATS_I(rx_desc_l4_zero_port_num_cnt, 12),
+ HBG_PUSH_STATS_I(rx_desc_l4_other_cnt, 13),
+ HBG_PUSH_STATS_I(rx_desc_frag_cnt, 14),
+ HBG_PUSH_STATS_I(rx_desc_ip_ver_err_cnt, 15),
+ HBG_PUSH_STATS_I(rx_desc_ipv4_pkt_cnt, 16),
+ HBG_PUSH_STATS_I(rx_desc_ipv6_pkt_cnt, 17),
+ HBG_PUSH_STATS_I(rx_desc_no_ip_pkt_cnt, 18),
+ HBG_PUSH_STATS_I(rx_desc_ip_pkt_cnt, 19),
+ HBG_PUSH_STATS_I(rx_desc_tcp_pkt_cnt, 20),
+ HBG_PUSH_STATS_I(rx_desc_udp_pkt_cnt, 21),
+ HBG_PUSH_STATS_I(rx_desc_vlan_pkt_cnt, 22),
+ HBG_PUSH_STATS_I(rx_desc_icmp_pkt_cnt, 23),
+ HBG_PUSH_STATS_I(rx_desc_arp_pkt_cnt, 24),
+ HBG_PUSH_STATS_I(rx_desc_rarp_pkt_cnt, 25),
+ HBG_PUSH_STATS_I(rx_desc_multicast_pkt_cnt, 26),
+ HBG_PUSH_STATS_I(rx_desc_broadcast_pkt_cnt, 27),
+ HBG_PUSH_STATS_I(rx_desc_ipsec_pkt_cnt, 28),
+ HBG_PUSH_STATS_I(rx_desc_ip_opt_pkt_cnt, 29),
+ HBG_PUSH_STATS_I(rx_desc_key_not_match_cnt, 30),
+ HBG_PUSH_STATS_I(rx_octets_total_ok_cnt, 31),
+ HBG_PUSH_STATS_I(rx_uc_pkt_cnt, 32),
+ HBG_PUSH_STATS_I(rx_mc_pkt_cnt, 33),
+ HBG_PUSH_STATS_I(rx_bc_pkt_cnt, 34),
+ HBG_PUSH_STATS_I(rx_vlan_pkt_cnt, 35),
+ HBG_PUSH_STATS_I(rx_octets_bad_cnt, 36),
+ HBG_PUSH_STATS_I(rx_octets_total_filt_cnt, 37),
+ HBG_PUSH_STATS_I(rx_filt_pkt_cnt, 38),
+ HBG_PUSH_STATS_I(rx_trans_pkt_cnt, 39),
+ HBG_PUSH_STATS_I(rx_framesize_64, 40),
+ HBG_PUSH_STATS_I(rx_framesize_65_127, 41),
+ HBG_PUSH_STATS_I(rx_framesize_128_255, 42),
+ HBG_PUSH_STATS_I(rx_framesize_256_511, 43),
+ HBG_PUSH_STATS_I(rx_framesize_512_1023, 44),
+ HBG_PUSH_STATS_I(rx_framesize_1024_1518, 45),
+ HBG_PUSH_STATS_I(rx_framesize_bt_1518, 46),
+ HBG_PUSH_STATS_I(rx_fcs_error_cnt, 47),
+ HBG_PUSH_STATS_I(rx_data_error_cnt, 48),
+ HBG_PUSH_STATS_I(rx_align_error_cnt, 49),
+ HBG_PUSH_STATS_I(rx_frame_long_err_cnt, 50),
+ HBG_PUSH_STATS_I(rx_jabber_err_cnt, 51),
+ HBG_PUSH_STATS_I(rx_pause_macctl_frame_cnt, 52),
+ HBG_PUSH_STATS_I(rx_unknown_macctl_frame_cnt, 53),
+ HBG_PUSH_STATS_I(rx_frame_very_long_err_cnt, 54),
+ HBG_PUSH_STATS_I(rx_frame_runt_err_cnt, 55),
+ HBG_PUSH_STATS_I(rx_frame_short_err_cnt, 56),
+ HBG_PUSH_STATS_I(rx_overflow_cnt, 57),
+ HBG_PUSH_STATS_I(rx_bufrq_err_cnt, 58),
+ HBG_PUSH_STATS_I(rx_we_err_cnt, 59),
+ HBG_PUSH_STATS_I(rx_overrun_cnt, 60),
+ HBG_PUSH_STATS_I(rx_lengthfield_err_cnt, 61),
+ HBG_PUSH_STATS_I(rx_fail_comma_cnt, 62),
+ HBG_PUSH_STATS_I(rx_dma_err_cnt, 63),
+ HBG_PUSH_STATS_I(rx_fifo_less_empty_thrsld_cnt, 64),
+ HBG_PUSH_STATS_I(tx_octets_total_ok_cnt, 65),
+ HBG_PUSH_STATS_I(tx_uc_pkt_cnt, 66),
+ HBG_PUSH_STATS_I(tx_mc_pkt_cnt, 67),
+ HBG_PUSH_STATS_I(tx_bc_pkt_cnt, 68),
+ HBG_PUSH_STATS_I(tx_vlan_pkt_cnt, 69),
+ HBG_PUSH_STATS_I(tx_octets_bad_cnt, 70),
+ HBG_PUSH_STATS_I(tx_trans_pkt_cnt, 71),
+ HBG_PUSH_STATS_I(tx_pause_frame_cnt, 72),
+ HBG_PUSH_STATS_I(tx_framesize_64, 73),
+ HBG_PUSH_STATS_I(tx_framesize_65_127, 74),
+ HBG_PUSH_STATS_I(tx_framesize_128_255, 75),
+ HBG_PUSH_STATS_I(tx_framesize_256_511, 76),
+ HBG_PUSH_STATS_I(tx_framesize_512_1023, 77),
+ HBG_PUSH_STATS_I(tx_framesize_1024_1518, 78),
+ HBG_PUSH_STATS_I(tx_framesize_bt_1518, 79),
+ HBG_PUSH_STATS_I(tx_underrun_err_cnt, 80),
+ HBG_PUSH_STATS_I(tx_add_cs_fail_cnt, 81),
+ HBG_PUSH_STATS_I(tx_bufrl_err_cnt, 82),
+ HBG_PUSH_STATS_I(tx_crc_err_cnt, 83),
+ HBG_PUSH_STATS_I(tx_drop_cnt, 84),
+ HBG_PUSH_STATS_I(tx_excessive_length_drop_cnt, 85),
+ HBG_PUSH_STATS_I(tx_dma_err_cnt, 86),
+};
+
+static int hbg_push_msg_send(struct hbg_priv *priv,
+ struct hbg_diagnose_message *msg)
+{
+ u32 header = 0;
+ u32 i;
+
+ if (msg->data_num == 0)
+ return 0;
+
+ for (i = 0; i < msg->data_num && i < HBG_MSG_DATA_MAX_NUM; i++)
+ hbg_reg_write(priv,
+ HBG_REG_MSG_DATA_BASE_ADDR + i * sizeof(u32),
+ msg->data[i]);
+
+ hbg_field_modify(header, HBG_REG_MSG_HEADER_OPCODE_M, msg->opcode);
+ hbg_field_modify(header, HBG_REG_MSG_HEADER_DATA_NUM_M, msg->data_num);
+ hbg_field_modify(header, HBG_REG_MSG_HEADER_RESP_CODE_M, ETIMEDOUT);
+
+ /* start status */
+ hbg_field_modify(header, HBG_REG_MSG_HEADER_STATUS_M, 1);
+
+ /* write header msg to start push */
+ hbg_reg_write(priv, HBG_REG_MSG_HEADER_ADDR, header);
+
+ /* wait done */
+ readl_poll_timeout(priv->io_base + HBG_REG_MSG_HEADER_ADDR, header,
+ !FIELD_GET(HBG_REG_MSG_HEADER_STATUS_M, header),
+ HBG_HW_PUSH_WAIT_INTERVAL_US,
+ HBG_HW_PUSH_WAIT_TIMEOUT_US);
+
+ msg->status = FIELD_GET(HBG_REG_MSG_HEADER_STATUS_M, header);
+ return -(int)FIELD_GET(HBG_REG_MSG_HEADER_RESP_CODE_M, header);
+}
+
+static int hbg_push_data(struct hbg_priv *priv,
+ u32 opcode, u32 *data, u32 data_num)
+{
+ struct hbg_diagnose_message msg = {0};
+ u32 data_left_num;
+ u32 i, j;
+ int ret;
+
+ msg.priv = priv;
+ msg.opcode = opcode;
+ for (i = 0; i < data_num / HBG_MSG_DATA_MAX_NUM + 1; i++) {
+ if (i * HBG_MSG_DATA_MAX_NUM >= data_num)
+ break;
+
+ data_left_num = data_num - i * HBG_MSG_DATA_MAX_NUM;
+ for (j = 0; j < data_left_num && j < HBG_MSG_DATA_MAX_NUM; j++)
+ msg.data[j] = data[i * HBG_MSG_DATA_MAX_NUM + j];
+
+ msg.data_num = j;
+ ret = hbg_push_msg_send(priv, &msg);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hbg_push_data_u64(struct hbg_priv *priv, u32 opcode,
+ u64 *data, u32 data_num)
+{
+	/* A u64 is twice the size of a u32, so data_num must be
+	 * doubled.
+	 */
+ return hbg_push_data(priv, opcode, (u32 *)data, data_num * 2);
+}
+
+static u64 hbg_get_irq_stats(struct hbg_vector *vectors, u32 mask)
+{
+ u32 i = 0;
+
+ for (i = 0; i < vectors->info_array_len; i++)
+ if (vectors->info_array[i].mask == mask)
+ return vectors->info_array[i].count;
+
+ return 0;
+}
+
+static int hbg_push_irq_cnt(struct hbg_priv *priv)
+{
+	/* Each value is paired with an id, so data_num is twice the
+	 * number of list entries.
+	 */
+ u32 data_num = ARRAY_SIZE(hbg_push_irq_list) * 2;
+ struct hbg_vector *vectors = &priv->vectors;
+ const struct hbg_push_irq_info *info;
+ u32 i, j = 0;
+ u64 *data;
+ int ret;
+
+ data = kcalloc(data_num, sizeof(u64), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+	/* Each entry occupies two slots (id, value), so i advances
+	 * by 2 per iteration.
+	 */
+ for (i = 0; i < data_num; i += 2) {
+ info = &hbg_push_irq_list[j++];
+ data[i] = info->id;
+ data[i + 1] = hbg_get_irq_stats(vectors, info->mask);
+ }
+
+ ret = hbg_push_data_u64(priv, HBG_PUSH_CMD_IRQ, data, data_num);
+ kfree(data);
+ return ret;
+}
+
+static int hbg_push_link_status(struct hbg_priv *priv)
+{
+ u32 link_status[2];
+
+ /* phy link status */
+ link_status[0] = priv->mac.phydev->link;
+ /* mac link status */
+ link_status[1] = hbg_reg_read_field(priv, HBG_REG_AN_NEG_STATE_ADDR,
+ HBG_REG_AN_NEG_STATE_NP_LINK_OK_B);
+
+ return hbg_push_data(priv, HBG_PUSH_CMD_LINK,
+ link_status, ARRAY_SIZE(link_status));
+}
+
+static int hbg_push_stats(struct hbg_priv *priv)
+{
+	/* Each value is paired with an id, so data_num is twice the
+	 * number of list entries.
+	 */
+ u64 data_num = ARRAY_SIZE(hbg_push_stats_list) * 2;
+ struct hbg_stats *stats = &priv->stats;
+ const struct hbg_push_stats_info *info;
+ u32 i, j = 0;
+ u64 *data;
+ int ret;
+
+ data = kcalloc(data_num, sizeof(u64), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+	/* Each entry occupies two slots (id, value), so i advances
+	 * by 2 per iteration.
+	 */
+ for (i = 0; i < data_num; i += 2) {
+ info = &hbg_push_stats_list[j++];
+ data[i] = info->id;
+ data[i + 1] = HBG_STATS_R(stats, info->offset);
+ }
+
+ ret = hbg_push_data_u64(priv, HBG_PUSH_CMD_STATS, data, data_num);
+ kfree(data);
+ return ret;
+}
+
+void hbg_diagnose_message_push(struct hbg_priv *priv)
+{
+ int ret;
+
+ if (test_bit(HBG_NIC_STATE_RESETTING, &priv->state))
+ return;
+
+	/* a push is requested only when this register reads 1 */
+ if (hbg_reg_read(priv, HBG_REG_PUSH_REQ_ADDR) != 1)
+ return;
+
+ ret = hbg_push_irq_cnt(priv);
+ if (ret) {
+ dev_err(&priv->pdev->dev,
+ "failed to push irq cnt, ret = %d\n", ret);
+ goto push_done;
+ }
+
+ ret = hbg_push_link_status(priv);
+ if (ret) {
+ dev_err(&priv->pdev->dev,
+ "failed to push link status, ret = %d\n", ret);
+ goto push_done;
+ }
+
+ ret = hbg_push_stats(priv);
+ if (ret)
+ dev_err(&priv->pdev->dev,
+ "failed to push stats, ret = %d\n", ret);
+
+push_done:
+ hbg_reg_write(priv, HBG_REG_PUSH_REQ_ADDR, 0);
+}
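
For illustration only (editor's sketch, outside the patch): hbg_push_data() above splits payloads larger than one diagnose message into successive 64-word chunks, with the remainder in the last message. The chunking arithmetic, with the real send path replaced by a printf:

/* Editor's sketch: the 64-word chunking hbg_push_data() applies. */
#include <stdint.h>
#include <stdio.h>

#define MSG_DATA_MAX_NUM 64	/* matches HBG_MSG_DATA_MAX_NUM */

static void push_data(const uint32_t *data, uint32_t data_num)
{
	uint32_t i, j;

	for (i = 0; i < data_num / MSG_DATA_MAX_NUM + 1; i++) {
		if (i * MSG_DATA_MAX_NUM >= data_num)
			break;	/* data_num was an exact multiple */

		uint32_t left = data_num - i * MSG_DATA_MAX_NUM;

		for (j = 0; j < left && j < MSG_DATA_MAX_NUM; j++)
			(void)data[i * MSG_DATA_MAX_NUM + j];	/* the copy */
		printf("message %u carries %u words\n", i, j);
	}
}

int main(void)
{
	uint32_t payload[150] = { 0 };

	push_data(payload, 150);	/* -> 64 + 64 + 22 */
	return 0;
}
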
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.h
new file mode 100644
index 000000000000..ba04c6d8c03d
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2025 Hisilicon Limited. */
+
+#ifndef __HBG_DIAGNOSE_H
+#define __HBG_DIAGNOSE_H
+
+#include "hbg_common.h"
+
+void hbg_diagnose_message_push(struct hbg_priv *priv);
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
index 4d1f4a33391a..4e8cb66f601c 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
@@ -105,6 +105,62 @@ int hbg_reset(struct hbg_priv *priv)
return hbg_reset_done(priv, HBG_RESET_TYPE_FUNCTION);
}
+void hbg_err_reset(struct hbg_priv *priv)
+{
+ bool running;
+
+ rtnl_lock();
+ running = netif_running(priv->netdev);
+ if (running)
+ dev_close(priv->netdev);
+
+ hbg_reset(priv);
+
+	/* hbg_pci_err_detected() detaches the device first, so it
+	 * must be re-attached before reopening.
+	 */
+ if (!netif_device_present(priv->netdev))
+ netif_device_attach(priv->netdev);
+
+ if (running)
+ dev_open(priv->netdev, NULL);
+ rtnl_unlock();
+}
+
+static pci_ers_result_t hbg_pci_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+
+ netif_device_detach(netdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ pci_disable_device(pdev);
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t hbg_pci_err_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev,
+ "failed to re-enable PCI device after reset\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+
+ hbg_err_reset(priv);
+ netif_device_attach(netdev);
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
static void hbg_pci_err_reset_prepare(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -124,6 +180,8 @@ static void hbg_pci_err_reset_done(struct pci_dev *pdev)
}
static const struct pci_error_handlers hbg_pci_err_handler = {
+ .error_detected = hbg_pci_err_detected,
+ .slot_reset = hbg_pci_err_slot_reset,
.reset_prepare = hbg_pci_err_reset_prepare,
.reset_done = hbg_pci_err_reset_done,
};
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h
index d7828e446308..fb9fbe7004e8 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h
@@ -9,5 +9,6 @@
void hbg_set_pci_err_handler(struct pci_driver *pdrv);
int hbg_reset(struct hbg_priv *priv);
int hbg_rebuild(struct hbg_priv *priv);
+void hbg_err_reset(struct hbg_priv *priv);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c
index 00364a438ec2..8f1107b85fbb 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c
@@ -9,6 +9,136 @@
#include "hbg_ethtool.h"
#include "hbg_hw.h"
+struct hbg_ethtool_stats {
+ char name[ETH_GSTRING_LEN];
+ unsigned long offset;
+	u32 reg; /* 0 if this stat is not updated via a register read */
+};
+
+#define HBG_STATS_I(stats) { #stats, HBG_STATS_FIELD_OFF(stats), 0}
+#define HBG_STATS_REG_I(stats, reg) { #stats, HBG_STATS_FIELD_OFF(stats), reg}
+
+static const struct hbg_ethtool_stats hbg_ethtool_stats_info[] = {
+ HBG_STATS_I(rx_desc_l2_err_cnt),
+ HBG_STATS_I(rx_desc_pkt_len_err_cnt),
+ HBG_STATS_I(rx_desc_l3_wrong_head_cnt),
+ HBG_STATS_I(rx_desc_l3_csum_err_cnt),
+ HBG_STATS_I(rx_desc_l3_len_err_cnt),
+ HBG_STATS_I(rx_desc_l3_zero_ttl_cnt),
+ HBG_STATS_I(rx_desc_l3_other_cnt),
+ HBG_STATS_I(rx_desc_l4_wrong_head_cnt),
+ HBG_STATS_I(rx_desc_l4_len_err_cnt),
+ HBG_STATS_I(rx_desc_l4_csum_err_cnt),
+ HBG_STATS_I(rx_desc_l4_zero_port_num_cnt),
+ HBG_STATS_I(rx_desc_l4_other_cnt),
+ HBG_STATS_I(rx_desc_ip_ver_err_cnt),
+ HBG_STATS_I(rx_desc_ipv4_pkt_cnt),
+ HBG_STATS_I(rx_desc_ipv6_pkt_cnt),
+ HBG_STATS_I(rx_desc_no_ip_pkt_cnt),
+ HBG_STATS_I(rx_desc_ip_pkt_cnt),
+ HBG_STATS_I(rx_desc_tcp_pkt_cnt),
+ HBG_STATS_I(rx_desc_udp_pkt_cnt),
+ HBG_STATS_I(rx_desc_vlan_pkt_cnt),
+ HBG_STATS_I(rx_desc_icmp_pkt_cnt),
+ HBG_STATS_I(rx_desc_arp_pkt_cnt),
+ HBG_STATS_I(rx_desc_rarp_pkt_cnt),
+ HBG_STATS_I(rx_desc_multicast_pkt_cnt),
+ HBG_STATS_I(rx_desc_broadcast_pkt_cnt),
+ HBG_STATS_I(rx_desc_ipsec_pkt_cnt),
+ HBG_STATS_I(rx_desc_ip_opt_pkt_cnt),
+ HBG_STATS_I(rx_desc_key_not_match_cnt),
+
+ HBG_STATS_REG_I(rx_octets_bad_cnt, HBG_REG_RX_OCTETS_BAD_ADDR),
+ HBG_STATS_REG_I(rx_octets_total_filt_cnt,
+ HBG_REG_RX_OCTETS_TOTAL_FILT_ADDR),
+ HBG_STATS_REG_I(rx_uc_pkt_cnt, HBG_REG_RX_UC_PKTS_ADDR),
+ HBG_STATS_REG_I(rx_vlan_pkt_cnt, HBG_REG_RX_TAGGED_ADDR),
+ HBG_STATS_REG_I(rx_filt_pkt_cnt, HBG_REG_RX_FILT_PKT_CNT_ADDR),
+ HBG_STATS_REG_I(rx_data_error_cnt, HBG_REG_RX_DATA_ERR_ADDR),
+ HBG_STATS_REG_I(rx_frame_long_err_cnt, HBG_REG_RX_LONG_ERRORS_ADDR),
+ HBG_STATS_REG_I(rx_jabber_err_cnt, HBG_REG_RX_JABBER_ERRORS_ADDR),
+ HBG_STATS_REG_I(rx_frame_very_long_err_cnt,
+ HBG_REG_RX_VERY_LONG_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(rx_frame_runt_err_cnt, HBG_REG_RX_RUNT_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(rx_frame_short_err_cnt, HBG_REG_RX_SHORT_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(rx_overflow_cnt, HBG_REG_RX_OVER_FLOW_CNT_ADDR),
+ HBG_STATS_REG_I(rx_bufrq_err_cnt, HBG_REG_RX_BUFRQ_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(rx_we_err_cnt, HBG_REG_RX_WE_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(rx_overrun_cnt, HBG_REG_RX_OVERRUN_CNT_ADDR),
+ HBG_STATS_REG_I(rx_lengthfield_err_cnt,
+ HBG_REG_RX_LENGTHFIELD_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(rx_fail_comma_cnt, HBG_REG_RX_FAIL_COMMA_CNT_ADDR),
+ HBG_STATS_I(rx_dma_err_cnt),
+ HBG_STATS_I(rx_fifo_less_empty_thrsld_cnt),
+
+ HBG_STATS_REG_I(tx_uc_pkt_cnt, HBG_REG_TX_UC_PKTS_ADDR),
+ HBG_STATS_REG_I(tx_vlan_pkt_cnt, HBG_REG_TX_TAGGED_ADDR),
+ HBG_STATS_REG_I(tx_octets_bad_cnt, HBG_REG_OCTETS_TRANSMITTED_BAD_ADDR),
+
+ HBG_STATS_REG_I(tx_underrun_err_cnt, HBG_REG_TX_UNDERRUN_ADDR),
+ HBG_STATS_REG_I(tx_add_cs_fail_cnt, HBG_REG_TX_CS_FAIL_CNT_ADDR),
+ HBG_STATS_REG_I(tx_bufrl_err_cnt, HBG_REG_TX_BUFRL_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(tx_crc_err_cnt, HBG_REG_TX_CRC_ERROR_ADDR),
+ HBG_STATS_REG_I(tx_drop_cnt, HBG_REG_TX_DROP_CNT_ADDR),
+ HBG_STATS_REG_I(tx_excessive_length_drop_cnt,
+ HBG_REG_TX_EXCESSIVE_LENGTH_DROP_ADDR),
+ HBG_STATS_I(tx_dma_err_cnt),
+ HBG_STATS_I(tx_timeout_cnt),
+};
+
+static const struct hbg_ethtool_stats hbg_ethtool_rmon_stats_info[] = {
+ HBG_STATS_I(rx_desc_frag_cnt),
+ HBG_STATS_REG_I(rx_framesize_64, HBG_REG_RX_PKTS_64OCTETS_ADDR),
+ HBG_STATS_REG_I(rx_framesize_65_127,
+ HBG_REG_RX_PKTS_65TO127OCTETS_ADDR),
+ HBG_STATS_REG_I(rx_framesize_128_255,
+ HBG_REG_RX_PKTS_128TO255OCTETS_ADDR),
+ HBG_STATS_REG_I(rx_framesize_256_511,
+ HBG_REG_RX_PKTS_256TO511OCTETS_ADDR),
+ HBG_STATS_REG_I(rx_framesize_512_1023,
+ HBG_REG_RX_PKTS_512TO1023OCTETS_ADDR),
+ HBG_STATS_REG_I(rx_framesize_1024_1518,
+ HBG_REG_RX_PKTS_1024TO1518OCTETS_ADDR),
+ HBG_STATS_REG_I(rx_framesize_bt_1518,
+ HBG_REG_RX_PKTS_1519TOMAXOCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_64, HBG_REG_TX_PKTS_64OCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_65_127,
+ HBG_REG_TX_PKTS_65TO127OCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_128_255,
+ HBG_REG_TX_PKTS_128TO255OCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_256_511,
+ HBG_REG_TX_PKTS_256TO511OCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_512_1023,
+ HBG_REG_TX_PKTS_512TO1023OCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_1024_1518,
+ HBG_REG_TX_PKTS_1024TO1518OCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_bt_1518,
+ HBG_REG_TX_PKTS_1519TOMAXOCTETS_ADDR),
+};
+
+static const struct hbg_ethtool_stats hbg_ethtool_mac_stats_info[] = {
+ HBG_STATS_REG_I(rx_mc_pkt_cnt, HBG_REG_RX_MC_PKTS_ADDR),
+ HBG_STATS_REG_I(rx_bc_pkt_cnt, HBG_REG_RX_BC_PKTS_ADDR),
+ HBG_STATS_REG_I(rx_align_error_cnt, HBG_REG_RX_ALIGN_ERRORS_ADDR),
+ HBG_STATS_REG_I(rx_octets_total_ok_cnt,
+ HBG_REG_RX_OCTETS_TOTAL_OK_ADDR),
+ HBG_STATS_REG_I(rx_trans_pkt_cnt, HBG_REG_RX_TRANS_PKG_CNT_ADDR),
+ HBG_STATS_REG_I(rx_fcs_error_cnt, HBG_REG_RX_FCS_ERRORS_ADDR),
+ HBG_STATS_REG_I(tx_mc_pkt_cnt, HBG_REG_TX_MC_PKTS_ADDR),
+ HBG_STATS_REG_I(tx_bc_pkt_cnt, HBG_REG_TX_BC_PKTS_ADDR),
+ HBG_STATS_REG_I(tx_octets_total_ok_cnt,
+ HBG_REG_OCTETS_TRANSMITTED_OK_ADDR),
+ HBG_STATS_REG_I(tx_trans_pkt_cnt, HBG_REG_TX_TRANS_PKG_CNT_ADDR),
+};
+
+static const struct hbg_ethtool_stats hbg_ethtool_ctrl_stats_info[] = {
+ HBG_STATS_REG_I(rx_pause_macctl_frame_cnt,
+ HBG_REG_RX_PAUSE_MACCTL_FRAMCOUNTER_ADDR),
+ HBG_STATS_REG_I(tx_pause_frame_cnt, HBG_REG_TX_PAUSE_FRAMES_ADDR),
+ HBG_STATS_REG_I(rx_unknown_macctl_frame_cnt,
+ HBG_REG_RX_UNKNOWN_MACCTL_FRAMCOUNTER_ADDR),
+};
+
enum hbg_reg_dump_type {
HBG_DUMP_REG_TYPE_SPEC = 0,
HBG_DUMP_REG_TYPE_MDIO,
@@ -180,6 +310,167 @@ static int hbg_ethtool_reset(struct net_device *netdev, u32 *flags)
return hbg_reset(priv);
}
+static void hbg_update_stats_by_info(struct hbg_priv *priv,
+ const struct hbg_ethtool_stats *info,
+ u32 info_len)
+{
+ const struct hbg_ethtool_stats *stats;
+ u32 i;
+
+ for (i = 0; i < info_len; i++) {
+ stats = &info[i];
+ if (!stats->reg)
+ continue;
+
+ HBG_STATS_U(&priv->stats, stats->offset,
+ hbg_reg_read(priv, stats->reg));
+ }
+}
+
+void hbg_update_stats(struct hbg_priv *priv)
+{
+ hbg_update_stats_by_info(priv, hbg_ethtool_stats_info,
+ ARRAY_SIZE(hbg_ethtool_stats_info));
+ hbg_update_stats_by_info(priv, hbg_ethtool_rmon_stats_info,
+ ARRAY_SIZE(hbg_ethtool_rmon_stats_info));
+ hbg_update_stats_by_info(priv, hbg_ethtool_mac_stats_info,
+ ARRAY_SIZE(hbg_ethtool_mac_stats_info));
+ hbg_update_stats_by_info(priv, hbg_ethtool_ctrl_stats_info,
+ ARRAY_SIZE(hbg_ethtool_ctrl_stats_info));
+}
+
+static int hbg_ethtool_get_sset_count(struct net_device *netdev, int stringset)
+{
+ if (stringset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
+
+ return ARRAY_SIZE(hbg_ethtool_stats_info);
+}
+
+static void hbg_ethtool_get_strings(struct net_device *netdev,
+ u32 stringset, u8 *data)
+{
+ u32 i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(hbg_ethtool_stats_info); i++)
+ ethtool_puts(&data, hbg_ethtool_stats_info[i].name);
+}
+
+static void hbg_ethtool_get_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ u32 i;
+
+ hbg_update_stats(priv);
+ for (i = 0; i < ARRAY_SIZE(hbg_ethtool_stats_info); i++)
+ *data++ = HBG_STATS_R(&priv->stats,
+ hbg_ethtool_stats_info[i].offset);
+}
+
+static void hbg_ethtool_get_pause_stats(struct net_device *netdev,
+ struct ethtool_pause_stats *epstats)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_stats *stats = &priv->stats;
+
+ hbg_update_stats(priv);
+ epstats->rx_pause_frames = stats->rx_pause_macctl_frame_cnt;
+ epstats->tx_pause_frames = stats->tx_pause_frame_cnt;
+}
+
+static void hbg_ethtool_get_eth_mac_stats(struct net_device *netdev,
+ struct ethtool_eth_mac_stats *emstats)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_stats *stats = &priv->stats;
+
+ hbg_update_stats(priv);
+ emstats->FramesTransmittedOK = stats->tx_trans_pkt_cnt;
+ emstats->FramesReceivedOK = stats->rx_trans_pkt_cnt;
+ emstats->FrameCheckSequenceErrors = stats->rx_fcs_error_cnt;
+ emstats->AlignmentErrors = stats->rx_align_error_cnt;
+ emstats->OctetsTransmittedOK = stats->tx_octets_total_ok_cnt;
+ emstats->OctetsReceivedOK = stats->rx_octets_total_ok_cnt;
+
+ emstats->MulticastFramesXmittedOK = stats->tx_mc_pkt_cnt;
+ emstats->BroadcastFramesXmittedOK = stats->tx_bc_pkt_cnt;
+ emstats->MulticastFramesReceivedOK = stats->rx_mc_pkt_cnt;
+ emstats->BroadcastFramesReceivedOK = stats->rx_bc_pkt_cnt;
+ emstats->InRangeLengthErrors = stats->rx_fcs_error_cnt +
+ stats->rx_jabber_err_cnt +
+ stats->rx_unknown_macctl_frame_cnt +
+ stats->rx_bufrq_err_cnt +
+ stats->rx_we_err_cnt;
+ emstats->OutOfRangeLengthField = stats->rx_frame_short_err_cnt +
+ stats->rx_frame_runt_err_cnt +
+ stats->rx_lengthfield_err_cnt +
+ stats->rx_frame_long_err_cnt +
+ stats->rx_frame_very_long_err_cnt;
+ emstats->FrameTooLongErrors = stats->rx_frame_long_err_cnt +
+ stats->rx_frame_very_long_err_cnt;
+}
+
+static void
+hbg_ethtool_get_eth_ctrl_stats(struct net_device *netdev,
+ struct ethtool_eth_ctrl_stats *ecstats)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_stats *s = &priv->stats;
+
+ hbg_update_stats(priv);
+ ecstats->MACControlFramesTransmitted = s->tx_pause_frame_cnt;
+ ecstats->MACControlFramesReceived = s->rx_pause_macctl_frame_cnt;
+ ecstats->UnsupportedOpcodesReceived = s->rx_unknown_macctl_frame_cnt;
+}
+
+static const struct ethtool_rmon_hist_range hbg_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 4095 },
+};
+
+static void
+hbg_ethtool_get_rmon_stats(struct net_device *netdev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_stats *stats = &priv->stats;
+
+ hbg_update_stats(priv);
+ rmon_stats->undersize_pkts = stats->rx_frame_short_err_cnt +
+ stats->rx_frame_runt_err_cnt +
+ stats->rx_lengthfield_err_cnt;
+ rmon_stats->oversize_pkts = stats->rx_frame_long_err_cnt +
+ stats->rx_frame_very_long_err_cnt;
+ rmon_stats->fragments = stats->rx_desc_frag_cnt;
+ rmon_stats->hist[0] = stats->rx_framesize_64;
+ rmon_stats->hist[1] = stats->rx_framesize_65_127;
+ rmon_stats->hist[2] = stats->rx_framesize_128_255;
+ rmon_stats->hist[3] = stats->rx_framesize_256_511;
+ rmon_stats->hist[4] = stats->rx_framesize_512_1023;
+ rmon_stats->hist[5] = stats->rx_framesize_1024_1518;
+ rmon_stats->hist[6] = stats->rx_framesize_bt_1518;
+
+ rmon_stats->hist_tx[0] = stats->tx_framesize_64;
+ rmon_stats->hist_tx[1] = stats->tx_framesize_65_127;
+ rmon_stats->hist_tx[2] = stats->tx_framesize_128_255;
+ rmon_stats->hist_tx[3] = stats->tx_framesize_256_511;
+ rmon_stats->hist_tx[4] = stats->tx_framesize_512_1023;
+ rmon_stats->hist_tx[5] = stats->tx_framesize_1024_1518;
+ rmon_stats->hist_tx[6] = stats->tx_framesize_bt_1518;
+
+ *ranges = hbg_rmon_ranges;
+}
+
static const struct ethtool_ops hbg_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
@@ -190,6 +481,13 @@ static const struct ethtool_ops hbg_ethtool_ops = {
.set_pauseparam = hbg_ethtool_set_pauseparam,
.reset = hbg_ethtool_reset,
.nway_reset = phy_ethtool_nway_reset,
+ .get_sset_count = hbg_ethtool_get_sset_count,
+ .get_strings = hbg_ethtool_get_strings,
+ .get_ethtool_stats = hbg_ethtool_get_stats,
+ .get_pause_stats = hbg_ethtool_get_pause_stats,
+ .get_eth_mac_stats = hbg_ethtool_get_eth_mac_stats,
+ .get_eth_ctrl_stats = hbg_ethtool_get_eth_ctrl_stats,
+ .get_rmon_stats = hbg_ethtool_get_rmon_stats,
};
void hbg_ethtool_set_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h
index 628707ec2686..e173155b146a 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h
@@ -6,6 +6,11 @@
#include <linux/netdevice.h>
+#define HBG_STATS_FIELD_OFF(f) (offsetof(struct hbg_stats, f))
+#define HBG_STATS_R(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
+#define HBG_STATS_U(p, offset, val) (HBG_STATS_R(p, offset) += (val))
+
void hbg_ethtool_set_ops(struct net_device *netdev);
+void hbg_update_stats(struct hbg_priv *priv);
#endif
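
For illustration only (editor's sketch, outside the patch): the HBG_STATS_* macros above let one table of (name, offset) entries drive reads and in-place accumulation of any u64 field in struct hbg_stats. The offsetof-based access pattern, reduced to a two-field struct:

/* Editor's sketch: offsetof-based stats access behind HBG_STATS_*. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct stats {
	uint64_t rx_ok;
	uint64_t tx_ok;
};

#define STATS_FIELD_OFF(f)	(offsetof(struct stats, f))
#define STATS_R(p, off)		(*(uint64_t *)((uint8_t *)(p) + (off)))
#define STATS_U(p, off, val)	(STATS_R(p, off) += (val))

int main(void)
{
	struct stats s = { 0 };
	size_t off = STATS_FIELD_OFF(tx_ok);

	STATS_U(&s, off, 5);	/* accumulate, as hbg_update_stats() does */
	printf("tx_ok=%llu\n", (unsigned long long)STATS_R(&s, off));
	return 0;
}
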
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
index e7798f213645..74a18033b444 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
@@ -213,10 +213,20 @@ void hbg_hw_fill_buffer(struct hbg_priv *priv, u32 buffer_dma_addr)
void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex)
{
+ hbg_hw_mac_enable(priv, HBG_STATUS_DISABLE);
+
hbg_reg_write_field(priv, HBG_REG_PORT_MODE_ADDR,
HBG_REG_PORT_MODE_M, speed);
hbg_reg_write_field(priv, HBG_REG_DUPLEX_TYPE_ADDR,
HBG_REG_DUPLEX_B, duplex);
+
+ hbg_hw_event_notify(priv, HBG_HW_EVENT_CORE_RESET);
+
+ hbg_hw_mac_enable(priv, HBG_STATUS_ENABLE);
+
+ if (!hbg_reg_read_field(priv, HBG_REG_AN_NEG_STATE_ADDR,
+ HBG_REG_AN_NEG_STATE_NP_LINK_OK_B))
+ hbg_np_link_fail_task_schedule(priv);
}
/* only support uc filter */
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
index 25dd25f096fe..e79e9ab3e530 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
@@ -11,6 +11,9 @@ static void hbg_irq_handle_err(struct hbg_priv *priv,
if (irq_info->need_print)
dev_err(&priv->pdev->dev,
"receive error interrupt: %s\n", irq_info->name);
+
+ if (irq_info->need_reset)
+ hbg_err_reset_task_schedule(priv);
}
static void hbg_irq_handle_tx(struct hbg_priv *priv,
@@ -25,30 +28,38 @@ static void hbg_irq_handle_rx(struct hbg_priv *priv,
napi_schedule(&priv->rx_ring.napi);
}
-#define HBG_TXRX_IRQ_I(name, handle) \
- {#name, HBG_INT_MSK_##name##_B, false, false, 0, handle}
-#define HBG_ERR_IRQ_I(name, need_print) \
- {#name, HBG_INT_MSK_##name##_B, true, need_print, 0, hbg_irq_handle_err}
+static void hbg_irq_handle_rx_buf_val(struct hbg_priv *priv,
+ struct hbg_irq_info *irq_info)
+{
+ priv->stats.rx_fifo_less_empty_thrsld_cnt++;
+}
+
+#define HBG_IRQ_I(name, handle) \
+ {#name, HBG_INT_MSK_##name##_B, false, false, false, 0, handle}
+#define HBG_ERR_IRQ_I(name, need_print, need_reset) \
+	{#name, HBG_INT_MSK_##name##_B, true, need_print, \
+	 need_reset, 0, hbg_irq_handle_err}
static struct hbg_irq_info hbg_irqs[] = {
- HBG_TXRX_IRQ_I(RX, hbg_irq_handle_rx),
- HBG_TXRX_IRQ_I(TX, hbg_irq_handle_tx),
- HBG_ERR_IRQ_I(MAC_MII_FIFO_ERR, true),
- HBG_ERR_IRQ_I(MAC_PCS_RX_FIFO_ERR, true),
- HBG_ERR_IRQ_I(MAC_PCS_TX_FIFO_ERR, true),
- HBG_ERR_IRQ_I(MAC_APP_RX_FIFO_ERR, true),
- HBG_ERR_IRQ_I(MAC_APP_TX_FIFO_ERR, true),
- HBG_ERR_IRQ_I(SRAM_PARITY_ERR, true),
- HBG_ERR_IRQ_I(TX_AHB_ERR, true),
- HBG_ERR_IRQ_I(RX_BUF_AVL, false),
- HBG_ERR_IRQ_I(REL_BUF_ERR, true),
- HBG_ERR_IRQ_I(TXCFG_AVL, false),
- HBG_ERR_IRQ_I(TX_DROP, false),
- HBG_ERR_IRQ_I(RX_DROP, false),
- HBG_ERR_IRQ_I(RX_AHB_ERR, true),
- HBG_ERR_IRQ_I(MAC_FIFO_ERR, false),
- HBG_ERR_IRQ_I(RBREQ_ERR, false),
- HBG_ERR_IRQ_I(WE_ERR, false),
+ HBG_IRQ_I(RX, hbg_irq_handle_rx),
+ HBG_IRQ_I(TX, hbg_irq_handle_tx),
+ HBG_ERR_IRQ_I(TX_PKT_CPL, true, true),
+ HBG_ERR_IRQ_I(MAC_MII_FIFO_ERR, true, true),
+ HBG_ERR_IRQ_I(MAC_PCS_RX_FIFO_ERR, true, true),
+ HBG_ERR_IRQ_I(MAC_PCS_TX_FIFO_ERR, true, true),
+ HBG_ERR_IRQ_I(MAC_APP_RX_FIFO_ERR, true, true),
+ HBG_ERR_IRQ_I(MAC_APP_TX_FIFO_ERR, true, true),
+ HBG_ERR_IRQ_I(SRAM_PARITY_ERR, true, false),
+ HBG_ERR_IRQ_I(TX_AHB_ERR, true, true),
+ HBG_IRQ_I(RX_BUF_AVL, hbg_irq_handle_rx_buf_val),
+ HBG_ERR_IRQ_I(REL_BUF_ERR, true, false),
+ HBG_ERR_IRQ_I(TXCFG_AVL, false, false),
+ HBG_ERR_IRQ_I(TX_DROP, false, false),
+ HBG_ERR_IRQ_I(RX_DROP, false, false),
+ HBG_ERR_IRQ_I(RX_AHB_ERR, true, false),
+ HBG_ERR_IRQ_I(MAC_FIFO_ERR, true, true),
+ HBG_ERR_IRQ_I(RBREQ_ERR, true, true),
+ HBG_ERR_IRQ_I(WE_ERR, true, true),
};
static irqreturn_t hbg_irq_handle(int irq_num, void *p)
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
index bb0f25ac9760..2ac5454338e4 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
@@ -5,7 +5,9 @@
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
+#include <linux/phy.h>
#include "hbg_common.h"
+#include "hbg_diagnose.h"
#include "hbg_err.h"
#include "hbg_ethtool.h"
#include "hbg_hw.h"
@@ -14,6 +16,9 @@
#include "hbg_txrx.h"
#include "hbg_debugfs.h"
+#define HBG_SUPPORT_FEATURES (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \
+ NETIF_F_RXCSUM)
+
static void hbg_all_irq_enable(struct hbg_priv *priv, bool enabled)
{
struct hbg_irq_info *info;
@@ -214,6 +219,10 @@ static void hbg_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
char *buf = ring->tout_log_buf;
u32 pos = 0;
+ priv->stats.tx_timeout_cnt++;
+
+ pos += scnprintf(buf + pos, HBG_TX_TIMEOUT_BUF_LEN - pos,
+ "tx_timeout cnt: %llu\n", priv->stats.tx_timeout_cnt);
pos += scnprintf(buf + pos, HBG_TX_TIMEOUT_BUF_LEN - pos,
"ring used num: %u, fifo used num: %u\n",
hbg_get_queue_used_num(ring),
@@ -226,6 +235,39 @@ static void hbg_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
netdev_info(netdev, "%s", buf);
}
+static void hbg_net_get_stats(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_stats *h_stats = &priv->stats;
+
+ hbg_update_stats(priv);
+ dev_get_tstats64(netdev, stats);
+
+ /* fifo empty */
+ stats->tx_fifo_errors += h_stats->tx_drop_cnt;
+
+ stats->tx_dropped += h_stats->tx_excessive_length_drop_cnt +
+ h_stats->tx_drop_cnt;
+ stats->tx_errors += h_stats->tx_add_cs_fail_cnt +
+ h_stats->tx_bufrl_err_cnt +
+ h_stats->tx_underrun_err_cnt +
+ h_stats->tx_crc_err_cnt;
+ stats->rx_errors += h_stats->rx_data_error_cnt;
+ stats->multicast += h_stats->rx_mc_pkt_cnt;
+ stats->rx_dropped += h_stats->rx_desc_drop;
+ stats->rx_length_errors += h_stats->rx_frame_very_long_err_cnt +
+ h_stats->rx_frame_long_err_cnt +
+ h_stats->rx_frame_runt_err_cnt +
+ h_stats->rx_frame_short_err_cnt +
+ h_stats->rx_lengthfield_err_cnt;
+ stats->rx_frame_errors += h_stats->rx_desc_l2_err_cnt +
+ h_stats->rx_desc_l3l4_err_cnt;
+ stats->rx_fifo_errors += h_stats->rx_overflow_cnt +
+ h_stats->rx_overrun_cnt;
+ stats->rx_crc_errors += h_stats->rx_fcs_error_cnt;
+}
+
static const struct net_device_ops hbg_netdev_ops = {
.ndo_open = hbg_net_open,
.ndo_stop = hbg_net_stop,
@@ -235,8 +277,62 @@ static const struct net_device_ops hbg_netdev_ops = {
.ndo_change_mtu = hbg_net_change_mtu,
.ndo_tx_timeout = hbg_net_tx_timeout,
.ndo_set_rx_mode = hbg_net_set_rx_mode,
+ .ndo_get_stats64 = hbg_net_get_stats,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
};
+static void hbg_service_task(struct work_struct *work)
+{
+ struct hbg_priv *priv = container_of(work, struct hbg_priv,
+ service_task.work);
+
+ if (test_and_clear_bit(HBG_NIC_STATE_NEED_RESET, &priv->state))
+ hbg_err_reset(priv);
+
+ if (test_and_clear_bit(HBG_NIC_STATE_NP_LINK_FAIL, &priv->state))
+ hbg_fix_np_link_fail(priv);
+
+ hbg_diagnose_message_push(priv);
+
+	/* The hardware statistics registers are u32. To prevent them
+	 * from overflowing, the driver dumps the statistics every
+	 * 30 seconds.
+ */
+ if (time_after(jiffies, priv->last_update_stats_time + 30 * HZ)) {
+ hbg_update_stats(priv);
+ priv->last_update_stats_time = jiffies;
+ }
+
+ schedule_delayed_work(&priv->service_task,
+ msecs_to_jiffies(MSEC_PER_SEC));
+}
+
+void hbg_err_reset_task_schedule(struct hbg_priv *priv)
+{
+ set_bit(HBG_NIC_STATE_NEED_RESET, &priv->state);
+ schedule_delayed_work(&priv->service_task, 0);
+}
+
+void hbg_np_link_fail_task_schedule(struct hbg_priv *priv)
+{
+ set_bit(HBG_NIC_STATE_NP_LINK_FAIL, &priv->state);
+ schedule_delayed_work(&priv->service_task, 0);
+}
+
+static void hbg_cancel_delayed_work_sync(void *data)
+{
+ cancel_delayed_work_sync(data);
+}
+
+static int hbg_delaywork_init(struct hbg_priv *priv)
+{
+ INIT_DELAYED_WORK(&priv->service_task, hbg_service_task);
+ schedule_delayed_work(&priv->service_task, 0);
+ return devm_add_action_or_reset(&priv->pdev->dev,
+ hbg_cancel_delayed_work_sync,
+ &priv->service_task);
+}
+
static int hbg_mac_filter_init(struct hbg_priv *priv)
{
struct hbg_dev_specs *dev_specs = &priv->dev_specs;
@@ -291,6 +387,10 @@ static int hbg_init(struct hbg_priv *priv)
if (ret)
return ret;
+ ret = hbg_delaywork_init(priv);
+ if (ret)
+ return ret;
+
hbg_debugfs_init(priv);
hbg_init_user_def(priv);
return 0;
@@ -349,6 +449,9 @@ static int hbg_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
return ret;
+ /* set default features */
+ netdev->features |= HBG_SUPPORT_FEATURES;
+ netdev->hw_features |= HBG_SUPPORT_FEATURES;
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
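
hbg_update_stats() itself is outside these hunks; given the service-task comment above, it presumably folds each 32-bit hardware register into the u64 software counters through HBG_STATS_U() before a register can wrap. A sketch under that assumption (the helper name and the read-clear register behavior are assumptions, not taken from this patch):

	/* Fold one u32 hardware counter into its u64 software counter.
	 * Assumes the register clears on read, so polling at least once
	 * per wrap period (every 30 seconds here) loses no counts.
	 */
	static void hbg_stats_update_one(struct hbg_priv *priv, u32 reg,
					 u32 field_off)
	{
		u32 delta = hbg_reg_read(priv, reg);

		HBG_STATS_U(&priv->stats, field_off, delta);
	}
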
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
index db6bc4cfb971..f29a937ad087 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
@@ -17,6 +17,8 @@
#define HBG_MDIO_OP_TIMEOUT_US (1 * 1000 * 1000)
#define HBG_MDIO_OP_INTERVAL_US (5 * 1000)
+#define HBG_NP_LINK_FAIL_RETRY_TIMES 5
+
static void hbg_mdio_set_command(struct hbg_mac *mac, u32 cmd)
{
hbg_reg_write(HBG_MAC_GET_PRIV(mac), HBG_REG_MDIO_COMMAND_ADDR, cmd);
@@ -127,6 +129,26 @@ static void hbg_flowctrl_cfg(struct hbg_priv *priv)
hbg_hw_set_pause_enable(priv, tx_pause, rx_pause);
}
+void hbg_fix_np_link_fail(struct hbg_priv *priv)
+{
+ struct device *dev = &priv->pdev->dev;
+
+ if (priv->stats.np_link_fail_cnt >= HBG_NP_LINK_FAIL_RETRY_TIMES) {
+ dev_err(dev, "failed to fix the MAC link status\n");
+ priv->stats.np_link_fail_cnt = 0;
+ return;
+ }
+
+ priv->stats.np_link_fail_cnt++;
+ dev_err(dev, "failed to link between MAC and PHY, try to fix...\n");
+
+	/* Use phy_stop() followed by phy_start() instead of phy_reset(),
+	 * as suggested by Andrew.
+	 */
+ hbg_phy_stop(priv);
+ hbg_phy_start(priv);
+}
+
static void hbg_phy_adjust_link(struct net_device *netdev)
{
struct hbg_priv *priv = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h
index febd02a309c7..f3771c1bbd34 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h
@@ -9,4 +9,6 @@
int hbg_mdio_init(struct hbg_priv *priv);
void hbg_phy_start(struct hbg_priv *priv);
void hbg_phy_stop(struct hbg_priv *priv);
+void hbg_fix_np_link_fail(struct hbg_priv *priv);
+
#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
index f12efc12f3c5..cc2cc612770d 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
@@ -18,6 +18,13 @@
#define HBG_REG_TX_FIFO_NUM_ADDR 0x0030
#define HBG_REG_RX_FIFO_NUM_ADDR 0x0034
#define HBG_REG_VLAN_LAYERS_ADDR 0x0038
+#define HBG_REG_PUSH_REQ_ADDR 0x00F0
+#define HBG_REG_MSG_HEADER_ADDR 0x00F4
+#define HBG_REG_MSG_HEADER_OPCODE_M GENMASK(7, 0)
+#define HBG_REG_MSG_HEADER_STATUS_M GENMASK(11, 8)
+#define HBG_REG_MSG_HEADER_DATA_NUM_M GENMASK(19, 12)
+#define HBG_REG_MSG_HEADER_RESP_CODE_M GENMASK(27, 20)
+#define HBG_REG_MSG_DATA_BASE_ADDR 0x0100
/* MDIO */
#define HBG_REG_MDIO_BASE 0x8000
@@ -54,12 +61,55 @@
#define HBG_REG_PAUSE_ENABLE_RX_B BIT(0)
#define HBG_REG_PAUSE_ENABLE_TX_B BIT(1)
#define HBG_REG_AN_NEG_STATE_ADDR (HBG_REG_SGMII_BASE + 0x0058)
+#define HBG_REG_AN_NEG_STATE_NP_LINK_OK_B BIT(15)
#define HBG_REG_TRANSMIT_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x0060)
#define HBG_REG_TRANSMIT_CTRL_PAD_EN_B BIT(7)
#define HBG_REG_TRANSMIT_CTRL_CRC_ADD_B BIT(6)
#define HBG_REG_TRANSMIT_CTRL_AN_EN_B BIT(5)
#define HBG_REG_REC_FILT_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x0064)
#define HBG_REG_REC_FILT_CTRL_UC_MATCH_EN_B BIT(0)
+#define HBG_REG_RX_OCTETS_TOTAL_OK_ADDR (HBG_REG_SGMII_BASE + 0x0080)
+#define HBG_REG_RX_OCTETS_BAD_ADDR (HBG_REG_SGMII_BASE + 0x0084)
+#define HBG_REG_RX_UC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x0088)
+#define HBG_REG_RX_MC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x008C)
+#define HBG_REG_RX_BC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x0090)
+#define HBG_REG_RX_PKTS_64OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0094)
+#define HBG_REG_RX_PKTS_65TO127OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0098)
+#define HBG_REG_RX_PKTS_128TO255OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x009C)
+#define HBG_REG_RX_PKTS_256TO511OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x00A0)
+#define HBG_REG_RX_PKTS_512TO1023OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x00A4)
+#define HBG_REG_RX_PKTS_1024TO1518OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x00A8)
+#define HBG_REG_RX_PKTS_1519TOMAXOCTETS_ADDR (HBG_REG_SGMII_BASE + 0x00AC)
+#define HBG_REG_RX_FCS_ERRORS_ADDR (HBG_REG_SGMII_BASE + 0x00B0)
+#define HBG_REG_RX_TAGGED_ADDR (HBG_REG_SGMII_BASE + 0x00B4)
+#define HBG_REG_RX_DATA_ERR_ADDR (HBG_REG_SGMII_BASE + 0x00B8)
+#define HBG_REG_RX_ALIGN_ERRORS_ADDR (HBG_REG_SGMII_BASE + 0x00BC)
+#define HBG_REG_RX_LONG_ERRORS_ADDR (HBG_REG_SGMII_BASE + 0x00C0)
+#define HBG_REG_RX_JABBER_ERRORS_ADDR (HBG_REG_SGMII_BASE + 0x00C4)
+#define HBG_REG_RX_PAUSE_MACCTL_FRAMCOUNTER_ADDR (HBG_REG_SGMII_BASE + 0x00C8)
+#define HBG_REG_RX_UNKNOWN_MACCTL_FRAMCOUNTER_ADDR (HBG_REG_SGMII_BASE + 0x00CC)
+#define HBG_REG_RX_VERY_LONG_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x00D0)
+#define HBG_REG_RX_RUNT_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x00D4)
+#define HBG_REG_RX_SHORT_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x00D8)
+#define HBG_REG_RX_FILT_PKT_CNT_ADDR (HBG_REG_SGMII_BASE + 0x00E8)
+#define HBG_REG_RX_OCTETS_TOTAL_FILT_ADDR (HBG_REG_SGMII_BASE + 0x00EC)
+#define HBG_REG_OCTETS_TRANSMITTED_OK_ADDR (HBG_REG_SGMII_BASE + 0x0100)
+#define HBG_REG_OCTETS_TRANSMITTED_BAD_ADDR (HBG_REG_SGMII_BASE + 0x0104)
+#define HBG_REG_TX_UC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x0108)
+#define HBG_REG_TX_MC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x010C)
+#define HBG_REG_TX_BC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x0110)
+#define HBG_REG_TX_PKTS_64OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0114)
+#define HBG_REG_TX_PKTS_65TO127OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0118)
+#define HBG_REG_TX_PKTS_128TO255OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x011C)
+#define HBG_REG_TX_PKTS_256TO511OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0120)
+#define HBG_REG_TX_PKTS_512TO1023OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0124)
+#define HBG_REG_TX_PKTS_1024TO1518OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0128)
+#define HBG_REG_TX_PKTS_1519TOMAXOCTETS_ADDR (HBG_REG_SGMII_BASE + 0x012C)
+#define HBG_REG_TX_EXCESSIVE_LENGTH_DROP_ADDR (HBG_REG_SGMII_BASE + 0x014C)
+#define HBG_REG_TX_UNDERRUN_ADDR (HBG_REG_SGMII_BASE + 0x0150)
+#define HBG_REG_TX_TAGGED_ADDR (HBG_REG_SGMII_BASE + 0x0154)
+#define HBG_REG_TX_CRC_ERROR_ADDR (HBG_REG_SGMII_BASE + 0x0158)
+#define HBG_REG_TX_PAUSE_FRAMES_ADDR (HBG_REG_SGMII_BASE + 0x015C)
#define HBG_REG_LINE_LOOP_BACK_ADDR (HBG_REG_SGMII_BASE + 0x01A8)
#define HBG_REG_CF_CRC_STRIP_ADDR (HBG_REG_SGMII_BASE + 0x01B0)
#define HBG_REG_CF_CRC_STRIP_B BIT(0)
@@ -69,6 +119,9 @@
#define HBG_REG_RECV_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x01E0)
#define HBG_REG_RECV_CTRL_STRIP_PAD_EN_B BIT(3)
#define HBG_REG_VLAN_CODE_ADDR (HBG_REG_SGMII_BASE + 0x01E8)
+#define HBG_REG_RX_OVERRUN_CNT_ADDR (HBG_REG_SGMII_BASE + 0x01EC)
+#define HBG_REG_RX_LENGTHFIELD_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x01F4)
+#define HBG_REG_RX_FAIL_COMMA_CNT_ADDR (HBG_REG_SGMII_BASE + 0x01F8)
#define HBG_REG_STATION_ADDR_LOW_0_ADDR (HBG_REG_SGMII_BASE + 0x0200)
#define HBG_REG_STATION_ADDR_HIGH_0_ADDR (HBG_REG_SGMII_BASE + 0x0204)
#define HBG_REG_STATION_ADDR_LOW_1_ADDR (HBG_REG_SGMII_BASE + 0x0208)
@@ -103,6 +156,7 @@
#define HBG_INT_MSK_MAC_PCS_TX_FIFO_ERR_B BIT(17)
#define HBG_INT_MSK_MAC_PCS_RX_FIFO_ERR_B BIT(16)
#define HBG_INT_MSK_MAC_MII_FIFO_ERR_B BIT(15)
+#define HBG_INT_MSK_TX_PKT_CPL_B BIT(14)
#define HBG_INT_MSK_TX_B BIT(1) /* just used in driver */
#define HBG_INT_MSK_RX_B BIT(0) /* just used in driver */
#define HBG_REG_CF_INTRPT_STAT_ADDR (HBG_REG_SGMII_BASE + 0x0434)
@@ -111,12 +165,17 @@
#define HBG_REG_RX_BUS_ERR_ADDR_ADDR (HBG_REG_SGMII_BASE + 0x0440)
#define HBG_REG_MAX_FRAME_LEN_ADDR (HBG_REG_SGMII_BASE + 0x0444)
#define HBG_REG_MAX_FRAME_LEN_M GENMASK(15, 0)
+#define HBG_REG_TX_DROP_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0448)
+#define HBG_REG_RX_OVER_FLOW_CNT_ADDR (HBG_REG_SGMII_BASE + 0x044C)
#define HBG_REG_DEBUG_ST_MCH_ADDR (HBG_REG_SGMII_BASE + 0x0450)
#define HBG_REG_FIFO_CURR_STATUS_ADDR (HBG_REG_SGMII_BASE + 0x0454)
#define HBG_REG_FIFO_HIST_STATUS_ADDR (HBG_REG_SGMII_BASE + 0x0458)
#define HBG_REG_CF_CFF_DATA_NUM_ADDR (HBG_REG_SGMII_BASE + 0x045C)
#define HBG_REG_CF_CFF_DATA_NUM_ADDR_TX_M GENMASK(8, 0)
#define HBG_REG_CF_CFF_DATA_NUM_ADDR_RX_M GENMASK(24, 16)
+#define HBG_REG_TX_CS_FAIL_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0460)
+#define HBG_REG_RX_TRANS_PKG_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0464)
+#define HBG_REG_TX_TRANS_PKG_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0468)
#define HBG_REG_CF_TX_PAUSE_ADDR (HBG_REG_SGMII_BASE + 0x0470)
#define HBG_REG_TX_CFF_ADDR_0_ADDR (HBG_REG_SGMII_BASE + 0x0488)
#define HBG_REG_TX_CFF_ADDR_1_ADDR (HBG_REG_SGMII_BASE + 0x048C)
@@ -136,6 +195,9 @@
#define HBG_REG_RX_CTRL_RXBUF_1ST_SKIP_SIZE2_M GENMASK(3, 0)
#define HBG_REG_RX_PKT_MODE_ADDR (HBG_REG_SGMII_BASE + 0x04F4)
#define HBG_REG_RX_PKT_MODE_PARSE_MODE_M GENMASK(22, 21)
+#define HBG_REG_RX_BUFRQ_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x058C)
+#define HBG_REG_TX_BUFRL_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0590)
+#define HBG_REG_RX_WE_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0594)
#define HBG_REG_DBG_ST0_ADDR (HBG_REG_SGMII_BASE + 0x05E4)
#define HBG_REG_DBG_ST1_ADDR (HBG_REG_SGMII_BASE + 0x05E8)
#define HBG_REG_DBG_ST2_ADDR (HBG_REG_SGMII_BASE + 0x05EC)
@@ -178,5 +240,48 @@ struct hbg_rx_desc {
};
#define HBG_RX_DESC_W2_PKT_LEN_M GENMASK(31, 16)
+#define HBG_RX_DESC_W2_PORT_NUM_M GENMASK(15, 12)
+#define HBG_RX_DESC_W4_IP_TCP_UDP_M GENMASK(31, 30)
+#define HBG_RX_DESC_W4_IPSEC_B BIT(29)
+#define HBG_RX_DESC_W4_IP_VERSION_B BIT(28)
+#define HBG_RX_DESC_W4_L4_ERR_CODE_M GENMASK(26, 23)
+#define HBG_RX_DESC_W4_FRAG_B BIT(22)
+#define HBG_RX_DESC_W4_OPT_B BIT(21)
+#define HBG_RX_DESC_W4_IP_VERSION_ERR_B BIT(20)
+#define HBG_RX_DESC_W4_BRD_CST_B BIT(19)
+#define HBG_RX_DESC_W4_MUL_CST_B BIT(18)
+#define HBG_RX_DESC_W4_ARP_B BIT(17)
+#define HBG_RX_DESC_W4_RARP_B BIT(16)
+#define HBG_RX_DESC_W4_ICMP_B BIT(15)
+#define HBG_RX_DESC_W4_VLAN_FLAG_B BIT(14)
+#define HBG_RX_DESC_W4_DROP_B BIT(13)
+#define HBG_RX_DESC_W4_L3_ERR_CODE_M GENMASK(12, 9)
+#define HBG_RX_DESC_W4_L2_ERR_B BIT(8)
+#define HBG_RX_DESC_W4_IDX_MATCH_B BIT(7)
+
+enum hbg_l3_err_code {
+ HBG_L3_OK = 0,
+ HBG_L3_WRONG_HEAD,
+ HBG_L3_CSUM_ERR,
+ HBG_L3_LEN_ERR,
+ HBG_L3_ZERO_TTL,
+ HBG_L3_RSVD,
+};
+
+enum hbg_l4_err_code {
+ HBG_L4_OK = 0,
+ HBG_L4_WRONG_HEAD,
+ HBG_L4_LEN_ERR,
+ HBG_L4_CSUM_ERR,
+ HBG_L4_ZERO_PORT_NUM,
+ HBG_L4_RSVD,
+};
+
+enum hbg_pkt_type_code {
+ HBG_NO_IP_PKT = 0,
+ HBG_IP_PKT,
+ HBG_TCP_PKT,
+ HBG_UDP_PKT,
+};
#endif
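
The word4 field masks and error-code enums above are decoded with FIELD_GET() from <linux/bitfield.h>, as the hbg_txrx.c changes below do. A minimal standalone sketch:

	#include <linux/bitfield.h>

	/* true if the hardware parser reported a clean L3 header */
	static bool hbg_rx_l3_ok(const struct hbg_rx_desc *desc)
	{
		return FIELD_GET(HBG_RX_DESC_W4_L3_ERR_CODE_M, desc->word4) ==
		       HBG_L3_OK;
	}
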
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c
index f4f256a0dfea..8d814c8f19ea 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c
@@ -38,8 +38,14 @@ static int hbg_dma_map(struct hbg_buffer *buffer)
buffer->skb_dma = dma_map_single(&priv->pdev->dev,
buffer->skb->data, buffer->skb_len,
buffer_to_dma_dir(buffer));
- if (unlikely(dma_mapping_error(&priv->pdev->dev, buffer->skb_dma)))
+ if (unlikely(dma_mapping_error(&priv->pdev->dev, buffer->skb_dma))) {
+ if (buffer->dir == HBG_DIR_RX)
+ priv->stats.rx_dma_err_cnt++;
+ else
+ priv->stats.tx_dma_err_cnt++;
+
return -ENOMEM;
+ }
return 0;
}
@@ -195,6 +201,173 @@ static int hbg_napi_tx_recycle(struct napi_struct *napi, int budget)
return packet_done;
}
+static bool hbg_rx_check_l3l4_error(struct hbg_priv *priv,
+ struct hbg_rx_desc *desc,
+ struct sk_buff *skb)
+{
+ bool rx_checksum_offload = !!(priv->netdev->features & NETIF_F_RXCSUM);
+
+ skb->ip_summed = rx_checksum_offload ?
+ CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
+
+ if (likely(!FIELD_GET(HBG_RX_DESC_W4_L3_ERR_CODE_M, desc->word4) &&
+ !FIELD_GET(HBG_RX_DESC_W4_L4_ERR_CODE_M, desc->word4)))
+ return true;
+
+ switch (FIELD_GET(HBG_RX_DESC_W4_L3_ERR_CODE_M, desc->word4)) {
+ case HBG_L3_OK:
+ break;
+ case HBG_L3_WRONG_HEAD:
+ priv->stats.rx_desc_l3_wrong_head_cnt++;
+ return false;
+ case HBG_L3_CSUM_ERR:
+ skb->ip_summed = CHECKSUM_NONE;
+ priv->stats.rx_desc_l3_csum_err_cnt++;
+
+		/* Don't drop packets on csum validation failure,
+		 * as suggested by Jakub.
+ */
+ break;
+ case HBG_L3_LEN_ERR:
+ priv->stats.rx_desc_l3_len_err_cnt++;
+ return false;
+ case HBG_L3_ZERO_TTL:
+ priv->stats.rx_desc_l3_zero_ttl_cnt++;
+ return false;
+ default:
+ priv->stats.rx_desc_l3_other_cnt++;
+ return false;
+ }
+
+ switch (FIELD_GET(HBG_RX_DESC_W4_L4_ERR_CODE_M, desc->word4)) {
+ case HBG_L4_OK:
+ break;
+ case HBG_L4_WRONG_HEAD:
+ priv->stats.rx_desc_l4_wrong_head_cnt++;
+ return false;
+ case HBG_L4_LEN_ERR:
+ priv->stats.rx_desc_l4_len_err_cnt++;
+ return false;
+ case HBG_L4_CSUM_ERR:
+ skb->ip_summed = CHECKSUM_NONE;
+ priv->stats.rx_desc_l4_csum_err_cnt++;
+
+		/* Don't drop packets on csum validation failure,
+		 * as suggested by Jakub.
+ */
+ break;
+ case HBG_L4_ZERO_PORT_NUM:
+ priv->stats.rx_desc_l4_zero_port_num_cnt++;
+ return false;
+ default:
+ priv->stats.rx_desc_l4_other_cnt++;
+ return false;
+ }
+
+ return true;
+}
+
+static void hbg_update_rx_ip_protocol_stats(struct hbg_priv *priv,
+ struct hbg_rx_desc *desc)
+{
+ if (unlikely(!FIELD_GET(HBG_RX_DESC_W4_IP_TCP_UDP_M, desc->word4))) {
+ priv->stats.rx_desc_no_ip_pkt_cnt++;
+ return;
+ }
+
+ if (unlikely(FIELD_GET(HBG_RX_DESC_W4_IP_VERSION_ERR_B, desc->word4))) {
+ priv->stats.rx_desc_ip_ver_err_cnt++;
+ return;
+ }
+
+ /* 0:ipv4, 1:ipv6 */
+ if (FIELD_GET(HBG_RX_DESC_W4_IP_VERSION_B, desc->word4))
+ priv->stats.rx_desc_ipv6_pkt_cnt++;
+ else
+ priv->stats.rx_desc_ipv4_pkt_cnt++;
+
+ switch (FIELD_GET(HBG_RX_DESC_W4_IP_TCP_UDP_M, desc->word4)) {
+ case HBG_IP_PKT:
+ priv->stats.rx_desc_ip_pkt_cnt++;
+ if (FIELD_GET(HBG_RX_DESC_W4_OPT_B, desc->word4))
+ priv->stats.rx_desc_ip_opt_pkt_cnt++;
+ if (FIELD_GET(HBG_RX_DESC_W4_FRAG_B, desc->word4))
+ priv->stats.rx_desc_frag_cnt++;
+
+ if (FIELD_GET(HBG_RX_DESC_W4_ICMP_B, desc->word4))
+ priv->stats.rx_desc_icmp_pkt_cnt++;
+ else if (FIELD_GET(HBG_RX_DESC_W4_IPSEC_B, desc->word4))
+ priv->stats.rx_desc_ipsec_pkt_cnt++;
+ break;
+ case HBG_TCP_PKT:
+ priv->stats.rx_desc_tcp_pkt_cnt++;
+ break;
+ case HBG_UDP_PKT:
+ priv->stats.rx_desc_udp_pkt_cnt++;
+ break;
+ default:
+ priv->stats.rx_desc_no_ip_pkt_cnt++;
+ break;
+ }
+}
+
+static void hbg_update_rx_protocol_stats(struct hbg_priv *priv,
+ struct hbg_rx_desc *desc)
+{
+ if (unlikely(!FIELD_GET(HBG_RX_DESC_W4_IDX_MATCH_B, desc->word4))) {
+ priv->stats.rx_desc_key_not_match_cnt++;
+ return;
+ }
+
+ if (FIELD_GET(HBG_RX_DESC_W4_BRD_CST_B, desc->word4))
+ priv->stats.rx_desc_broadcast_pkt_cnt++;
+ else if (FIELD_GET(HBG_RX_DESC_W4_MUL_CST_B, desc->word4))
+ priv->stats.rx_desc_multicast_pkt_cnt++;
+
+ if (FIELD_GET(HBG_RX_DESC_W4_VLAN_FLAG_B, desc->word4))
+ priv->stats.rx_desc_vlan_pkt_cnt++;
+
+ if (FIELD_GET(HBG_RX_DESC_W4_ARP_B, desc->word4)) {
+ priv->stats.rx_desc_arp_pkt_cnt++;
+ return;
+ } else if (FIELD_GET(HBG_RX_DESC_W4_RARP_B, desc->word4)) {
+ priv->stats.rx_desc_rarp_pkt_cnt++;
+ return;
+ }
+
+ hbg_update_rx_ip_protocol_stats(priv, desc);
+}
+
+static bool hbg_rx_pkt_check(struct hbg_priv *priv, struct hbg_rx_desc *desc,
+ struct sk_buff *skb)
+{
+ if (unlikely(FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, desc->word2) >
+ priv->dev_specs.max_frame_len)) {
+ priv->stats.rx_desc_pkt_len_err_cnt++;
+ return false;
+ }
+
+ if (unlikely(FIELD_GET(HBG_RX_DESC_W2_PORT_NUM_M, desc->word2) !=
+ priv->dev_specs.mac_id ||
+ FIELD_GET(HBG_RX_DESC_W4_DROP_B, desc->word4))) {
+ priv->stats.rx_desc_drop++;
+ return false;
+ }
+
+ if (unlikely(FIELD_GET(HBG_RX_DESC_W4_L2_ERR_B, desc->word4))) {
+ priv->stats.rx_desc_l2_err_cnt++;
+ return false;
+ }
+
+ if (unlikely(!hbg_rx_check_l3l4_error(priv, desc, skb))) {
+ priv->stats.rx_desc_l3l4_err_cnt++;
+ return false;
+ }
+
+ hbg_update_rx_protocol_stats(priv, desc);
+ return true;
+}
+
static int hbg_rx_fill_one_buffer(struct hbg_priv *priv)
{
struct hbg_ring *ring = &priv->rx_ring;
@@ -257,8 +430,12 @@ static int hbg_napi_rx_poll(struct napi_struct *napi, int budget)
rx_desc = (struct hbg_rx_desc *)buffer->skb->data;
pkt_len = FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2);
- hbg_dma_unmap(buffer);
+ if (unlikely(!hbg_rx_pkt_check(priv, rx_desc, buffer->skb))) {
+ hbg_buffer_free(buffer);
+ goto next_buffer;
+ }
+ hbg_dma_unmap(buffer);
skb_reserve(buffer->skb, HBG_PACKET_HEAD_SIZE + NET_IP_ALIGN);
skb_put(buffer->skb, pkt_len);
buffer->skb->protocol = eth_type_trans(buffer->skb,
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index a376d4bdf281..18376bcc718a 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -934,8 +934,6 @@ static int hip04_mac_probe(struct platform_device *pdev)
priv->chan = arg.args[1] * RX_DESC_NUM;
priv->group = arg.args[2];
- hrtimer_init(&priv->tx_coalesce_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-
/* BQL will try to keep the TX queue as short as possible, but it can't
* be faster than tx_coalesce_usecs, so we need a fast timeout here,
* but also long enough to gather up enough frames to ensure we don't
@@ -944,7 +942,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
*/
priv->tx_coalesce_frames = TX_DESC_NUM * 3 / 4;
priv->tx_coalesce_usecs = 200;
- priv->tx_coalesce_timer.function = tx_done;
+ hrtimer_setup(&priv->tx_coalesce_timer, tx_done, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
priv->map = syscon_node_to_regmap(arg.np);
of_node_put(arg.np);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 6c458f037262..60a586a951a0 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -266,9 +266,9 @@ static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en)
if (err)
goto out;
- err = phy_loopback(phy_dev, true);
+ err = phy_loopback(phy_dev, true, 0);
} else {
- err = phy_loopback(phy_dev, false);
+ err = phy_loopback(phy_dev, false, 0);
if (err)
goto out;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 9bbece25552b..09749e9f7398 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -3,6 +3,7 @@
#include <linux/debugfs.h>
#include <linux/device.h>
+#include <linux/string_choices.h>
#include "hnae3.h"
#include "hns3_debugfs.h"
@@ -661,12 +662,14 @@ static void hns3_dump_rx_queue_info(struct hns3_enet_ring *ring,
HNS3_RING_RX_RING_PKTNUM_RECORD_REG));
sprintf(result[j++], "%u", ring->rx_copybreak);
- sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_EN_REG) ? "on" : "off");
+ sprintf(result[j++], "%s",
+ str_on_off(readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_EN_REG)));
if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev))
- sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_EN_REG) ? "on" : "off");
+ sprintf(result[j++], "%s",
+ str_on_off(readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_EN_REG)));
else
sprintf(result[j++], "%s", "NA");
@@ -764,12 +767,14 @@ static void hns3_dump_tx_queue_info(struct hns3_enet_ring *ring,
sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_PKTNUM_RECORD_REG));
- sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_EN_REG) ? "on" : "off");
+ sprintf(result[j++], "%s",
+ str_on_off(readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_EN_REG)));
if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev))
- sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_EN_REG) ? "on" : "off");
+ sprintf(result[j++], "%s",
+ str_on_off(readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_EN_REG)));
else
sprintf(result[j++], "%s", "NA");
@@ -1030,7 +1035,6 @@ static void
hns3_dbg_dev_caps(struct hnae3_handle *h, char *buf, int len, int *pos)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
- const char * const str[] = {"no", "yes"};
unsigned long *caps = ae_dev->caps;
u32 i, state;
@@ -1039,7 +1043,7 @@ hns3_dbg_dev_caps(struct hnae3_handle *h, char *buf, int len, int *pos)
for (i = 0; i < ARRAY_SIZE(hns3_dbg_cap); i++) {
state = test_bit(hns3_dbg_cap[i].cap_bit, caps);
*pos += scnprintf(buf + *pos, len - *pos, "%s: %s\n",
- hns3_dbg_cap[i].name, str[state]);
+ hns3_dbg_cap[i].name, str_yes_no(state));
}
*pos += scnprintf(buf + *pos, len - *pos, "\n");
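
str_on_off(), str_yes_no(), and (further below) str_enable_disable() come from <linux/string_choices.h> and replace open-coded ternaries and lookup arrays throughout this series. A minimal sketch of the helpers:

	#include <linux/string_choices.h>

	pr_info("ring: %s, dcb: %s, ptp: %s\n",
		str_on_off(ring_en),		/* "on" / "off" */
		str_enable_disable(dcb_en),	/* "enable" / "disable" */
		str_yes_no(ptp_en));		/* "yes" / "no" */
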
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index b771a2daba43..6715222aeb66 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -3,6 +3,7 @@
#include <linux/etherdevice.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
#include <linux/phy.h>
#include <linux/sfp.h>
@@ -1198,7 +1199,7 @@ static int hns3_set_tx_push(struct net_device *netdev, u32 tx_push)
return 0;
netdev_dbg(netdev, "Changing tx push from %s to %s\n",
- old_state ? "on" : "off", tx_push ? "on" : "off");
+ str_on_off(old_state), str_on_off(tx_push));
if (tx_push)
set_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index debf143e9940..c46490693594 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -3,6 +3,7 @@
#include <linux/device.h>
#include <linux/sched/clock.h>
+#include <linux/string_choices.h>
#include "hclge_debugfs.h"
#include "hclge_err.h"
@@ -11,7 +12,6 @@
#include "hclge_tm.h"
#include "hnae3.h"
-static const char * const state_str[] = { "off", "on" };
static const char * const hclge_mac_state_str[] = {
"TO_ADD", "TO_DEL", "ACTIVE"
};
@@ -2573,7 +2573,7 @@ static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len)
loopback_en = hnae3_get_bit(le32_to_cpu(req_app->txrx_pad_fcs_loop_en),
HCLGE_MAC_APP_LP_B);
pos += scnprintf(buf + pos, len - pos, "app loopback: %s\n",
- state_str[loopback_en]);
+ str_on_off(loopback_en));
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2586,22 +2586,22 @@ static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len)
loopback_en = req_common->enable & HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
pos += scnprintf(buf + pos, len - pos, "serdes serial loopback: %s\n",
- state_str[loopback_en]);
+ str_on_off(loopback_en));
loopback_en = req_common->enable &
HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B ? 1 : 0;
pos += scnprintf(buf + pos, len - pos, "serdes parallel loopback: %s\n",
- state_str[loopback_en]);
+ str_on_off(loopback_en));
if (phydev) {
loopback_en = phydev->loopback_enabled;
pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
- state_str[loopback_en]);
+ str_on_off(loopback_en));
} else if (hnae3_dev_phy_imp_supported(hdev)) {
loopback_en = req_common->enable &
HCLGE_CMD_GE_PHY_INNER_LOOP_B;
pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
- state_str[loopback_en]);
+ str_on_off(loopback_en));
}
return 0;
@@ -2894,9 +2894,9 @@ static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf,
egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0;
*pos += scnprintf(buf, len, "I_PORT_VLAN_FILTER: %s\n",
- state_str[ingress]);
+ str_on_off(ingress));
*pos += scnprintf(buf + *pos, len - *pos, "E_PORT_VLAN_FILTER: %s\n",
- state_str[egress]);
+ str_on_off(egress));
hclge_dbg_fill_content(content, sizeof(content), vlan_filter_items,
NULL, ARRAY_SIZE(vlan_filter_items));
@@ -2915,11 +2915,11 @@ static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf,
return ret;
j = 0;
result[j++] = hclge_dbg_get_func_id_str(str_id, i);
- result[j++] = state_str[ingress];
- result[j++] = state_str[egress];
- result[j++] =
- test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
- hdev->ae_dev->caps) ? state_str[bypass] : "NA";
+ result[j++] = str_on_off(ingress);
+ result[j++] = str_on_off(egress);
+ result[j++] = test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
+ hdev->ae_dev->caps) ?
+ str_on_off(bypass) : "NA";
hclge_dbg_fill_content(content, sizeof(content),
vlan_filter_items, result,
ARRAY_SIZE(vlan_filter_items));
@@ -2958,19 +2958,19 @@ static int hclge_dbg_dump_vlan_offload_config(struct hclge_dev *hdev, char *buf,
j = 0;
result[j++] = hclge_dbg_get_func_id_str(str_id, i);
result[j++] = str_pvid;
- result[j++] = state_str[vlan_cfg.accept_tag1];
- result[j++] = state_str[vlan_cfg.accept_tag2];
- result[j++] = state_str[vlan_cfg.accept_untag1];
- result[j++] = state_str[vlan_cfg.accept_untag2];
- result[j++] = state_str[vlan_cfg.insert_tag1];
- result[j++] = state_str[vlan_cfg.insert_tag2];
- result[j++] = state_str[vlan_cfg.shift_tag];
- result[j++] = state_str[vlan_cfg.strip_tag1];
- result[j++] = state_str[vlan_cfg.strip_tag2];
- result[j++] = state_str[vlan_cfg.drop_tag1];
- result[j++] = state_str[vlan_cfg.drop_tag2];
- result[j++] = state_str[vlan_cfg.pri_only1];
- result[j++] = state_str[vlan_cfg.pri_only2];
+ result[j++] = str_on_off(vlan_cfg.accept_tag1);
+ result[j++] = str_on_off(vlan_cfg.accept_tag2);
+ result[j++] = str_on_off(vlan_cfg.accept_untag1);
+ result[j++] = str_on_off(vlan_cfg.accept_untag2);
+ result[j++] = str_on_off(vlan_cfg.insert_tag1);
+ result[j++] = str_on_off(vlan_cfg.insert_tag2);
+ result[j++] = str_on_off(vlan_cfg.shift_tag);
+ result[j++] = str_on_off(vlan_cfg.strip_tag1);
+ result[j++] = str_on_off(vlan_cfg.strip_tag2);
+ result[j++] = str_on_off(vlan_cfg.drop_tag1);
+ result[j++] = str_on_off(vlan_cfg.drop_tag2);
+ result[j++] = str_on_off(vlan_cfg.pri_only1);
+ result[j++] = str_on_off(vlan_cfg.pri_only2);
hclge_dbg_fill_content(content, sizeof(content),
vlan_offload_items, result,
@@ -3007,14 +3007,13 @@ static int hclge_dbg_dump_ptp_info(struct hclge_dev *hdev, char *buf, int len)
pos += scnprintf(buf + pos, len - pos, "phc %s's debug info:\n",
ptp->info.name);
pos += scnprintf(buf + pos, len - pos, "ptp enable: %s\n",
- test_bit(HCLGE_PTP_FLAG_EN, &ptp->flags) ?
- "yes" : "no");
+ str_yes_no(test_bit(HCLGE_PTP_FLAG_EN, &ptp->flags)));
pos += scnprintf(buf + pos, len - pos, "ptp tx enable: %s\n",
- test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ?
- "yes" : "no");
+ str_yes_no(test_bit(HCLGE_PTP_FLAG_TX_EN,
+ &ptp->flags)));
pos += scnprintf(buf + pos, len - pos, "ptp rx enable: %s\n",
- test_bit(HCLGE_PTP_FLAG_RX_EN, &ptp->flags) ?
- "yes" : "no");
+ str_yes_no(test_bit(HCLGE_PTP_FLAG_RX_EN,
+ &ptp->flags)));
last_rx = jiffies_to_msecs(ptp->last_rx);
pos += scnprintf(buf + pos, len - pos, "last rx time: %lu.%lu\n",
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 3f17b3073e50..92f9b8ec76d9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -7875,7 +7875,7 @@ static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
if (ret)
return ret;
- return phy_loopback(phydev, true);
+ return phy_loopback(phydev, true, 0);
}
static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
@@ -7883,7 +7883,7 @@ static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
{
int ret;
- ret = phy_loopback(phydev, false);
+ ret = phy_loopback(phydev, false, 0);
if (ret)
return ret;
@@ -8000,7 +8000,7 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
ret = hclge_tqp_enable(handle, en);
if (ret)
dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
- en ? "enable" : "disable", ret);
+ str_enable_disable(en), ret);
return ret;
}
@@ -11200,9 +11200,9 @@ static void hclge_info_show(struct hclge_dev *hdev)
dev_info(dev, "This is %s PF\n",
hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
dev_info(dev, "DCB %s\n",
- handle->kinfo.tc_info.dcb_ets_active ? "enable" : "disable");
+ str_enable_disable(handle->kinfo.tc_info.dcb_ets_active));
dev_info(dev, "MQPRIO %s\n",
- handle->kinfo.tc_info.mqprio_active ? "enable" : "disable");
+ str_enable_disable(handle->kinfo.tc_info.mqprio_active));
dev_info(dev, "Default tx spare buffer size: %u\n",
hdev->tx_spare_buf_size);
@@ -11976,7 +11976,7 @@ static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
if (ret) {
dev_err(&hdev->pdev->dev,
"Set vf %d mac spoof check %s failed, ret=%d\n",
- vf, enable ? "on" : "off", ret);
+ vf, str_on_off(enable), ret);
return ret;
}
@@ -11984,7 +11984,7 @@ static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
if (ret)
dev_err(&hdev->pdev->dev,
"Set vf %d vlan spoof check %s failed, ret=%d\n",
- vf, enable ? "on" : "off", ret);
+ vf, str_on_off(enable), ret);
return ret;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 80079657afeb..9a456ebf9b7c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -258,7 +258,7 @@ void hclge_mac_start_phy(struct hclge_dev *hdev)
if (!phydev)
return;
- phy_loopback(phydev, false);
+ phy_loopback(phydev, false, 0);
phy_start(phydev);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
index 181af419b878..59cc9221185f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
@@ -2,6 +2,7 @@
// Copyright (c) 2021 Hisilicon Limited.
#include <linux/skbuff.h>
+#include <linux/string_choices.h>
#include "hclge_main.h"
#include "hnae3.h"
@@ -226,7 +227,7 @@ static int hclge_ptp_int_en(struct hclge_dev *hdev, bool en)
if (ret)
dev_err(&hdev->pdev->dev,
"failed to %s ptp interrupt, ret = %d\n",
- en ? "enable" : "disable", ret);
+ str_enable_disable(en), ret);
return ret;
}
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index a1aa6c1f966e..6812be8dc64f 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -640,7 +640,7 @@ static struct platform_driver hns_mdio_driver = {
.driver = {
.name = MDIO_DRV_NAME,
.of_match_table = hns_mdio_match,
- .acpi_match_table = ACPI_PTR(hns_mdio_acpi_match),
+ .acpi_match_table = hns_mdio_acpi_match,
},
};
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 25b8a3556004..417dfa18daae 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2554,17 +2554,12 @@ static int emac_dt_mdio_probe(struct emac_instance *dev)
struct mii_bus *bus;
int res;
- mii_np = of_get_child_by_name(dev->ofdev->dev.of_node, "mdio");
+ mii_np = of_get_available_child_by_name(dev->ofdev->dev.of_node, "mdio");
if (!mii_np) {
dev_err(&dev->ofdev->dev, "no mdio definition found.");
return -ENODEV;
}
- if (!of_device_is_available(mii_np)) {
- res = -ENODEV;
- goto put_node;
- }
-
bus = devm_mdiobus_alloc(&dev->ofdev->dev);
if (!bus) {
res = -ENOMEM;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 0676fc547b6f..92647e137cf8 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -234,11 +234,17 @@ static int ibmvnic_set_queue_affinity(struct ibmvnic_sub_crq_queue *queue,
(*stragglers)--;
}
/* atomic write is safer than writing bit by bit directly */
- for (i = 0; i < stride; i++) {
- cpumask_set_cpu(*cpu, mask);
- *cpu = cpumask_next_wrap(*cpu, cpu_online_mask,
- nr_cpu_ids, false);
+ for_each_online_cpu_wrap(i, *cpu) {
+ if (!stride--) {
+			/* Start the next queue from the first CPU
+			 * left unused by this queue
+ */
+ *cpu = i;
+ break;
+ }
+ cpumask_set_cpu(i, mask);
}
+
/* set queue affinity mask */
cpumask_copy(queue->affinity_mask, mask);
rc = irq_set_affinity_and_hint(queue->irq, queue->affinity_mask);
@@ -256,7 +262,7 @@ static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter)
int num_rxqs = adapter->num_active_rx_scrqs, i_rxqs = 0;
int num_txqs = adapter->num_active_tx_scrqs, i_txqs = 0;
int total_queues, stride, stragglers, i;
- unsigned int num_cpu, cpu;
+ unsigned int num_cpu, cpu = 0;
bool is_rx_queue;
int rc = 0;
@@ -274,8 +280,6 @@ static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter)
stride = max_t(int, num_cpu / total_queues, 1);
/* number of leftover cpu's */
stragglers = num_cpu >= total_queues ? num_cpu % total_queues : 0;
- /* next available cpu to assign irq to */
- cpu = cpumask_next(-1, cpu_online_mask);
for (i = 0; i < total_queues; i++) {
is_rx_queue = false;
@@ -4829,6 +4833,18 @@ static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
strscpy(vlcd->name, adapter->netdev->name, len);
}
+static void ibmvnic_print_hex_dump(struct net_device *dev, void *buf,
+ size_t len)
+{
+ unsigned char hex_str[16 * 3];
+
+ for (size_t i = 0; i < len; i += 16) {
+ hex_dump_to_buffer((unsigned char *)buf + i, len - i, 16, 8,
+ hex_str, sizeof(hex_str), false);
+ netdev_dbg(dev, "%s\n", hex_str);
+ }
+}
+
static int send_login(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
@@ -4939,10 +4955,8 @@ static int send_login(struct ibmvnic_adapter *adapter)
vnic_add_client_data(adapter, vlcd);
netdev_dbg(adapter->netdev, "Login Buffer:\n");
- for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
- netdev_dbg(adapter->netdev, "%016lx\n",
- ((unsigned long *)(adapter->login_buf))[i]);
- }
+ ibmvnic_print_hex_dump(adapter->netdev, adapter->login_buf,
+ adapter->login_buf_sz);
memset(&crq, 0, sizeof(crq));
crq.login.first = IBMVNIC_CRQ_CMD;
@@ -5319,15 +5333,13 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
struct device *dev = &adapter->vdev->dev;
struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
- int i;
dma_unmap_single(dev, adapter->ip_offload_tok,
sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
- for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
- netdev_dbg(adapter->netdev, "%016lx\n",
- ((unsigned long *)(buf))[i]);
+ ibmvnic_print_hex_dump(adapter->netdev, buf,
+ sizeof(adapter->ip_offload_buf));
netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
@@ -5558,10 +5570,8 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
netdev->mtu = adapter->req_mtu - ETH_HLEN;
netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
- for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
- netdev_dbg(adapter->netdev, "%016lx\n",
- ((unsigned long *)(adapter->login_rsp_buf))[i]);
- }
+ ibmvnic_print_hex_dump(netdev, adapter->login_rsp_buf,
+ adapter->login_rsp_buf_sz);
/* Sanity checks */
if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
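
ibmvnic_print_hex_dump() builds each output line with hex_dump_to_buffer() from lib/hexdump.c; with rowsize 16 and groupsize 8, every line renders 16 bytes as two 8-byte groups. A sketch of the call's shape:

	u8 data[32];
	char line[16 * 3];

	/* format the first 16 bytes of data as two 8-byte groups */
	hex_dump_to_buffer(data, sizeof(data), 16, 8,
			   line, sizeof(line), false);
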
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 24ec9a4f1ffa..1640d2f27833 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -264,6 +264,7 @@ config I40EVF
tristate "Intel(R) Ethernet Adaptive Virtual Function support"
select IAVF
depends on PCI_MSI
+ depends on PTP_1588_CLOCK_OPTIONAL
help
This driver supports virtual functions for Intel XL710,
X710, X722, XXV710, and all devices advertising support for
@@ -336,7 +337,7 @@ config ICE_SWITCHDEV
config ICE_HWTS
bool "Support HW cross-timestamp on platforms with PTM support"
default y
- depends on ICE && X86
+ depends on ICE && X86 && PCIE_PTM
help
Say Y to enable hardware supported cross-timestamping on platforms
with PCIe PTM support. The cross-timestamp is available through
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index d7df2a0ed629..44249dd91bd6 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -331,8 +331,21 @@ void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
}
/* replace the entire MTA table */
- for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
+ for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) {
E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]);
+
+ if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
+ /*
+ * Do not queue up too many posted writes to prevent
+ * increased latency for other devices on the
+ * interconnect. Flush after each 8th posted write,
+ * to keep additional execution time low while still
+ * preventing increased latency.
+ */
+ if (!(i % 8) && i)
+ e1e_flush();
+ }
+ }
e1e_flush();
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index e28f1905a4a0..9f47388eaba5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2018 Intel Corporation. */
#include <linux/bpf_trace.h>
+#include <linux/unroll.h>
#include <net/xdp_sock_drv.h>
#include "i40e_txrx_common.h"
#include "i40e_xsk.h"
@@ -529,7 +530,8 @@ static void i40e_xmit_pkt_batch(struct i40e_ring *xdp_ring, struct xdp_desc *des
dma_addr_t dma;
u32 i;
- loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
+ unrolled_count(PKTS_PER_BATCH)
+ for (i = 0; i < PKTS_PER_BATCH; i++) {
u32 cmd = I40E_TX_DESC_CMD_ICRC | xsk_is_eop_desc(&desc[i]);
dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc[i].addr);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
index ef156fad52f2..dd16351a7af8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
@@ -6,7 +6,7 @@
#include <linux/types.h>
-/* This value should match the pragma in the loop_unrolled_for
+/* This value should match the pragma in the unrolled_count()
* macro. Why 4? It is strictly empirical. It seems to be a good
* compromise between the advantage of having simultaneous outstanding
* reads to the DMA array that can hide each others latency and the
@@ -14,14 +14,6 @@
*/
#define PKTS_PER_BATCH 4
-#ifdef __clang__
-#define loop_unrolled_for _Pragma("clang loop unroll_count(4)") for
-#elif __GNUC__ >= 8
-#define loop_unrolled_for _Pragma("GCC unroll 4") for
-#else
-#define loop_unrolled_for for
-#endif
-
struct i40e_ring;
struct i40e_vsi;
struct net_device;
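
unrolled_count() from <linux/unroll.h> generalizes the removed compiler-specific loop_unrolled_for pragmas: it annotates the loop statement that immediately follows it, so the unroll factor is stated next to the loop instead of being baked into a per-driver macro. A minimal sketch (the loop body is illustrative):

	#include <linux/unroll.h>

	unrolled_count(PKTS_PER_BATCH)
	for (i = 0; i < PKTS_PER_BATCH; i++)
		total += desc[i].len;	/* hypothetical per-descriptor work */
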
diff --git a/drivers/net/ethernet/intel/iavf/Makefile b/drivers/net/ethernet/intel/iavf/Makefile
index 356ac9faa5bf..e13720a728ff 100644
--- a/drivers/net/ethernet/intel/iavf/Makefile
+++ b/drivers/net/ethernet/intel/iavf/Makefile
@@ -13,3 +13,5 @@ obj-$(CONFIG_IAVF) += iavf.o
iavf-y := iavf_main.o iavf_ethtool.o iavf_virtchnl.o iavf_fdir.o \
iavf_adv_rss.o iavf_txrx.o iavf_common.o iavf_adminq.o
+
+iavf-$(CONFIG_PTP_1588_CLOCK) += iavf_ptp.o
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 532a0a595fe8..9de3e0ba3731 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -41,6 +41,7 @@
#include "iavf_txrx.h"
#include "iavf_fdir.h"
#include "iavf_adv_rss.h"
+#include "iavf_types.h"
#include <linux/bitmap.h>
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
@@ -82,7 +83,7 @@ struct iavf_vsi {
#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
-#define IAVF_RX_DESC(R, i) (&(((union iavf_32byte_rx_desc *)((R)->desc))[i]))
+#define IAVF_RX_DESC(R, i) (&(((struct iavf_rx_desc *)((R)->desc))[i]))
#define IAVF_TX_DESC(R, i) (&(((struct iavf_tx_desc *)((R)->desc))[i]))
#define IAVF_TX_CTXTDESC(R, i) \
(&(((struct iavf_tx_context_desc *)((R)->desc))[i]))
@@ -271,6 +272,7 @@ struct iavf_adapter {
/* Lock to protect accesses to MAC and VLAN lists */
spinlock_t mac_vlan_list_lock;
char misc_vector_name[IFNAMSIZ + 9];
+ u8 rxdid;
int num_active_queues;
int num_req_queues;
@@ -343,6 +345,17 @@ struct iavf_adapter {
#define IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW BIT_ULL(39)
#define IAVF_FLAG_AQ_CFG_QUEUES_QUANTA_SIZE BIT_ULL(40)
#define IAVF_FLAG_AQ_GET_QOS_CAPS BIT_ULL(41)
+#define IAVF_FLAG_AQ_GET_SUPPORTED_RXDIDS BIT_ULL(42)
+#define IAVF_FLAG_AQ_GET_PTP_CAPS BIT_ULL(43)
+#define IAVF_FLAG_AQ_SEND_PTP_CMD BIT_ULL(44)
+
+ /* AQ messages that must be sent after IAVF_FLAG_AQ_GET_CONFIG, in
+	 * order to negotiate extended capabilities.
+ */
+#define IAVF_FLAG_AQ_EXTENDED_CAPS \
+ (IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS | \
+ IAVF_FLAG_AQ_GET_SUPPORTED_RXDIDS | \
+ IAVF_FLAG_AQ_GET_PTP_CAPS)
/* flags for processing extended capability messages during
* __IAVF_INIT_EXTENDED_CAPS. Each capability exchange requires
@@ -354,10 +367,18 @@ struct iavf_adapter {
u64 extended_caps;
#define IAVF_EXTENDED_CAP_SEND_VLAN_V2 BIT_ULL(0)
#define IAVF_EXTENDED_CAP_RECV_VLAN_V2 BIT_ULL(1)
+#define IAVF_EXTENDED_CAP_SEND_RXDID BIT_ULL(2)
+#define IAVF_EXTENDED_CAP_RECV_RXDID BIT_ULL(3)
+#define IAVF_EXTENDED_CAP_SEND_PTP BIT_ULL(4)
+#define IAVF_EXTENDED_CAP_RECV_PTP BIT_ULL(5)
#define IAVF_EXTENDED_CAPS \
(IAVF_EXTENDED_CAP_SEND_VLAN_V2 | \
- IAVF_EXTENDED_CAP_RECV_VLAN_V2)
+ IAVF_EXTENDED_CAP_RECV_VLAN_V2 | \
+ IAVF_EXTENDED_CAP_SEND_RXDID | \
+ IAVF_EXTENDED_CAP_RECV_RXDID | \
+ IAVF_EXTENDED_CAP_SEND_PTP | \
+ IAVF_EXTENDED_CAP_RECV_PTP)
/* Lock to prevent possible clobbering of
* current_netdev_promisc_flags
@@ -417,12 +438,18 @@ struct iavf_adapter {
VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF)
#define QOS_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \
VIRTCHNL_VF_OFFLOAD_QOS)
+#define IAVF_RXDID_ALLOWED(a) \
+ ((a)->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
+#define IAVF_PTP_ALLOWED(a) \
+ ((a)->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_PTP)
struct virtchnl_vf_resource *vf_res; /* incl. all VSIs */
struct virtchnl_vsi_resource *vsi_res; /* our LAN VSI */
struct virtchnl_version_info pf_version;
#define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \
((_a)->pf_version.minor == 1))
struct virtchnl_vlan_caps vlan_v2_caps;
+ u64 supp_rxdids;
+ struct iavf_ptp ptp;
u16 msg_enable;
struct iavf_eth_stats current_stats;
struct virtchnl_qos_cap_list *qos_caps;
@@ -555,6 +582,10 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter);
int iavf_get_vf_config(struct iavf_adapter *adapter);
int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter);
int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter);
+int iavf_send_vf_supported_rxdids_msg(struct iavf_adapter *adapter);
+int iavf_get_vf_supported_rxdids(struct iavf_adapter *adapter);
+int iavf_send_vf_ptp_caps_msg(struct iavf_adapter *adapter);
+int iavf_get_vf_ptp_caps(struct iavf_adapter *adapter);
void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter);
u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter);
void iavf_irq_enable(struct iavf_adapter *adapter, bool flush);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 74a1e9fe1821..288bb5b2e72e 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -1808,7 +1808,7 @@ static int iavf_set_rxfh(struct net_device *netdev,
static const struct ethtool_ops iavf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE,
- .cap_rss_sym_xor_supported = true,
+ .supported_input_xfrm = RXH_XFRM_SYM_XOR,
.get_drvinfo = iavf_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = iavf_get_ringparam,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 6faa62bced3a..6d7ba4d67a19 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -2,8 +2,10 @@
/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include <linux/net/intel/libie/rx.h>
+#include <net/netdev_lock.h>
#include "iavf.h"
+#include "iavf_ptp.h"
#include "iavf_prototype.h"
/* All iavf tracepoints are defined by the include below, which must
* be included exactly once across the whole kernel with
@@ -710,6 +712,47 @@ static void iavf_configure_tx(struct iavf_adapter *adapter)
}
/**
+ * iavf_select_rx_desc_format - Select Rx descriptor format
+ * @adapter: adapter private structure
+ *
+ * Select which Rx descriptor format to use, based on availability and
+ * enabled features.
+ *
+ * Return: the desired RXDID to select for a given Rx queue, as defined by
+ * enum virtchnl_rxdid_format.
+ */
+static u8 iavf_select_rx_desc_format(const struct iavf_adapter *adapter)
+{
+ u64 rxdids = adapter->supp_rxdids;
+
+ /* If we did not negotiate VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC, we must
+ * stick with the default value of the legacy 32 byte format.
+ */
+ if (!IAVF_RXDID_ALLOWED(adapter))
+ return VIRTCHNL_RXDID_1_32B_BASE;
+
+ /* Rx timestamping requires the use of flexible NIC descriptors */
+ if (iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_RX_TSTAMP)) {
+ if (rxdids & BIT(VIRTCHNL_RXDID_2_FLEX_SQ_NIC))
+ return VIRTCHNL_RXDID_2_FLEX_SQ_NIC;
+
+ pci_warn(adapter->pdev,
+ "Unable to negotiate flexible descriptor format\n");
+ }
+
+ /* Warn if the PF does not list support for the default legacy
+ * descriptor format. This shouldn't happen, as this is the format
+ * used if VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC is not supported. It is
+ * likely caused by a bug in the PF implementation failing to indicate
+ * support for the format.
+ */
+ if (!(rxdids & VIRTCHNL_RXDID_1_32B_BASE_M))
+ netdev_warn(adapter->netdev, "PF does not list support for default Rx descriptor format\n");
+
+ return VIRTCHNL_RXDID_1_32B_BASE;
+}
+
+/**
* iavf_configure_rx - Configure Receive Unit after Reset
* @adapter: board private structure
*
@@ -719,8 +762,12 @@ static void iavf_configure_rx(struct iavf_adapter *adapter)
{
struct iavf_hw *hw = &adapter->hw;
- for (u32 i = 0; i < adapter->num_active_queues; i++)
+ adapter->rxdid = iavf_select_rx_desc_format(adapter);
+
+ for (u32 i = 0; i < adapter->num_active_queues; i++) {
adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
+ adapter->rx_rings[i].rxdid = adapter->rxdid;
+ }
}
/**
@@ -2075,6 +2122,10 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
return iavf_send_vf_config_msg(adapter);
if (adapter->aq_required & IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS)
return iavf_send_vf_offload_vlan_v2_msg(adapter);
+ if (adapter->aq_required & IAVF_FLAG_AQ_GET_SUPPORTED_RXDIDS)
+ return iavf_send_vf_supported_rxdids_msg(adapter);
+ if (adapter->aq_required & IAVF_FLAG_AQ_GET_PTP_CAPS)
+ return iavf_send_vf_ptp_caps_msg(adapter);
if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
iavf_disable_queues(adapter);
return 0;
@@ -2239,7 +2290,10 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021AD);
return 0;
}
-
+ if (adapter->aq_required & IAVF_FLAG_AQ_SEND_PTP_CMD) {
+ iavf_virtchnl_send_ptp_cmd(adapter);
+ return IAVF_SUCCESS;
+ }
if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) {
iavf_request_stats(adapter);
return 0;
@@ -2604,6 +2658,112 @@ err:
}
/**
+ * iavf_init_send_supported_rxdids - part of querying for supported RXDID
+ * formats
+ * @adapter: board private structure
+ *
+ * Function processes send of the request for supported RXDIDs to the PF.
+ * Must clear IAVF_EXTENDED_CAP_RECV_RXDID if the message is not sent, e.g.
+ * due to the PF not negotiating VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC.
+ */
+static void iavf_init_send_supported_rxdids(struct iavf_adapter *adapter)
+{
+ int ret;
+
+ ret = iavf_send_vf_supported_rxdids_msg(adapter);
+ if (ret == -EOPNOTSUPP) {
+ /* PF does not support VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC. In this
+ * case, we did not send the capability exchange message and
+ * do not expect a response.
+ */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_RXDID;
+ }
+
+ /* We sent the message, so move on to the next step */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_RXDID;
+}
+
+/**
+ * iavf_init_recv_supported_rxdids - part of querying for supported RXDID
+ * formats
+ * @adapter: board private structure
+ *
+ * Function processes receipt of the supported RXDIDs message from the PF.
+ **/
+static void iavf_init_recv_supported_rxdids(struct iavf_adapter *adapter)
+{
+ int ret;
+
+ memset(&adapter->supp_rxdids, 0, sizeof(adapter->supp_rxdids));
+
+ ret = iavf_get_vf_supported_rxdids(adapter);
+ if (ret)
+ goto err;
+
+ /* We've processed the PF response to the
+ * VIRTCHNL_OP_GET_SUPPORTED_RXDIDS message we sent previously.
+ */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_RXDID;
+ return;
+
+err:
+ /* We didn't receive a reply. Make sure we try sending again when
+ * __IAVF_INIT_FAILED attempts to recover.
+ */
+ adapter->extended_caps |= IAVF_EXTENDED_CAP_SEND_RXDID;
+ iavf_change_state(adapter, __IAVF_INIT_FAILED);
+}
+
+/**
+ * iavf_init_send_ptp_caps - part of querying for extended PTP capabilities
+ * @adapter: board private structure
+ *
+ * Function processes send of the request for 1588 PTP capabilities to the PF.
+ * Must clear IAVF_EXTENDED_CAP_RECV_PTP if the message is not sent, e.g.
+ * due to the PF not negotiating VIRTCHNL_VF_CAP_PTP.
+ */
+static void iavf_init_send_ptp_caps(struct iavf_adapter *adapter)
+{
+ if (iavf_send_vf_ptp_caps_msg(adapter) == -EOPNOTSUPP) {
+	if (iavf_send_vf_ptp_caps_msg(adapter) == -EOPNOTSUPP) {
+		/* PF does not support VIRTCHNL_VF_CAP_PTP. In this case, we
+		 * did not send the capability exchange message and do not
+ * expect a response.
+ */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_PTP;
+ }
+
+ /* We sent the message, so move on to the next step */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_PTP;
+}
+
+/**
+ * iavf_init_recv_ptp_caps - part of querying for supported PTP capabilities
+ * @adapter: board private structure
+ *
+ * Function processes receipt of the PTP capabilities supported on this VF.
+ **/
+static void iavf_init_recv_ptp_caps(struct iavf_adapter *adapter)
+{
+ memset(&adapter->ptp.hw_caps, 0, sizeof(adapter->ptp.hw_caps));
+
+ if (iavf_get_vf_ptp_caps(adapter))
+ goto err;
+
+ /* We've processed the PF response to the VIRTCHNL_OP_1588_PTP_GET_CAPS
+ * message we sent previously.
+ */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_PTP;
+ return;
+
+err:
+ /* We didn't receive a reply. Make sure we try sending again when
+ * __IAVF_INIT_FAILED attempts to recover.
+ */
+ adapter->extended_caps |= IAVF_EXTENDED_CAP_SEND_PTP;
+ iavf_change_state(adapter, __IAVF_INIT_FAILED);
+}
+
+/**
* iavf_init_process_extended_caps - Part of driver startup
* @adapter: board private structure
*
@@ -2627,6 +2787,24 @@ static void iavf_init_process_extended_caps(struct iavf_adapter *adapter)
return;
}
+ /* Process capability exchange for RXDID formats */
+ if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_RXDID) {
+ iavf_init_send_supported_rxdids(adapter);
+ return;
+ } else if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_RXDID) {
+ iavf_init_recv_supported_rxdids(adapter);
+ return;
+ }
+
+ /* Process capability exchange for PTP features */
+ if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_PTP) {
+ iavf_init_send_ptp_caps(adapter);
+ return;
+ } else if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_PTP) {
+ iavf_init_recv_ptp_caps(adapter);
+ return;
+ }
+
/* When we reach here, no further extended capabilities exchanges are
* necessary, so we finally transition into __IAVF_INIT_CONFIG_ADAPTER
*/
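The dispatch above is effectively a small state machine: each extended capability owns a SEND flag (issue the request) and a RECV flag (consume the reply), and the function is re-entered until no flags remain. A standalone model of that flow, using illustrative flag values rather than the driver's real IAVF_EXTENDED_CAP_* bits:

#include <stdint.h>
#include <stdio.h>

/* Model of the two-phase handshake: SEND issues the query (the RECV
 * flag is already armed), RECV consumes the reply.
 */
#define CAP_SEND_RXDID	0x1
#define CAP_RECV_RXDID	0x2
#define CAP_SEND_PTP	0x4
#define CAP_RECV_PTP	0x8

static void step(uint32_t *caps)
{
	if (*caps & CAP_SEND_RXDID) {
		puts("send RXDID query");
		*caps &= ~CAP_SEND_RXDID;
	} else if (*caps & CAP_RECV_RXDID) {
		puts("consume RXDID reply");
		*caps &= ~CAP_RECV_RXDID;
	} else if (*caps & CAP_SEND_PTP) {
		puts("send PTP query");
		*caps &= ~CAP_SEND_PTP;
	} else if (*caps & CAP_RECV_PTP) {
		puts("consume PTP reply");
		*caps &= ~CAP_RECV_PTP;
	} else {
		puts("negotiation finished");
	}
}

int main(void)
{
	uint32_t caps = CAP_SEND_RXDID | CAP_RECV_RXDID |
			CAP_SEND_PTP | CAP_RECV_PTP;

	for (int i = 0; i < 5; i++)
		step(&caps);
	return 0;
}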
@@ -2718,6 +2896,9 @@ static void iavf_init_config_adapter(struct iavf_adapter *adapter)
if (QOS_ALLOWED(adapter))
adapter->aq_required |= IAVF_FLAG_AQ_GET_QOS_CAPS;
+ /* Setup initial PTP configuration */
+ iavf_ptp_init(adapter);
+
iavf_schedule_finish_config(adapter);
return;
@@ -3143,15 +3324,18 @@ continue_reset:
}
adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
- /* always set since VIRTCHNL_OP_GET_VF_RESOURCES has not been
- * sent/received yet, so VLAN_V2_ALLOWED() cannot is not reliable here,
- * however the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS won't be sent until
- * VIRTCHNL_OP_GET_VF_RESOURCES and VIRTCHNL_VF_OFFLOAD_VLAN_V2 have
- * been successfully sent and negotiated
- */
- adapter->aq_required |= IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS;
adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
+ /* Certain capabilities require an extended negotiation process using
+ * extra messages that must be processed after getting the VF
+ * configuration. The related checks such as VLAN_V2_ALLOWED() are not
+ * reliable here, since the configuration has not yet been negotiated.
+ *
+ * Always set these flags, since the related VIRTCHNL messages won't
+ * be sent until after VIRTCHNL_OP_GET_VF_RESOURCES.
+ */
+ adapter->aq_required |= IAVF_FLAG_AQ_EXTENDED_CAPS;
+
spin_lock_bh(&adapter->mac_vlan_list_lock);
/* Delete filter for the current MAC address, it could have
@@ -3711,10 +3895,8 @@ exit:
if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
return 0;
- netdev_lock(netdev);
netif_set_real_num_rx_queues(netdev, total_qps);
netif_set_real_num_tx_queues(netdev, total_qps);
- netdev_unlock(netdev);
return ret;
}
@@ -4379,22 +4561,21 @@ static int iavf_open(struct net_device *netdev)
struct iavf_adapter *adapter = netdev_priv(netdev);
int err;
+ netdev_assert_locked(netdev);
+
if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) {
dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
return -EIO;
}
- netdev_lock(netdev);
while (!mutex_trylock(&adapter->crit_lock)) {
/* If we are in __IAVF_INIT_CONFIG_ADAPTER state the crit_lock
* is already taken and iavf_open is called from an upper
* device's notifier reacting on NETDEV_REGISTER event.
* We have to leave here to avoid dead lock.
*/
- if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER) {
- netdev_unlock(netdev);
+ if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER)
return -EBUSY;
- }
usleep_range(500, 1000);
}
@@ -4443,7 +4624,6 @@ static int iavf_open(struct net_device *netdev)
iavf_irq_enable(adapter, true);
mutex_unlock(&adapter->crit_lock);
- netdev_unlock(netdev);
return 0;
@@ -4456,7 +4636,6 @@ err_setup_tx:
iavf_free_all_tx_resources(adapter);
err_unlock:
mutex_unlock(&adapter->crit_lock);
- netdev_unlock(netdev);
return err;
}
@@ -4478,12 +4657,12 @@ static int iavf_close(struct net_device *netdev)
u64 aq_to_restore;
int status;
- netdev_lock(netdev);
+ netdev_assert_locked(netdev);
+
mutex_lock(&adapter->crit_lock);
if (adapter->state <= __IAVF_DOWN_PENDING) {
mutex_unlock(&adapter->crit_lock);
- netdev_unlock(netdev);
return 0;
}
@@ -4536,6 +4715,7 @@ static int iavf_close(struct net_device *netdev)
if (!status)
netdev_warn(netdev, "Device resources not yet released\n");
+ netdev_lock(netdev);
mutex_lock(&adapter->crit_lock);
adapter->aq_required |= aq_to_restore;
mutex_unlock(&adapter->crit_lock);
@@ -5000,6 +5180,25 @@ static netdev_features_t iavf_fix_features(struct net_device *netdev,
return iavf_fix_strip_features(adapter, features);
}
+static int iavf_hwstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+
+ *config = adapter->ptp.hwtstamp_config;
+
+ return 0;
+}
+
+static int iavf_hwstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+
+ return iavf_ptp_set_ts_config(adapter, config, extack);
+}
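These two callbacks are reached through the generic hardware timestamping path: the kernel converts the classic SIOCSHWTSTAMP ioctl (or its netlink equivalent) into a kernel_hwtstamp_config and calls ndo_hwtstamp_set(), i.e. iavf_hwstamp_set() above. A minimal userspace sketch of that path, with "eth0" as a placeholder interface name:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_OFF,	  /* iavf rejects HWTSTAMP_TX_ON */
		.rx_filter = HWTSTAMP_FILTER_ALL, /* driver promotes any Rx filter to ALL */
	};
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr))
		perror("SIOCSHWTSTAMP");

	close(fd);
	return 0;
}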
+
static int
iavf_verify_shaper(struct net_shaper_binding *binding,
const struct net_shaper *shaper,
@@ -5108,6 +5307,8 @@ static const struct net_device_ops iavf_netdev_ops = {
.ndo_set_features = iavf_set_features,
.ndo_setup_tc = iavf_setup_tc,
.net_shaper_ops = &iavf_shaper_ops,
+ .ndo_hwtstamp_get = iavf_hwstamp_get,
+ .ndo_hwtstamp_set = iavf_hwstamp_set,
};
/**
@@ -5362,6 +5563,10 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Setup the wait queue for indicating virtchannel events */
init_waitqueue_head(&adapter->vc_waitqueue);
+ INIT_LIST_HEAD(&adapter->ptp.aq_cmds);
+ init_waitqueue_head(&adapter->ptp.phc_time_waitqueue);
+ mutex_init(&adapter->ptp.aq_cmd_lock);
+
queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
/* Initialization goes on in the work. Do not add more of it below. */
@@ -5518,6 +5723,8 @@ static void iavf_remove(struct pci_dev *pdev)
msleep(50);
}
+ iavf_ptp_release(adapter);
+
iavf_misc_irq_disable(adapter);
/* Shut down all the garbage mashers on the detention level */
cancel_work_sync(&adapter->reset_task);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ptp.c b/drivers/net/ethernet/intel/iavf/iavf_ptp.c
new file mode 100644
index 000000000000..b4d5eda2e84f
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/iavf_ptp.c
@@ -0,0 +1,485 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2024 Intel Corporation. */
+
+#include "iavf.h"
+#include "iavf_ptp.h"
+
+#define iavf_clock_to_adapter(info) \
+ container_of_const(info, struct iavf_adapter, ptp.info)
+
+/**
+ * iavf_ptp_disable_rx_tstamp - Disable timestamping in Rx rings
+ * @adapter: private adapter structure
+ *
+ * Disable timestamp reporting for all Rx rings.
+ */
+static void iavf_ptp_disable_rx_tstamp(struct iavf_adapter *adapter)
+{
+ for (u32 i = 0; i < adapter->num_active_queues; i++)
+ adapter->rx_rings[i].flags &= ~IAVF_TXRX_FLAGS_HW_TSTAMP;
+}
+
+/**
+ * iavf_ptp_enable_rx_tstamp - Enable timestamping in Rx rings
+ * @adapter: private adapter structure
+ *
+ * Enable timestamp reporting for all Rx rings.
+ */
+static void iavf_ptp_enable_rx_tstamp(struct iavf_adapter *adapter)
+{
+ for (u32 i = 0; i < adapter->num_active_queues; i++)
+ adapter->rx_rings[i].flags |= IAVF_TXRX_FLAGS_HW_TSTAMP;
+}
+
+/**
+ * iavf_ptp_set_timestamp_mode - Set device timestamping mode
+ * @adapter: private adapter structure
+ * @config: pointer to kernel_hwtstamp_config
+ *
+ * Set the timestamping mode requested from the userspace.
+ *
+ * Note: this function always translates Rx timestamp requests for any packet
+ * category into HWTSTAMP_FILTER_ALL.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int iavf_ptp_set_timestamp_mode(struct iavf_adapter *adapter,
+ struct kernel_hwtstamp_config *config)
+{
+ /* Reserved for future extensions. */
+ if (config->flags)
+ return -EINVAL;
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ break;
+ case HWTSTAMP_TX_ON:
+ return -EOPNOTSUPP;
+ default:
+ return -ERANGE;
+ }
+
+ if (config->rx_filter == HWTSTAMP_FILTER_NONE) {
+ iavf_ptp_disable_rx_tstamp(adapter);
+ return 0;
+ } else if (config->rx_filter > HWTSTAMP_FILTER_NTP_ALL) {
+ return -ERANGE;
+ } else if (!(iavf_ptp_cap_supported(adapter,
+ VIRTCHNL_1588_PTP_CAP_RX_TSTAMP))) {
+ return -EOPNOTSUPP;
+ }
+
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ iavf_ptp_enable_rx_tstamp(adapter);
+
+ return 0;
+}
+
+/**
+ * iavf_ptp_set_ts_config - Set timestamping configuration
+ * @adapter: private adapter structure
+ * @config: pointer to kernel_hwtstamp_config structure
+ * @extack: pointer to netlink_ext_ack structure
+ *
+ * Program the requested timestamping configuration to the device.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int iavf_ptp_set_ts_config(struct iavf_adapter *adapter,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ err = iavf_ptp_set_timestamp_mode(adapter, config);
+ if (err)
+ return err;
+
+ /* Save successful settings for future reference */
+ adapter->ptp.hwtstamp_config = *config;
+
+ return 0;
+}
+
+/**
+ * iavf_ptp_cap_supported - Check if a PTP capability is supported
+ * @adapter: private adapter structure
+ * @cap: the capability bitmask to check
+ *
+ * Return: true if every capability set in cap is also set in the enabled
+ * capabilities reported by the PF, false otherwise.
+ */
+bool iavf_ptp_cap_supported(const struct iavf_adapter *adapter, u32 cap)
+{
+ if (!IAVF_PTP_ALLOWED(adapter))
+ return false;
+
+ /* Only return true if every bit in cap is set in hw_caps.caps */
+ return (adapter->ptp.hw_caps.caps & cap) == cap;
+}
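Because the comparison masks rather than tests individual bits, a caller can OR several capability bits into a single query, and the check succeeds only when the PF granted all of them. A standalone model of that semantic, with illustrative (not virtchnl) capability values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CAP_READ_PHC	0x1	/* illustrative values only */
#define CAP_RX_TSTAMP	0x2

/* Model of the check in iavf_ptp_cap_supported(): every bit of @cap
 * must be present in @granted.
 */
static bool cap_supported(uint32_t granted, uint32_t cap)
{
	return (granted & cap) == cap;
}

int main(void)
{
	uint32_t granted = CAP_READ_PHC;	/* PF granted PHC read only */

	printf("%d\n", cap_supported(granted, CAP_READ_PHC));			/* 1 */
	printf("%d\n", cap_supported(granted, CAP_READ_PHC | CAP_RX_TSTAMP));	/* 0 */
	return 0;
}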
+
+/**
+ * iavf_allocate_ptp_cmd - Allocate a PTP command message structure
+ * @v_opcode: the virtchnl opcode
+ * @msglen: length in bytes of the associated virtchnl structure
+ *
+ * Allocates a PTP command message and pre-fills it with the provided message
+ * length and opcode.
+ *
+ * Return: allocated PTP command, or NULL on allocation failure.
+ */
+static struct iavf_ptp_aq_cmd *iavf_allocate_ptp_cmd(enum virtchnl_ops v_opcode,
+ u16 msglen)
+{
+ struct iavf_ptp_aq_cmd *cmd;
+
+ cmd = kzalloc(struct_size(cmd, msg, msglen), GFP_KERNEL);
+ if (!cmd)
+ return NULL;
+
+ cmd->v_opcode = v_opcode;
+ cmd->msglen = msglen;
+
+ return cmd;
+}
+
+/**
+ * iavf_queue_ptp_cmd - Queue PTP command for sending over virtchnl
+ * @adapter: private adapter structure
+ * @cmd: the command structure to send
+ *
+ * Queue the given command structure into the PTP virtchnl command queue to
+ * send to the PF.
+ */
+static void iavf_queue_ptp_cmd(struct iavf_adapter *adapter,
+ struct iavf_ptp_aq_cmd *cmd)
+{
+ mutex_lock(&adapter->ptp.aq_cmd_lock);
+ list_add_tail(&cmd->list, &adapter->ptp.aq_cmds);
+ mutex_unlock(&adapter->ptp.aq_cmd_lock);
+
+ adapter->aq_required |= IAVF_FLAG_AQ_SEND_PTP_CMD;
+ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+}
+
+/**
+ * iavf_send_phc_read - Send request to read PHC time
+ * @adapter: private adapter structure
+ *
+ * Send a request to obtain the PTP hardware clock time. This allocates the
+ * VIRTCHNL_OP_1588_PTP_GET_TIME message and queues it up to send to
+ * indirectly read the PHC time.
+ *
+ * This function does not wait for the reply from the PF.
+ *
+ * Return: 0 if success, error code otherwise.
+ */
+static int iavf_send_phc_read(struct iavf_adapter *adapter)
+{
+ struct iavf_ptp_aq_cmd *cmd;
+
+ if (!adapter->ptp.clock)
+ return -EOPNOTSUPP;
+
+ cmd = iavf_allocate_ptp_cmd(VIRTCHNL_OP_1588_PTP_GET_TIME,
+ sizeof(struct virtchnl_phc_time));
+ if (!cmd)
+ return -ENOMEM;
+
+ iavf_queue_ptp_cmd(adapter, cmd);
+
+ return 0;
+}
+
+/**
+ * iavf_read_phc_indirect - Indirectly read the PHC time via virtchnl
+ * @adapter: private adapter structure
+ * @ts: storage for the timestamp value
+ * @sts: system timestamp values before and after the read
+ *
+ * Used when the device does not have direct register access to the PHC time.
+ * Indirectly reads the time via the VIRTCHNL_OP_1588_PTP_GET_TIME, and waits
+ * for the reply from the PF.
+ *
+ * Based on some simple measurements using ftrace and phc2sys, this clock
+ * access method has a latency of roughly 110 usec even when the system is
+ * not under load. In order to achieve acceptable results when using phc2sys
+ * with the indirect clock access method, it is recommended to use more
+ * conservative proportional and integral constants with the P/I servo.
+ *
+ * Return: 0 if success, error code otherwise.
+ */
+static int iavf_read_phc_indirect(struct iavf_adapter *adapter,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ long ret;
+ int err;
+
+ adapter->ptp.phc_time_ready = false;
+
+ ptp_read_system_prets(sts);
+
+ err = iavf_send_phc_read(adapter);
+ if (err)
+ return err;
+
+ ret = wait_event_interruptible_timeout(adapter->ptp.phc_time_waitqueue,
+ adapter->ptp.phc_time_ready,
+ HZ);
+
+ ptp_read_system_postts(sts);
+
+ if (ret < 0)
+ return ret;
+ else if (!ret)
+ return -EBUSY;
+
+ *ts = ns_to_timespec64(adapter->ptp.cached_phc_time);
+
+ return 0;
+}
+
+static int iavf_ptp_gettimex64(struct ptp_clock_info *info,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct iavf_adapter *adapter = iavf_clock_to_adapter(info);
+
+ if (!adapter->ptp.clock)
+ return -EOPNOTSUPP;
+
+ return iavf_read_phc_indirect(adapter, ts, sts);
+}
+
+/**
+ * iavf_ptp_cache_phc_time - Cache PHC time for performing timestamp extension
+ * @adapter: private adapter structure
+ *
+ * Periodically cache the PHC time in order to allow for timestamp extension.
+ * This is required because the Tx and Rx timestamps only contain 32bits of
+ * nanoseconds. Timestamp extension allows calculating the corrected 64bit
+ * timestamp. This algorithm relies on the cached time being within ~1 second
+ * of the timestamp.
+ */
+static void iavf_ptp_cache_phc_time(struct iavf_adapter *adapter)
+{
+ if (!time_is_before_jiffies(adapter->ptp.cached_phc_updated + HZ))
+ return;
+
+ /* The response from virtchnl will store the time into
+ * cached_phc_time.
+ */
+ iavf_send_phc_read(adapter);
+}
+
+/**
+ * iavf_ptp_do_aux_work - Perform periodic work required for PTP support
+ * @info: PTP clock info structure
+ *
+ * Handler to take care of periodic work required for PTP operation. This
+ * includes the following tasks:
+ *
+ * 1) updating cached_phc_time
+ *
+ * cached_phc_time is used by the Tx and Rx timestamp flows in order to
+ * perform timestamp extension, by carefully comparing the captured
+ * 32bit nanosecond timestamps and determining the corrected 64bit
+ * timestamp value to report to userspace. This algorithm only works if
+ * the cached_phc_time is within ~1 second of the Tx or Rx timestamp
+ * event. This task periodically reads the PHC time and stores it, to
+ * ensure that timestamp extension operates correctly.
+ *
+ * Return: time in jiffies until the periodic task should be re-scheduled.
+ */
+static long iavf_ptp_do_aux_work(struct ptp_clock_info *info)
+{
+ struct iavf_adapter *adapter = iavf_clock_to_adapter(info);
+
+ iavf_ptp_cache_phc_time(adapter);
+
+ /* Check work about twice a second */
+ return msecs_to_jiffies(500);
+}
+
+/**
+ * iavf_ptp_register_clock - Register a new PTP clock for userspace
+ * @adapter: private adapter structure
+ *
+ * Allocate and register a new PTP clock device if necessary.
+ *
+ * Return: 0 if success, error otherwise.
+ */
+static int iavf_ptp_register_clock(struct iavf_adapter *adapter)
+{
+ struct ptp_clock_info *ptp_info = &adapter->ptp.info;
+ struct device *dev = &adapter->pdev->dev;
+ struct ptp_clock *clock;
+
+ snprintf(ptp_info->name, sizeof(ptp_info->name), "%s-%s-clk",
+ KBUILD_MODNAME, dev_name(dev));
+ ptp_info->owner = THIS_MODULE;
+ ptp_info->gettimex64 = iavf_ptp_gettimex64;
+ ptp_info->do_aux_work = iavf_ptp_do_aux_work;
+
+ clock = ptp_clock_register(ptp_info, dev);
+ if (IS_ERR(clock))
+ return PTR_ERR(clock);
+
+ adapter->ptp.clock = clock;
+
+ dev_dbg(&adapter->pdev->dev, "PTP clock %s registered\n",
+ adapter->ptp.info.name);
+
+ return 0;
+}
+
+/**
+ * iavf_ptp_init - Initialize PTP support if capability was negotiated
+ * @adapter: private adapter structure
+ *
+ * Initialize PTP functionality, based on the capabilities that the PF has
+ * enabled for this VF.
+ */
+void iavf_ptp_init(struct iavf_adapter *adapter)
+{
+ int err;
+
+ if (!iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_READ_PHC)) {
+ pci_notice(adapter->pdev,
+ "Device does not have PTP clock support\n");
+ return;
+ }
+
+ err = iavf_ptp_register_clock(adapter);
+ if (err) {
+ pci_err(adapter->pdev,
+ "Failed to register PTP clock device (%p)\n",
+ ERR_PTR(err));
+ return;
+ }
+
+ for (int i = 0; i < adapter->num_active_queues; i++) {
+ struct iavf_ring *rx_ring = &adapter->rx_rings[i];
+
+ rx_ring->ptp = &adapter->ptp;
+ }
+
+ ptp_schedule_worker(adapter->ptp.clock, 0);
+}
+
+/**
+ * iavf_ptp_release - Disable PTP support
+ * @adapter: private adapter structure
+ *
+ * Release all PTP resources that were previously initialized.
+ */
+void iavf_ptp_release(struct iavf_adapter *adapter)
+{
+ struct iavf_ptp_aq_cmd *cmd, *tmp;
+
+ if (!adapter->ptp.clock)
+ return;
+
+ pci_dbg(adapter->pdev, "removing PTP clock %s\n",
+ adapter->ptp.info.name);
+ ptp_clock_unregister(adapter->ptp.clock);
+ adapter->ptp.clock = NULL;
+
+ /* Cancel any remaining uncompleted PTP clock commands */
+ mutex_lock(&adapter->ptp.aq_cmd_lock);
+ list_for_each_entry_safe(cmd, tmp, &adapter->ptp.aq_cmds, list) {
+ list_del(&cmd->list);
+ kfree(cmd);
+ }
+ adapter->aq_required &= ~IAVF_FLAG_AQ_SEND_PTP_CMD;
+ mutex_unlock(&adapter->ptp.aq_cmd_lock);
+
+ adapter->ptp.hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ iavf_ptp_disable_rx_tstamp(adapter);
+}
+
+/**
+ * iavf_ptp_process_caps - Handle change in PTP capabilities
+ * @adapter: private adapter structure
+ *
+ * Handle any state changes necessary due to change in PTP capabilities, such
+ * as after a device reset or change in configuration from the PF.
+ */
+void iavf_ptp_process_caps(struct iavf_adapter *adapter)
+{
+ bool phc = iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_READ_PHC);
+
+ /* Check if the device gained or lost necessary access to support the
+ * PTP hardware clock. If so, driver must respond appropriately by
+ * creating or destroying the PTP clock device.
+ */
+ if (adapter->ptp.clock && !phc)
+ iavf_ptp_release(adapter);
+ else if (!adapter->ptp.clock && phc)
+ iavf_ptp_init(adapter);
+
+ /* Check if the device lost access to Rx timestamp incoming packets */
+ if (!iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_RX_TSTAMP)) {
+ adapter->ptp.hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ iavf_ptp_disable_rx_tstamp(adapter);
+ }
+}
+
+/**
+ * iavf_ptp_extend_32b_timestamp - Convert a 32b nanoseconds timestamp to 64b
+ * nanoseconds
+ * @cached_phc_time: recently cached copy of PHC time
+ * @in_tstamp: Ingress/egress 32b nanoseconds timestamp value
+ *
+ * Hardware captures timestamps which contain only 32 bits of nominal
+ * nanoseconds, as opposed to the 64bit timestamps that the stack expects.
+ *
+ * Extend the 32bit nanosecond timestamp using the following algorithm and
+ * assumptions:
+ *
+ * 1) have a recently cached copy of the PHC time
+ * 2) assume that the in_tstamp was captured within 2^31 nanoseconds (~2.1
+ * seconds) before or after the PHC time was captured.
+ * 3) calculate the delta between the cached time and the timestamp
+ * 4) if the delta is smaller than 2^31 nanoseconds, then the timestamp was
+ * captured after the PHC time. In this case, the full timestamp is just
+ * the cached PHC time plus the delta.
+ * 5) otherwise, if the delta is larger than 2^31 nanoseconds, then the
+ * timestamp was captured *before* the PHC time, i.e. because the PHC
+ * cache was updated after the timestamp was captured by hardware. In this
+ * case, the full timestamp is the cached time minus the inverse delta.
+ *
+ * This algorithm works even if the PHC time was updated after a Tx timestamp
+ * was requested, but before the Tx timestamp event was reported from
+ * hardware.
+ *
+ * This calculation primarily relies on keeping the cached PHC time up to
+ * date. If the timestamp was captured more than 2^31 nanoseconds after the
+ * PHC time, it is possible that the lower 32bits of PHC time have
+ * overflowed more than once, and we might generate an incorrect timestamp.
+ *
+ * This is prevented by (a) periodically updating the cached PHC time once
+ * a second, and (b) discarding any Tx timestamp packet if it has waited for
+ * a timestamp for more than one second.
+ *
+ * Return: extended timestamp (to 64b).
+ */
+u64 iavf_ptp_extend_32b_timestamp(u64 cached_phc_time, u32 in_tstamp)
+{
+ u32 low = lower_32_bits(cached_phc_time);
+ u32 delta = in_tstamp - low;
+ u64 ns;
+
+ /* Do not assume that the in_tstamp is always more recent than the
+ * cached PHC time. If the delta is large, it indicates that the
+ * in_tstamp was taken in the past, and should be converted
+ * forward.
+ */
+ if (delta > S32_MAX)
+ ns = cached_phc_time - (low - in_tstamp);
+ else
+ ns = cached_phc_time + delta;
+
+ return ns;
+}
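A standalone sketch of the extension arithmetic with concrete values may help; extend_32b() below mirrors iavf_ptp_extend_32b_timestamp(), and the sample inputs exercise both the forward and the wraparound branch:

#include <stdint.h>
#include <stdio.h>

static uint64_t extend_32b(uint64_t cached_phc_time, uint32_t in_tstamp)
{
	uint32_t low = (uint32_t)cached_phc_time;
	uint32_t delta = in_tstamp - low;

	if (delta > INT32_MAX)	/* captured before the cached time */
		return cached_phc_time - (uint32_t)(low - in_tstamp);
	return cached_phc_time + delta;	/* captured after the cached time */
}

int main(void)
{
	/* Cached PHC time just past a 32-bit nanosecond rollover;
	 * its low 32 bits are 100.
	 */
	uint64_t cached = 0x100000064ULL;

	/* Timestamp taken 100 ns after the cache: extended forward
	 * to 0x1000000c8.
	 */
	printf("%#llx\n", (unsigned long long)extend_32b(cached, 200));

	/* Timestamp taken just before the rollover: extended backward
	 * across the 32-bit boundary to 0xfffffff0.
	 */
	printf("%#llx\n", (unsigned long long)extend_32b(cached, 0xfffffff0u));
	return 0;
}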
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ptp.h b/drivers/net/ethernet/intel/iavf/iavf_ptp.h
new file mode 100644
index 000000000000..783b8f287cd9
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/iavf_ptp.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Intel Corporation. */
+
+#ifndef _IAVF_PTP_H_
+#define _IAVF_PTP_H_
+
+#include "iavf_types.h"
+
+/* bit indicating whether a 40bit timestamp is valid */
+#define IAVF_PTP_40B_TSTAMP_VALID BIT(24)
+
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+void iavf_ptp_init(struct iavf_adapter *adapter);
+void iavf_ptp_release(struct iavf_adapter *adapter);
+void iavf_ptp_process_caps(struct iavf_adapter *adapter);
+bool iavf_ptp_cap_supported(const struct iavf_adapter *adapter, u32 cap);
+void iavf_virtchnl_send_ptp_cmd(struct iavf_adapter *adapter);
+int iavf_ptp_set_ts_config(struct iavf_adapter *adapter,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
+u64 iavf_ptp_extend_32b_timestamp(u64 cached_phc_time, u32 in_tstamp);
+#else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
+static inline void iavf_ptp_init(struct iavf_adapter *adapter) { }
+static inline void iavf_ptp_release(struct iavf_adapter *adapter) { }
+static inline void iavf_ptp_process_caps(struct iavf_adapter *adapter) { }
+static inline bool iavf_ptp_cap_supported(const struct iavf_adapter *adapter,
+ u32 cap)
+{
+ return false;
+}
+
+static inline void iavf_virtchnl_send_ptp_cmd(struct iavf_adapter *adapter) { }
+static inline int iavf_ptp_set_ts_config(struct iavf_adapter *adapter,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline u64 iavf_ptp_extend_32b_timestamp(u64 cached_phc_time,
+ u32 in_tstamp)
+{
+ return 0;
+}
+
+#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
+#endif /* _IAVF_PTP_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_trace.h b/drivers/net/ethernet/intel/iavf/iavf_trace.h
index 62212011c807..c5e4d1823886 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_trace.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_trace.h
@@ -112,7 +112,7 @@ DECLARE_EVENT_CLASS(
iavf_rx_template,
TP_PROTO(struct iavf_ring *ring,
- union iavf_32byte_rx_desc *desc,
+ struct iavf_rx_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb),
@@ -140,7 +140,7 @@ DECLARE_EVENT_CLASS(
DEFINE_EVENT(
iavf_rx_template, iavf_clean_rx_irq,
TP_PROTO(struct iavf_ring *ring,
- union iavf_32byte_rx_desc *desc,
+ struct iavf_rx_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb));
@@ -148,7 +148,7 @@ DEFINE_EVENT(
DEFINE_EVENT(
iavf_rx_template, iavf_clean_rx_irq_rx,
TP_PROTO(struct iavf_ring *ring,
- union iavf_32byte_rx_desc *desc,
+ struct iavf_rx_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb));
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 26b424fd6718..422312b8b54a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -8,6 +8,26 @@
#include "iavf.h"
#include "iavf_trace.h"
#include "iavf_prototype.h"
+#include "iavf_ptp.h"
+
+/**
+ * iavf_is_descriptor_done - tests DD bit in Rx descriptor
+ * @qw1: quad word 1 from descriptor to get Descriptor Done field from
+ * @flex: is the descriptor flex or legacy
+ *
+ * This function tests the descriptor done bit in the specified descriptor.
+ * Because there are two types of descriptors (legacy and flex), the @flex
+ * parameter is used to distinguish between them.
+ *
+ * Return: true or false based on the state of DD bit in Rx descriptor.
+ */
+static bool iavf_is_descriptor_done(u64 qw1, bool flex)
+{
+ if (flex)
+ return FIELD_GET(IAVF_RXD_FLEX_DD_M, qw1);
+ else
+ return FIELD_GET(IAVF_RXD_LEGACY_DD_M, qw1);
+}
static __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
u32 td_tag)
@@ -766,7 +786,7 @@ int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring)
u64_stats_init(&rx_ring->syncp);
/* Round up to nearest 4K */
- rx_ring->size = rx_ring->count * sizeof(union iavf_32byte_rx_desc);
+ rx_ring->size = rx_ring->count * sizeof(struct iavf_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
rx_ring->desc = dma_alloc_coherent(fq.pp->p.dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
@@ -845,7 +865,7 @@ bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count)
.count = rx_ring->count,
};
u16 ntu = rx_ring->next_to_use;
- union iavf_rx_desc *rx_desc;
+ struct iavf_rx_desc *rx_desc;
/* do nothing if no valid netdev defined */
if (!rx_ring->netdev || !cleaned_count)
@@ -863,7 +883,7 @@ bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count)
/* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
- rx_desc->read.pkt_addr = cpu_to_le64(addr);
+ rx_desc->qw0 = cpu_to_le64(addr);
rx_desc++;
ntu++;
@@ -873,7 +893,7 @@ bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count)
}
/* clear the status bits for the next_to_use descriptor */
- rx_desc->wb.qword1.status_error_len = 0;
+ rx_desc->qw1 = 0;
cleaned_count--;
} while (cleaned_count);
@@ -896,60 +916,43 @@ no_buffers:
}
/**
- * iavf_rx_checksum - Indicate in skb if hw indicated a good cksum
+ * iavf_rx_csum - Indicate in skb if hw indicated a good checksum
* @vsi: the VSI we care about
* @skb: skb currently being received and modified
- * @rx_desc: the receive descriptor
+ * @decoded_pt: decoded ptype information
+ * @csum_bits: decoded Rx descriptor information
**/
-static void iavf_rx_checksum(struct iavf_vsi *vsi,
- struct sk_buff *skb,
- union iavf_rx_desc *rx_desc)
+static void iavf_rx_csum(const struct iavf_vsi *vsi, struct sk_buff *skb,
+ struct libeth_rx_pt decoded_pt,
+ struct libeth_rx_csum csum_bits)
{
- struct libeth_rx_pt decoded;
- u32 rx_error, rx_status;
bool ipv4, ipv6;
- u8 ptype;
- u64 qword;
skb->ip_summed = CHECKSUM_NONE;
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- ptype = FIELD_GET(IAVF_RXD_QW1_PTYPE_MASK, qword);
-
- decoded = libie_rx_pt_parse(ptype);
- if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded))
- return;
-
- rx_error = FIELD_GET(IAVF_RXD_QW1_ERROR_MASK, qword);
- rx_status = FIELD_GET(IAVF_RXD_QW1_STATUS_MASK, qword);
-
/* did the hardware decode the packet and checksum? */
- if (!(rx_status & BIT(IAVF_RX_DESC_STATUS_L3L4P_SHIFT)))
+ if (unlikely(!csum_bits.l3l4p))
return;
- ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
- ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
+ ipv4 = libeth_rx_pt_get_ip_ver(decoded_pt) == LIBETH_RX_PT_OUTER_IPV4;
+ ipv6 = libeth_rx_pt_get_ip_ver(decoded_pt) == LIBETH_RX_PT_OUTER_IPV6;
- if (ipv4 &&
- (rx_error & (BIT(IAVF_RX_DESC_ERROR_IPE_SHIFT) |
- BIT(IAVF_RX_DESC_ERROR_EIPE_SHIFT))))
+ if (unlikely(ipv4 && (csum_bits.ipe || csum_bits.eipe)))
goto checksum_fail;
/* likely incorrect csum if alternate IP extension headers found */
- if (ipv6 &&
- rx_status & BIT(IAVF_RX_DESC_STATUS_IPV6EXADD_SHIFT))
- /* don't increment checksum err here, non-fatal err */
+ if (unlikely(ipv6 && csum_bits.ipv6exadd))
return;
/* there was some L4 error, count error and punt packet to the stack */
- if (rx_error & BIT(IAVF_RX_DESC_ERROR_L4E_SHIFT))
+ if (unlikely(csum_bits.l4e))
goto checksum_fail;
/* handle packets that were not able to be checksummed due
* to arrival speed, in this case the stack can compute
* the csum.
*/
- if (rx_error & BIT(IAVF_RX_DESC_ERROR_PPRS_SHIFT))
+ if (unlikely(csum_bits.pprs))
return;
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -960,52 +963,196 @@ checksum_fail:
}
/**
- * iavf_rx_hash - set the hash value in the skb
+ * iavf_legacy_rx_csum - Indicate in skb if hw indicated a good checksum
+ * @vsi: the VSI we care about
+ * @qw1: quad word 1
+ * @decoded_pt: decoded packet type
+ *
+ * This function only operates on the VIRTCHNL_RXDID_1_32B_BASE legacy 32byte
+ * descriptor writeback format.
+ *
+ * Return: decoded checksum bits.
+ **/
+static struct libeth_rx_csum
+iavf_legacy_rx_csum(const struct iavf_vsi *vsi, u64 qw1,
+ const struct libeth_rx_pt decoded_pt)
+{
+ struct libeth_rx_csum csum_bits = {};
+
+ if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded_pt))
+ return csum_bits;
+
+ csum_bits.ipe = FIELD_GET(IAVF_RXD_LEGACY_IPE_M, qw1);
+ csum_bits.eipe = FIELD_GET(IAVF_RXD_LEGACY_EIPE_M, qw1);
+ csum_bits.l4e = FIELD_GET(IAVF_RXD_LEGACY_L4E_M, qw1);
+ csum_bits.pprs = FIELD_GET(IAVF_RXD_LEGACY_PPRS_M, qw1);
+ csum_bits.l3l4p = FIELD_GET(IAVF_RXD_LEGACY_L3L4P_M, qw1);
+ csum_bits.ipv6exadd = FIELD_GET(IAVF_RXD_LEGACY_IPV6EXADD_M, qw1);
+
+ return csum_bits;
+}
+
+/**
+ * iavf_flex_rx_csum - Indicate in skb if hw indicated a good checksum
+ * @vsi: the VSI we care about
+ * @qw1: quad word 1
+ * @decoded_pt: decoded packet type
+ *
+ * This function only operates on the VIRTCHNL_RXDID_2_FLEX_SQ_NIC flexible
+ * descriptor writeback format.
+ *
+ * Return: decoded checksum bits.
+ **/
+static struct libeth_rx_csum
+iavf_flex_rx_csum(const struct iavf_vsi *vsi, u64 qw1,
+ const struct libeth_rx_pt decoded_pt)
+{
+ struct libeth_rx_csum csum_bits = {};
+
+ if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded_pt))
+ return csum_bits;
+
+ csum_bits.ipe = FIELD_GET(IAVF_RXD_FLEX_XSUM_IPE_M, qw1);
+ csum_bits.eipe = FIELD_GET(IAVF_RXD_FLEX_XSUM_EIPE_M, qw1);
+ csum_bits.l4e = FIELD_GET(IAVF_RXD_FLEX_XSUM_L4E_M, qw1);
+ csum_bits.eudpe = FIELD_GET(IAVF_RXD_FLEX_XSUM_EUDPE_M, qw1);
+ csum_bits.l3l4p = FIELD_GET(IAVF_RXD_FLEX_L3L4P_M, qw1);
+ csum_bits.ipv6exadd = FIELD_GET(IAVF_RXD_FLEX_IPV6EXADD_M, qw1);
+ csum_bits.nat = FIELD_GET(IAVF_RXD_FLEX_NAT_M, qw1);
+
+ return csum_bits;
+}
+
+/**
+ * iavf_legacy_rx_hash - set the hash value in the skb
+ * @ring: descriptor ring
+ * @qw0: quad word 0
+ * @qw1: quad word 1
+ * @skb: skb currently being received and modified
+ * @decoded_pt: decoded packet type
+ *
+ * This function only operates on the VIRTCHNL_RXDID_1_32B_BASE legacy 32byte
+ * descriptor writeback format.
+ **/
+static void iavf_legacy_rx_hash(const struct iavf_ring *ring, __le64 qw0,
+ __le64 qw1, struct sk_buff *skb,
+ const struct libeth_rx_pt decoded_pt)
+{
+ const __le64 rss_mask = cpu_to_le64(IAVF_RXD_LEGACY_FLTSTAT_M);
+ u32 hash;
+
+ if (!libeth_rx_pt_has_hash(ring->netdev, decoded_pt))
+ return;
+
+ if ((qw1 & rss_mask) == rss_mask) {
+ hash = le64_get_bits(qw0, IAVF_RXD_LEGACY_RSS_M);
+ libeth_rx_pt_set_hash(skb, hash, decoded_pt);
+ }
+}
+
+/**
+ * iavf_flex_rx_hash - set the hash value in the skb
* @ring: descriptor ring
- * @rx_desc: specific descriptor
+ * @qw1: quad word 1
* @skb: skb currently being received and modified
- * @rx_ptype: Rx packet type
+ * @decoded_pt: decoded packet type
+ *
+ * This function only operates on the VIRTCHNL_RXDID_2_FLEX_SQ_NIC flexible
+ * descriptor writeback format.
**/
-static void iavf_rx_hash(struct iavf_ring *ring,
- union iavf_rx_desc *rx_desc,
- struct sk_buff *skb,
- u8 rx_ptype)
+static void iavf_flex_rx_hash(const struct iavf_ring *ring, __le64 qw1,
+ struct sk_buff *skb,
+ const struct libeth_rx_pt decoded_pt)
{
- struct libeth_rx_pt decoded;
+ bool rss_valid;
u32 hash;
- const __le64 rss_mask =
- cpu_to_le64((u64)IAVF_RX_DESC_FLTSTAT_RSS_HASH <<
- IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT);
- decoded = libie_rx_pt_parse(rx_ptype);
- if (!libeth_rx_pt_has_hash(ring->netdev, decoded))
+ if (!libeth_rx_pt_has_hash(ring->netdev, decoded_pt))
return;
- if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
- hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
- libeth_rx_pt_set_hash(skb, hash, decoded);
+ rss_valid = le64_get_bits(qw1, IAVF_RXD_FLEX_RSS_VALID_M);
+ if (rss_valid) {
+ hash = le64_get_bits(qw1, IAVF_RXD_FLEX_RSS_HASH_M);
+ libeth_rx_pt_set_hash(skb, hash, decoded_pt);
}
}
/**
+ * iavf_flex_rx_tstamp - Capture Rx timestamp from the descriptor
+ * @rx_ring: descriptor ring
+ * @qw2: quad word 2 of descriptor
+ * @qw3: quad word 3 of descriptor
+ * @skb: skb currently being received
+ *
+ * Read the Rx timestamp value from the descriptor and pass it to the stack.
+ *
+ * This function only operates on the VIRTCHNL_RXDID_2_FLEX_SQ_NIC flexible
+ * descriptor writeback format.
+ */
+static void iavf_flex_rx_tstamp(const struct iavf_ring *rx_ring, __le64 qw2,
+ __le64 qw3, struct sk_buff *skb)
+{
+ u32 tstamp;
+ u64 ns;
+
+ /* Skip processing if timestamps aren't enabled */
+ if (!(rx_ring->flags & IAVF_TXRX_FLAGS_HW_TSTAMP))
+ return;
+
+ /* Check if this Rx descriptor has a valid timestamp */
+ if (!le64_get_bits(qw2, IAVF_PTP_40B_TSTAMP_VALID))
+ return;
+
+ /* the ts_low field only contains the valid bit and sub-nanosecond
+ * precision, so we don't need to extract it.
+ */
+ tstamp = le64_get_bits(qw3, IAVF_RXD_FLEX_QW3_TSTAMP_HIGH_M);
+
+ ns = iavf_ptp_extend_32b_timestamp(rx_ring->ptp->cached_phc_time,
+ tstamp);
+
+ *skb_hwtstamps(skb) = (struct skb_shared_hwtstamps) {
+ .hwtstamp = ns_to_ktime(ns),
+ };
+}
+
+/**
* iavf_process_skb_fields - Populate skb header fields from Rx descriptor
* @rx_ring: rx descriptor ring packet is being transacted on
* @rx_desc: pointer to the EOP Rx descriptor
* @skb: pointer to current skb being populated
- * @rx_ptype: the packet type decoded by hardware
+ * @ptype: the packet type decoded by hardware
+ * @flex: is the descriptor flex or legacy
*
* This function checks the ring, descriptor, and packet information in
* order to populate the hash, checksum, VLAN, protocol, and
* other fields within the skb.
**/
-static void
-iavf_process_skb_fields(struct iavf_ring *rx_ring,
- union iavf_rx_desc *rx_desc, struct sk_buff *skb,
- u8 rx_ptype)
+static void iavf_process_skb_fields(const struct iavf_ring *rx_ring,
+ const struct iavf_rx_desc *rx_desc,
+ struct sk_buff *skb, u32 ptype,
+ bool flex)
{
- iavf_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
-
- iavf_rx_checksum(rx_ring->vsi, skb, rx_desc);
+ struct libeth_rx_csum csum_bits;
+ struct libeth_rx_pt decoded_pt;
+ __le64 qw0 = rx_desc->qw0;
+ __le64 qw1 = rx_desc->qw1;
+ __le64 qw2 = rx_desc->qw2;
+ __le64 qw3 = rx_desc->qw3;
+
+ decoded_pt = libie_rx_pt_parse(ptype);
+
+ if (flex) {
+ iavf_flex_rx_hash(rx_ring, qw1, skb, decoded_pt);
+ iavf_flex_rx_tstamp(rx_ring, qw2, qw3, skb);
+ csum_bits = iavf_flex_rx_csum(rx_ring->vsi, le64_to_cpu(qw1),
+ decoded_pt);
+ } else {
+ iavf_legacy_rx_hash(rx_ring, qw0, qw1, skb, decoded_pt);
+ csum_bits = iavf_legacy_rx_csum(rx_ring->vsi, le64_to_cpu(qw1),
+ decoded_pt);
+ }
+ iavf_rx_csum(rx_ring->vsi, skb, decoded_pt, csum_bits);
skb_record_rx_queue(skb, rx_ring->queue_index);
@@ -1092,8 +1239,7 @@ static struct sk_buff *iavf_build_skb(const struct libeth_fqe *rx_buffer,
/**
* iavf_is_non_eop - process handling of non-EOP buffers
* @rx_ring: Rx ring being processed
- * @rx_desc: Rx descriptor for current buffer
- * @skb: Current socket buffer containing buffer in progress
+ * @fields: Rx descriptor extracted fields
*
* This function updates next to clean. If the buffer is an EOP buffer
* this function exits returning false, otherwise it will place the
@@ -1101,8 +1247,7 @@ static struct sk_buff *iavf_build_skb(const struct libeth_fqe *rx_buffer,
* that this is in fact a non-EOP buffer.
**/
static bool iavf_is_non_eop(struct iavf_ring *rx_ring,
- union iavf_rx_desc *rx_desc,
- struct sk_buff *skb)
+ struct libeth_rqe_info fields)
{
u32 ntc = rx_ring->next_to_clean + 1;
@@ -1113,8 +1258,7 @@ static bool iavf_is_non_eop(struct iavf_ring *rx_ring,
prefetch(IAVF_RX_DESC(rx_ring, ntc));
/* if we are the last buffer then there is nothing else to do */
-#define IAVF_RXD_EOF BIT(IAVF_RX_DESC_STATUS_EOF_SHIFT)
- if (likely(iavf_test_staterr(rx_desc, IAVF_RXD_EOF)))
+ if (likely(fields.eop))
return false;
rx_ring->rx_stats.non_eop_descs++;
@@ -1123,6 +1267,109 @@ static bool iavf_is_non_eop(struct iavf_ring *rx_ring,
}
/**
+ * iavf_extract_legacy_rx_fields - Extract fields from the Rx descriptor
+ * @rx_ring: rx descriptor ring
+ * @rx_desc: the descriptor to process
+ *
+ * Decode the Rx descriptor and extract relevant information including the
+ * size, VLAN tag, Rx packet type, end of packet field and RXE field value.
+ *
+ * This function only operates on the VIRTCHNL_RXDID_1_32B_BASE legacy 32byte
+ * descriptor writeback format.
+ *
+ * Return: fields extracted from the Rx descriptor.
+ */
+static struct libeth_rqe_info
+iavf_extract_legacy_rx_fields(const struct iavf_ring *rx_ring,
+ const struct iavf_rx_desc *rx_desc)
+{
+ u64 qw0 = le64_to_cpu(rx_desc->qw0);
+ u64 qw1 = le64_to_cpu(rx_desc->qw1);
+ u64 qw2 = le64_to_cpu(rx_desc->qw2);
+ struct libeth_rqe_info fields;
+ bool l2tag1p, l2tag2p;
+
+ fields.eop = FIELD_GET(IAVF_RXD_LEGACY_EOP_M, qw1);
+ fields.len = FIELD_GET(IAVF_RXD_LEGACY_LENGTH_M, qw1);
+
+ if (!fields.eop)
+ return fields;
+
+ fields.rxe = FIELD_GET(IAVF_RXD_LEGACY_RXE_M, qw1);
+ fields.ptype = FIELD_GET(IAVF_RXD_LEGACY_PTYPE_M, qw1);
+ fields.vlan = 0;
+
+ if (rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
+ l2tag1p = FIELD_GET(IAVF_RXD_LEGACY_L2TAG1P_M, qw1);
+ if (l2tag1p)
+ fields.vlan = FIELD_GET(IAVF_RXD_LEGACY_L2TAG1_M, qw0);
+ } else if (rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
+ l2tag2p = FIELD_GET(IAVF_RXD_LEGACY_L2TAG2P_M, qw2);
+ if (l2tag2p)
+ fields.vlan = FIELD_GET(IAVF_RXD_LEGACY_L2TAG2_M, qw2);
+ }
+
+ return fields;
+}
+
+/**
+ * iavf_extract_flex_rx_fields - Extract fields from the Rx descriptor
+ * @rx_ring: rx descriptor ring
+ * @rx_desc: the descriptor to process
+ *
+ * Decode the Rx descriptor and extract relevant information including the
+ * size, VLAN tag, Rx packet type, end of packet field and RXE field value.
+ *
+ * This function only operates on the VIRTCHNL_RXDID_2_FLEX_SQ_NIC flexible
+ * descriptor writeback format.
+ *
+ * Return: fields extracted from the Rx descriptor.
+ */
+static struct libeth_rqe_info
+iavf_extract_flex_rx_fields(const struct iavf_ring *rx_ring,
+ const struct iavf_rx_desc *rx_desc)
+{
+ struct libeth_rqe_info fields = {};
+ u64 qw0 = le64_to_cpu(rx_desc->qw0);
+ u64 qw1 = le64_to_cpu(rx_desc->qw1);
+ u64 qw2 = le64_to_cpu(rx_desc->qw2);
+ bool l2tag1p, l2tag2p;
+
+ fields.eop = FIELD_GET(IAVF_RXD_FLEX_EOP_M, qw1);
+ fields.len = FIELD_GET(IAVF_RXD_FLEX_PKT_LEN_M, qw0);
+
+ if (!fields.eop)
+ return fields;
+
+ fields.rxe = FIELD_GET(IAVF_RXD_FLEX_RXE_M, qw1);
+ fields.ptype = FIELD_GET(IAVF_RXD_FLEX_PTYPE_M, qw0);
+ fields.vlan = 0;
+
+ if (rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
+ l2tag1p = FIELD_GET(IAVF_RXD_FLEX_L2TAG1P_M, qw1);
+ if (l2tag1p)
+ fields.vlan = FIELD_GET(IAVF_RXD_FLEX_L2TAG1_M, qw1);
+ } else if (rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
+ l2tag2p = FIELD_GET(IAVF_RXD_FLEX_L2TAG2P_M, qw2);
+ if (l2tag2p)
+ fields.vlan = FIELD_GET(IAVF_RXD_FLEX_L2TAG2_2_M, qw2);
+ }
+
+ return fields;
+}
+
+static struct libeth_rqe_info
+iavf_extract_rx_fields(const struct iavf_ring *rx_ring,
+ const struct iavf_rx_desc *rx_desc,
+ bool flex)
+{
+ if (flex)
+ return iavf_extract_flex_rx_fields(rx_ring, rx_desc);
+ else
+ return iavf_extract_legacy_rx_fields(rx_ring, rx_desc);
+}
+
+/**
* iavf_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
* @rx_ring: rx descriptor ring to transact packets on
* @budget: Total limit on number of packets to process
@@ -1136,18 +1383,17 @@ static bool iavf_is_non_eop(struct iavf_ring *rx_ring,
**/
static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
{
+ bool flex = rx_ring->rxdid == VIRTCHNL_RXDID_2_FLEX_SQ_NIC;
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
struct sk_buff *skb = rx_ring->skb;
u16 cleaned_count = IAVF_DESC_UNUSED(rx_ring);
bool failure = false;
while (likely(total_rx_packets < (unsigned int)budget)) {
+ struct libeth_rqe_info fields;
struct libeth_fqe *rx_buffer;
- union iavf_rx_desc *rx_desc;
- unsigned int size;
- u16 vlan_tag = 0;
- u8 rx_ptype;
- u64 qword;
+ struct iavf_rx_desc *rx_desc;
+ u64 qw1;
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= IAVF_RX_BUFFER_WRITE) {
@@ -1158,35 +1404,32 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
rx_desc = IAVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
- /* status_error_len will always be zero for unused descriptors
- * because it's cleared in cleanup, and overlaps with hdr_addr
- * which is always zero because packet split isn't used, if the
- * hardware wrote DD then the length will be non-zero
- */
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
-
/* This memory barrier is needed to keep us from reading
* any other fields out of the rx_desc until we have
* verified the descriptor has been written back.
*/
dma_rmb();
-#define IAVF_RXD_DD BIT(IAVF_RX_DESC_STATUS_DD_SHIFT)
- if (!iavf_test_staterr(rx_desc, IAVF_RXD_DD))
+
+ qw1 = le64_to_cpu(rx_desc->qw1);
+ /* If DD field (descriptor done) is unset then other fields are
+ * not valid
+ */
+ if (!iavf_is_descriptor_done(qw1, flex))
break;
- size = FIELD_GET(IAVF_RXD_QW1_LENGTH_PBUF_MASK, qword);
+ fields = iavf_extract_rx_fields(rx_ring, rx_desc, flex);
iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb);
rx_buffer = &rx_ring->rx_fqes[rx_ring->next_to_clean];
- if (!libeth_rx_sync_for_cpu(rx_buffer, size))
+ if (!libeth_rx_sync_for_cpu(rx_buffer, fields.len))
goto skip_data;
/* retrieve a buffer from the ring */
if (skb)
- iavf_add_rx_frag(skb, rx_buffer, size);
+ iavf_add_rx_frag(skb, rx_buffer, fields.len);
else
- skb = iavf_build_skb(rx_buffer, size);
+ skb = iavf_build_skb(rx_buffer, fields.len);
/* exit if we failed to retrieve a buffer */
if (!skb) {
@@ -1197,15 +1440,14 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
skip_data:
cleaned_count++;
- if (iavf_is_non_eop(rx_ring, rx_desc, skb) || unlikely(!skb))
+ if (iavf_is_non_eop(rx_ring, fields) || unlikely(!skb))
continue;
- /* ERR_MASK will only have valid bits if EOP set, and
- * what we are doing here is actually checking
- * IAVF_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
- * the error field
+ /* The RXE field in the descriptor indicates MAC errors (such as
+ * CRC, alignment or oversize errors). If it is set, the driver
+ * discards the packet.
*/
- if (unlikely(iavf_test_staterr(rx_desc, BIT(IAVF_RXD_QW1_ERROR_SHIFT)))) {
+ if (unlikely(fields.rxe)) {
dev_kfree_skb_any(skb);
skb = NULL;
continue;
@@ -1219,22 +1461,11 @@ skip_data:
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- rx_ptype = FIELD_GET(IAVF_RXD_QW1_PTYPE_MASK, qword);
-
/* populate checksum, VLAN, and protocol */
- iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
-
- if (qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT) &&
- rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1)
- vlan_tag = le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1);
- if (rx_desc->wb.qword2.ext_status &
- cpu_to_le16(BIT(IAVF_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)) &&
- rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2)
- vlan_tag = le16_to_cpu(rx_desc->wb.qword2.l2tag2_2);
+ iavf_process_skb_fields(rx_ring, rx_desc, skb, fields.ptype, flex);
iavf_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
- iavf_receive_skb(rx_ring, skb, vlan_tag);
+ iavf_receive_skb(rx_ring, skb, fields.vlan);
skb = NULL;
/* update budget accounting */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
index f97c702c0802..79ad554f2d53 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
@@ -80,25 +80,6 @@ enum iavf_dyn_idx_t {
BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
-#define iavf_rx_desc iavf_32byte_rx_desc
-
-/**
- * iavf_test_staterr - tests bits in Rx descriptor status and error fields
- * @rx_desc: pointer to receive descriptor (in le64 format)
- * @stat_err_bits: value to mask
- *
- * This function does some fast chicanery in order to return the
- * value of the mask which is really only used for boolean tests.
- * The status_error_len doesn't need to be shifted because it begins
- * at offset zero.
- */
-static inline bool iavf_test_staterr(union iavf_rx_desc *rx_desc,
- const u64 stat_err_bits)
-{
- return !!(rx_desc->wb.qword1.status_error_len &
- cpu_to_le64(stat_err_bits));
-}
-
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IAVF_RX_INCREMENT(r, i) \
do { \
@@ -262,6 +243,8 @@ struct iavf_ring {
u16 next_to_use;
u16 next_to_clean;
+ u16 rxdid; /* Rx descriptor format */
+
u16 flags;
#define IAVF_TXR_FLAGS_WB_ON_ITR BIT(0)
#define IAVF_TXR_FLAGS_ARM_WB BIT(1)
@@ -269,6 +252,7 @@ struct iavf_ring {
#define IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 BIT(3)
#define IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2 BIT(4)
#define IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2 BIT(5)
+#define IAVF_TXRX_FLAGS_HW_TSTAMP BIT(6)
/* stats structs */
struct iavf_queue_stats stats;
@@ -295,6 +279,8 @@ struct iavf_ring {
* for this ring.
*/
+ struct iavf_ptp *ptp;
+
u32 rx_buf_len;
struct net_shaper q_shaper;
bool q_shaper_update;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_type.h b/drivers/net/ethernet/intel/iavf/iavf_type.h
index f6b09e57abce..f9e1319620f4 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_type.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_type.h
@@ -178,110 +178,116 @@ struct iavf_hw {
char err_str[16];
};
-/* RX Descriptors */
-union iavf_16byte_rx_desc {
- struct {
- __le64 pkt_addr; /* Packet buffer address */
- __le64 hdr_addr; /* Header buffer address */
- } read;
- struct {
- struct {
- struct {
- union {
- __le16 mirroring_status;
- __le16 fcoe_ctx_id;
- } mirr_fcoe;
- __le16 l2tag1;
- } lo_dword;
- union {
- __le32 rss; /* RSS Hash */
- __le32 fd_id; /* Flow director filter id */
- __le32 fcoe_param; /* FCoE DDP Context id */
- } hi_dword;
- } qword0;
- struct {
- /* ext status/error/pktype/length */
- __le64 status_error_len;
- } qword1;
- } wb; /* writeback */
-};
-
-union iavf_32byte_rx_desc {
- struct {
- __le64 pkt_addr; /* Packet buffer address */
- __le64 hdr_addr; /* Header buffer address */
- /* bit 0 of hdr_buffer_addr is DD bit */
- __le64 rsvd1;
- __le64 rsvd2;
- } read;
- struct {
- struct {
- struct {
- union {
- __le16 mirroring_status;
- __le16 fcoe_ctx_id;
- } mirr_fcoe;
- __le16 l2tag1;
- } lo_dword;
- union {
- __le32 rss; /* RSS Hash */
- __le32 fcoe_param; /* FCoE DDP Context id */
- /* Flow director filter id in case of
- * Programming status desc WB
- */
- __le32 fd_id;
- } hi_dword;
- } qword0;
- struct {
- /* status/error/pktype/length */
- __le64 status_error_len;
- } qword1;
- struct {
- __le16 ext_status; /* extended status */
- __le16 rsvd;
- __le16 l2tag2_1;
- __le16 l2tag2_2;
- } qword2;
- struct {
- union {
- __le32 flex_bytes_lo;
- __le32 pe_status;
- } lo_dword;
- union {
- __le32 flex_bytes_hi;
- __le32 fd_id;
- } hi_dword;
- } qword3;
- } wb; /* writeback */
-};
-
-enum iavf_rx_desc_status_bits {
- /* Note: These are predefined bit offsets */
- IAVF_RX_DESC_STATUS_DD_SHIFT = 0,
- IAVF_RX_DESC_STATUS_EOF_SHIFT = 1,
- IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT = 2,
- IAVF_RX_DESC_STATUS_L3L4P_SHIFT = 3,
- IAVF_RX_DESC_STATUS_CRCP_SHIFT = 4,
- IAVF_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
- IAVF_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
- /* Note: Bit 8 is reserved in X710 and XL710 */
- IAVF_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8,
- IAVF_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
- IAVF_RX_DESC_STATUS_FLM_SHIFT = 11,
- IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */
- IAVF_RX_DESC_STATUS_LPBK_SHIFT = 14,
- IAVF_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
- IAVF_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
- /* Note: For non-tunnel packets INT_UDP_0 is the right status for
- * UDP header
- */
- IAVF_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18,
- IAVF_RX_DESC_STATUS_LAST /* this entry must be last!!! */
-};
-
-#define IAVF_RXD_QW1_STATUS_SHIFT 0
-#define IAVF_RXD_QW1_STATUS_MASK ((BIT(IAVF_RX_DESC_STATUS_LAST) - 1) \
- << IAVF_RXD_QW1_STATUS_SHIFT)
+/**
+ * struct iavf_rx_desc - Receive descriptor (both legacy and flexible)
+ * @qw0: quad word 0 fields:
+ * Legacy: Descriptor Type; Mirror ID; L2TAG1P (S-TAG); Filter Status
+ * Flex: Descriptor Type; Mirror ID; UMBCAST; Packet Type; Flexible Flags
+ * Section 0; Packet Length; Header Length; Split Header Flag;
+ * Flexible Flags section 1 / Extended Status
+ * @qw1: quad word 1 fields:
+ * Legacy: Status Field; Error Field; Packet Type; Packet Length (packet,
+ * header, Split Header Flag)
+ * Flex: Status / Error 0 Field; L2TAG1P (S-TAG); Flexible Metadata
+ * Container #0; Flexible Metadata Container #1
+ * @qw2: quad word 2 fields:
+ * Legacy: Extended Status; 1st L2TAG2P (C-TAG); 2nd L2TAG2P (C-TAG)
+ * Flex: Status / Error 1 Field; Flexible Flags section 2; Timestamp Low;
+ * 1st L2TAG2 (C-TAG); 2nd L2TAG2 (C-TAG)
+ * @qw3: quad word 3 fields:
+ * Legacy: FD Filter ID / Flexible Bytes
+ * Flex: Flexible Metadata Container #2; Flexible Metadata Container #3;
+ * Flexible Metadata Container #4 / Timestamp High 0; Flexible
+ * Metadata Container #5 / Timestamp High 1;
+ */
+struct iavf_rx_desc {
+ aligned_le64 qw0;
+/* The hash signature (RSS) */
+#define IAVF_RXD_LEGACY_RSS_M GENMASK_ULL(63, 32)
+/* Stripped C-TAG VLAN from the receive packet */
+#define IAVF_RXD_LEGACY_L2TAG1_M GENMASK_ULL(31, 16)
+/* Packet type */
+#define IAVF_RXD_FLEX_PTYPE_M GENMASK_ULL(25, 16)
+/* Packet length */
+#define IAVF_RXD_FLEX_PKT_LEN_M GENMASK_ULL(45, 32)
+
+ aligned_le64 qw1;
+/* Descriptor done indication flag. */
+#define IAVF_RXD_LEGACY_DD_M BIT(0)
+/* End of packet. Set to 1 if this descriptor is the last one of the packet */
+#define IAVF_RXD_LEGACY_EOP_M BIT(1)
+/* L2 TAG 1 presence indication */
+#define IAVF_RXD_LEGACY_L2TAG1P_M BIT(2)
+/* Detectable L3 and L4 integrity check is processed by the HW */
+#define IAVF_RXD_LEGACY_L3L4P_M BIT(3)
+/* Set when an IPv6 packet contains a Destination Options Header or a Routing
+ * Header.
+ */
+#define IAVF_RXD_LEGACY_IPV6EXADD_M BIT(15)
+/* Receive MAC Errors: CRC; Alignment; Oversize; Undersizes; Length error */
+#define IAVF_RXD_LEGACY_RXE_M BIT(19)
+/* Checksum reports:
+ * - IPE: IP checksum error
+ * - L4E: L4 integrity error
+ * - EIPE: External IP header (tunneled packets)
+ */
+#define IAVF_RXD_LEGACY_IPE_M BIT(22)
+#define IAVF_RXD_LEGACY_L4E_M BIT(23)
+#define IAVF_RXD_LEGACY_EIPE_M BIT(24)
+/* Set for packets that skip checksum calculation in pre-parser */
+#define IAVF_RXD_LEGACY_PPRS_M BIT(26)
+/* Indicates the content in the Filter Status field */
+#define IAVF_RXD_LEGACY_FLTSTAT_M GENMASK_ULL(13, 12)
+/* Packet type */
+#define IAVF_RXD_LEGACY_PTYPE_M GENMASK_ULL(37, 30)
+/* Packet length */
+#define IAVF_RXD_LEGACY_LENGTH_M GENMASK_ULL(51, 38)
+/* Descriptor done indication flag */
+#define IAVF_RXD_FLEX_DD_M BIT(0)
+/* End of packet. Set to 1 if this descriptor is the last one of the packet */
+#define IAVF_RXD_FLEX_EOP_M BIT(1)
+/* Detectable L3 and L4 integrity check is processed by the HW */
+#define IAVF_RXD_FLEX_L3L4P_M BIT(3)
+/* Checksum reports:
+ * - IPE: IP checksum error
+ * - L4E: L4 integrity error
+ * - EIPE: External IP header (tunneled packets)
+ * - EUDPE: External UDP checksum error (tunneled packets)
+ */
+#define IAVF_RXD_FLEX_XSUM_IPE_M BIT(4)
+#define IAVF_RXD_FLEX_XSUM_L4E_M BIT(5)
+#define IAVF_RXD_FLEX_XSUM_EIPE_M BIT(6)
+#define IAVF_RXD_FLEX_XSUM_EUDPE_M BIT(7)
+/* Set when an IPv6 packet contains a Destination Options Header or a Routing
+ * Header.
+ */
+#define IAVF_RXD_FLEX_IPV6EXADD_M BIT(9)
+/* Receive MAC Errors: CRC; Alignment; Oversize; Undersizes; Length error */
+#define IAVF_RXD_FLEX_RXE_M BIT(10)
+/* Indicates that the RSS/HASH result is valid */
+#define IAVF_RXD_FLEX_RSS_VALID_M BIT(12)
+/* L2 TAG 1 presence indication */
+#define IAVF_RXD_FLEX_L2TAG1P_M BIT(13)
+/* Stripped L2 Tag from the receive packet */
+#define IAVF_RXD_FLEX_L2TAG1_M GENMASK_ULL(31, 16)
+/* The hash signature (RSS) */
+#define IAVF_RXD_FLEX_RSS_HASH_M GENMASK_ULL(63, 32)
+
+ aligned_le64 qw2;
+/* L2 Tag 2 Presence */
+#define IAVF_RXD_LEGACY_L2TAG2P_M BIT(0)
+/* Stripped S-TAG VLAN from the receive packet */
+#define IAVF_RXD_LEGACY_L2TAG2_M GENMASK_ULL(63, 32)
+/* Stripped S-TAG VLAN from the receive packet */
+#define IAVF_RXD_FLEX_L2TAG2_2_M GENMASK_ULL(63, 48)
+/* The packet is a UDP tunneled packet */
+#define IAVF_RXD_FLEX_NAT_M BIT(4)
+/* L2 Tag 2 Presence */
+#define IAVF_RXD_FLEX_L2TAG2P_M BIT(11)
+ aligned_le64 qw3;
+#define IAVF_RXD_FLEX_QW3_TSTAMP_HIGH_M GENMASK_ULL(63, 32)
+} __aligned(4 * sizeof(__le64));
+static_assert(sizeof(struct iavf_rx_desc) == 32);
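To illustrate how these GENMASK_ULL()/FIELD_GET() pairs decode a quad word, here is a standalone model using the legacy qword 1 PTYPE (bits 37:30) and packet length (bits 51:38) positions from above; the sample descriptor value is made up:

#include <stdint.h>
#include <stdio.h>

#define LEGACY_PTYPE_SHIFT	30
#define LEGACY_PTYPE_WIDTH	8
#define LEGACY_LEN_SHIFT	38
#define LEGACY_LEN_WIDTH	14

/* Model of FIELD_GET(): shift the field down, then mask its width. */
static uint64_t field_get(uint64_t v, int shift, int width)
{
	return (v >> shift) & ((1ULL << width) - 1);
}

int main(void)
{
	/* Fabricated qw1 with DD|EOP set, ptype 0x2a, length 1500. */
	uint64_t qw1 = 0x3ULL | (0x2aULL << LEGACY_PTYPE_SHIFT) |
		       (1500ULL << LEGACY_LEN_SHIFT);

	printf("ptype=%#llx len=%llu\n",
	       (unsigned long long)field_get(qw1, LEGACY_PTYPE_SHIFT,
					     LEGACY_PTYPE_WIDTH),
	       (unsigned long long)field_get(qw1, LEGACY_LEN_SHIFT,
					     LEGACY_LEN_WIDTH));
	return 0;
}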
#define IAVF_RXD_QW1_STATUS_TSYNINDX_SHIFT IAVF_RX_DESC_STATUS_TSYNINDX_SHIFT
#define IAVF_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
@@ -298,22 +304,6 @@ enum iavf_rx_desc_fltstat_values {
IAVF_RX_DESC_FLTSTAT_RSS_HASH = 3,
};
-#define IAVF_RXD_QW1_ERROR_SHIFT 19
-#define IAVF_RXD_QW1_ERROR_MASK (0xFFUL << IAVF_RXD_QW1_ERROR_SHIFT)
-
-enum iavf_rx_desc_error_bits {
- /* Note: These are predefined bit offsets */
- IAVF_RX_DESC_ERROR_RXE_SHIFT = 0,
- IAVF_RX_DESC_ERROR_RECIPE_SHIFT = 1,
- IAVF_RX_DESC_ERROR_HBO_SHIFT = 2,
- IAVF_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */
- IAVF_RX_DESC_ERROR_IPE_SHIFT = 3,
- IAVF_RX_DESC_ERROR_L4E_SHIFT = 4,
- IAVF_RX_DESC_ERROR_EIPE_SHIFT = 5,
- IAVF_RX_DESC_ERROR_OVERSIZE_SHIFT = 6,
- IAVF_RX_DESC_ERROR_PPRS_SHIFT = 7
-};
-
enum iavf_rx_desc_error_l3l4e_fcoe_masks {
IAVF_RX_DESC_ERROR_L3L4E_NONE = 0,
IAVF_RX_DESC_ERROR_L3L4E_PROT = 1,
@@ -322,13 +312,6 @@ enum iavf_rx_desc_error_l3l4e_fcoe_masks {
IAVF_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4
};
-#define IAVF_RXD_QW1_PTYPE_SHIFT 30
-#define IAVF_RXD_QW1_PTYPE_MASK (0xFFULL << IAVF_RXD_QW1_PTYPE_SHIFT)
-
-#define IAVF_RXD_QW1_LENGTH_PBUF_SHIFT 38
-#define IAVF_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \
- IAVF_RXD_QW1_LENGTH_PBUF_SHIFT)
-
#define IAVF_RXD_QW1_LENGTH_HBUF_SHIFT 52
#define IAVF_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \
IAVF_RXD_QW1_LENGTH_HBUF_SHIFT)
@@ -347,6 +330,8 @@ enum iavf_rx_desc_ext_status_bits {
IAVF_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
};
+#define IAVF_RX_DESC_EXT_STATUS_L2TAG2P_M BIT(IAVF_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)
+
enum iavf_rx_desc_pe_status_bits {
/* Note: These are predefined bit offsets */
IAVF_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_types.h b/drivers/net/ethernet/intel/iavf/iavf_types.h
new file mode 100644
index 000000000000..a095855122bf
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/iavf_types.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Intel Corporation. */
+
+#ifndef _IAVF_TYPES_H_
+#define _IAVF_TYPES_H_
+
+#include "iavf_types.h"
+
+#include <linux/avf/virtchnl.h>
+#include <linux/ptp_clock_kernel.h>
+
+/* structure used to queue PTP commands for processing */
+struct iavf_ptp_aq_cmd {
+ struct list_head list;
+ enum virtchnl_ops v_opcode:16;
+ u16 msglen;
+ u8 msg[] __counted_by(msglen);
+};
+
+struct iavf_ptp {
+ wait_queue_head_t phc_time_waitqueue;
+ struct virtchnl_ptp_caps hw_caps;
+ struct ptp_clock_info info;
+ struct ptp_clock *clock;
+ struct list_head aq_cmds;
+ u64 cached_phc_time;
+ unsigned long cached_phc_updated;
+ /* Lock protecting access to the AQ command list */
+ struct mutex aq_cmd_lock;
+ struct kernel_hwtstamp_config hwtstamp_config;
+ bool phc_time_ready:1;
+};
+
+#endif /* _IAVF_TYPES_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 15d388b431c5..a6f0e5990be2 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -4,6 +4,7 @@
#include <linux/net/intel/libie/rx.h>
#include "iavf.h"
+#include "iavf_ptp.h"
#include "iavf_prototype.h"
/**
@@ -144,9 +145,11 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter)
VIRTCHNL_VF_OFFLOAD_ENCAP |
VIRTCHNL_VF_OFFLOAD_TC_U32 |
VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
+ VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
VIRTCHNL_VF_OFFLOAD_CRC |
VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM |
VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
+ VIRTCHNL_VF_CAP_PTP |
VIRTCHNL_VF_OFFLOAD_ADQ |
VIRTCHNL_VF_OFFLOAD_USO |
VIRTCHNL_VF_OFFLOAD_FDIR_PF |
@@ -177,6 +180,54 @@ int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter)
NULL, 0);
}
+int iavf_send_vf_supported_rxdids_msg(struct iavf_adapter *adapter)
+{
+ adapter->aq_required &= ~IAVF_FLAG_AQ_GET_SUPPORTED_RXDIDS;
+
+ if (!IAVF_RXDID_ALLOWED(adapter))
+ return -EOPNOTSUPP;
+
+ adapter->current_op = VIRTCHNL_OP_GET_SUPPORTED_RXDIDS;
+
+ return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
+ NULL, 0);
+}
+
+/**
+ * iavf_send_vf_ptp_caps_msg - Send request for PTP capabilities
+ * @adapter: private adapter structure
+ *
+ * Send the VIRTCHNL_OP_1588_PTP_GET_CAPS command to the PF to request the PTP
+ * capabilities available to this device. This includes the following
+ * potential access:
+ *
+ * * READ_PHC - access to read the PTP hardware clock time
+ * * RX_TSTAMP - access to request Rx timestamps on all received packets
+ *
+ * The PF will reply with the same opcode and a filled out copy of the
+ * virtchnl_ptp_caps structure which defines the specifics of which features
+ * are accessible to this device.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int iavf_send_vf_ptp_caps_msg(struct iavf_adapter *adapter)
+{
+ struct virtchnl_ptp_caps hw_caps = {
+ .caps = VIRTCHNL_1588_PTP_CAP_READ_PHC |
+ VIRTCHNL_1588_PTP_CAP_RX_TSTAMP
+ };
+
+ adapter->aq_required &= ~IAVF_FLAG_AQ_GET_PTP_CAPS;
+
+ if (!IAVF_PTP_ALLOWED(adapter))
+ return -EOPNOTSUPP;
+
+ adapter->current_op = VIRTCHNL_OP_1588_PTP_GET_CAPS;
+
+ return iavf_send_pf_msg(adapter, VIRTCHNL_OP_1588_PTP_GET_CAPS,
+ (u8 *)&hw_caps, sizeof(hw_caps));
+}
+
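/*
 * [Editor's sketch -- not part of this patch] Once the PF reply populates
 * adapter->ptp.hw_caps, a negotiated capability check could plausibly look
 * like the helper below (name hypothetical); this mirrors how
 * iavf_ptp_cap_supported() is used later in the patch to decide whether to
 * set VIRTCHNL_PTP_RX_TSTAMP on the Rx queues.
 */
static inline bool iavf_ptp_cap_supported_sketch(const struct iavf_adapter *adapter,
						 u32 cap)
{
	return (adapter->ptp.hw_caps.caps & cap) == cap;
}
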
/**
* iavf_validate_num_queues
* @adapter: adapter structure
@@ -263,6 +314,40 @@ int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter)
return err;
}
+int iavf_get_vf_supported_rxdids(struct iavf_adapter *adapter)
+{
+ struct iavf_arq_event_info event;
+ u64 rxdids;
+ int err;
+
+ event.msg_buf = (u8 *)&rxdids;
+ event.buf_len = sizeof(rxdids);
+
+ err = iavf_poll_virtchnl_msg(&adapter->hw, &event,
+ VIRTCHNL_OP_GET_SUPPORTED_RXDIDS);
+ if (!err)
+ adapter->supp_rxdids = rxdids;
+
+ return err;
+}
+
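/*
 * [Editor's sketch -- not part of this patch] supp_rxdids is a bitmap keyed
 * by descriptor ID, so selecting a descriptor format later could look like
 * the following; the virtchnl RXDID constants used here are assumptions:
 */
if (adapter->supp_rxdids & BIT(VIRTCHNL_RXDID_2_FLEX_SQ_NIC))
	adapter->rxdid = VIRTCHNL_RXDID_2_FLEX_SQ_NIC;
else
	adapter->rxdid = VIRTCHNL_RXDID_1_32B_BASE;
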
+int iavf_get_vf_ptp_caps(struct iavf_adapter *adapter)
+{
+ struct virtchnl_ptp_caps caps = {};
+ struct iavf_arq_event_info event;
+ int err;
+
+ event.msg_buf = (u8 *)&caps;
+ event.buf_len = sizeof(caps);
+
+ err = iavf_poll_virtchnl_msg(&adapter->hw, &event,
+ VIRTCHNL_OP_1588_PTP_GET_CAPS);
+ if (!err)
+ adapter->ptp.hw_caps = caps;
+
+ return err;
+}
+
/**
* iavf_configure_queues
* @adapter: adapter structure
@@ -275,6 +360,7 @@ void iavf_configure_queues(struct iavf_adapter *adapter)
int pairs = adapter->num_active_queues;
struct virtchnl_queue_pair_info *vqpi;
u32 i, max_frame;
+ u8 rx_flags = 0;
size_t len;
max_frame = LIBIE_MAX_RX_FRM_LEN(adapter->rx_rings->pp->p.offset);
@@ -292,6 +378,9 @@ void iavf_configure_queues(struct iavf_adapter *adapter)
if (!vqci)
return;
+ if (iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_RX_TSTAMP))
+ rx_flags |= VIRTCHNL_PTP_RX_TSTAMP;
+
vqci->vsi_id = adapter->vsi_res->vsi_id;
vqci->num_queue_pairs = pairs;
vqpi = vqci->qpair;
@@ -309,9 +398,12 @@ void iavf_configure_queues(struct iavf_adapter *adapter)
vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
vqpi->rxq.max_pkt_size = max_frame;
vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len;
+ if (IAVF_RXDID_ALLOWED(adapter))
+ vqpi->rxq.rxdid = adapter->rxdid;
if (CRC_OFFLOAD_ALLOWED(adapter))
vqpi->rxq.crc_disable = !!(adapter->netdev->features &
NETIF_F_RXFCS);
+ vqpi->rxq.flags = rx_flags;
vqpi++;
}
@@ -1402,6 +1494,67 @@ void iavf_disable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid)
VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2);
}
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+/**
+ * iavf_virtchnl_send_ptp_cmd - Send one queued PTP command
+ * @adapter: adapter private structure
+ *
+ * De-queue one PTP command request and send the command message to the PF.
+ * Clear IAVF_FLAG_AQ_SEND_PTP_CMD if no more messages are left to send.
+ */
+void iavf_virtchnl_send_ptp_cmd(struct iavf_adapter *adapter)
+{
+ struct iavf_ptp_aq_cmd *cmd;
+ int err;
+
+ if (!adapter->ptp.clock) {
+ /* This shouldn't be possible to hit, since no messages should
+ * be queued if PTP is not initialized.
+ */
+ pci_err(adapter->pdev, "PTP is not initialized\n");
+ adapter->aq_required &= ~IAVF_FLAG_AQ_SEND_PTP_CMD;
+ return;
+ }
+
+ mutex_lock(&adapter->ptp.aq_cmd_lock);
+ cmd = list_first_entry_or_null(&adapter->ptp.aq_cmds,
+ struct iavf_ptp_aq_cmd, list);
+ if (!cmd) {
+ /* no further PTP messages to send */
+ adapter->aq_required &= ~IAVF_FLAG_AQ_SEND_PTP_CMD;
+ goto out_unlock;
+ }
+
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ pci_err(adapter->pdev,
+ "Cannot send PTP command %d, command %d pending\n",
+ cmd->v_opcode, adapter->current_op);
+ goto out_unlock;
+ }
+
+ err = iavf_send_pf_msg(adapter, cmd->v_opcode, cmd->msg, cmd->msglen);
+ if (!err) {
+ /* Command was sent without errors, so we can remove it from
+ * the list and discard it.
+ */
+ list_del(&cmd->list);
+ kfree(cmd);
+ } else {
+ /* We failed to send the command, try again next cycle */
+ pci_err(adapter->pdev, "Failed to send PTP command %d\n",
+ cmd->v_opcode);
+ }
+
+ if (list_empty(&adapter->ptp.aq_cmds))
+ /* no further PTP messages to send */
+ adapter->aq_required &= ~IAVF_FLAG_AQ_SEND_PTP_CMD;
+
+out_unlock:
+ mutex_unlock(&adapter->ptp.aq_cmd_lock);
+}
+#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
+
/**
* iavf_print_link_message - print link up or down
* @adapter: adapter structure
@@ -2098,6 +2251,37 @@ static void iavf_activate_fdir_filters(struct iavf_adapter *adapter)
}
/**
+ * iavf_virtchnl_ptp_get_time - Respond to VIRTCHNL_OP_1588_PTP_GET_TIME
+ * @adapter: private adapter structure
+ * @data: the message from the PF
+ * @len: length of the message from the PF
+ *
+ * Handle the VIRTCHNL_OP_1588_PTP_GET_TIME message from the PF. This message
+ * is sent by the PF in response to the same op as a request from the VF.
+ * Extract the 64bit nanoseconds time from the message and store it in
+ * cached_phc_time. Then, notify any thread that is waiting for the update via
+ * the wait queue.
+ */
+static void iavf_virtchnl_ptp_get_time(struct iavf_adapter *adapter,
+ void *data, u16 len)
+{
+ struct virtchnl_phc_time *msg = data;
+
+ if (len != sizeof(*msg)) {
+ dev_err_once(&adapter->pdev->dev,
+ "Invalid VIRTCHNL_OP_1588_PTP_GET_TIME from PF. Got size %u, expected %zu\n",
+ len, sizeof(*msg));
+ return;
+ }
+
+ adapter->ptp.cached_phc_time = msg->time;
+ adapter->ptp.cached_phc_updated = jiffies;
+ adapter->ptp.phc_time_ready = true;
+
+ wake_up(&adapter->ptp.phc_time_waitqueue);
+}
+
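/*
 * [Editor's sketch -- not part of this patch] The waiting side implied by
 * the wake_up() above: a PHC read queues VIRTCHNL_OP_1588_PTP_GET_TIME and
 * then sleeps until this handler fills cached_phc_time. Assumed shape:
 */
adapter->ptp.phc_time_ready = false;
/* ... queue VIRTCHNL_OP_1588_PTP_GET_TIME via the PTP command list ... */
ret = wait_event_interruptible_timeout(adapter->ptp.phc_time_waitqueue,
				       adapter->ptp.phc_time_ready, HZ);
if (ret == 0)
	return -EBUSY;	/* PF did not reply in time */
else if (ret < 0)
	return ret;	/* interrupted by a signal */
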
+/**
* iavf_virtchnl_completion
* @adapter: adapter structure
* @v_opcode: opcode sent by PF
@@ -2509,6 +2693,25 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
aq_required;
}
break;
+ case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
+ if (msglen != sizeof(u64))
+ return;
+
+ adapter->supp_rxdids = *(u64 *)msg;
+
+ break;
+ case VIRTCHNL_OP_1588_PTP_GET_CAPS:
+ if (msglen != sizeof(adapter->ptp.hw_caps))
+ return;
+
+ adapter->ptp.hw_caps = *(struct virtchnl_ptp_caps *)msg;
+
+ /* process any state change needed due to new capabilities */
+ iavf_ptp_process_caps(adapter);
+ break;
+ case VIRTCHNL_OP_1588_PTP_GET_TIME:
+ iavf_virtchnl_ptp_get_time(adapter, msg, msglen);
+ break;
case VIRTCHNL_OP_ENABLE_QUEUES:
/* enable transmits */
iavf_irq_enable(adapter, true);
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c
index dbdb83567364..fcb199efbea5 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink.c
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c
@@ -1205,6 +1205,25 @@ static int ice_devlink_set_parent(struct devlink_rate *devlink_rate,
return status;
}
+static void ice_set_min_max_msix(struct ice_pf *pf)
+{
+ struct devlink *devlink = priv_to_devlink(pf);
+ union devlink_param_value val;
+ int err;
+
+ err = devl_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
+ &val);
+ if (!err)
+ pf->msix.min = val.vu32;
+
+ err = devl_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
+ &val);
+ if (!err)
+ pf->msix.max = val.vu32;
+}
+
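/*
 * [Editor's note -- not part of this patch] These are the generic devlink
 * driverinit parameters, so from userspace they would be configured along
 * the lines of (device address and value illustrative):
 *
 *   devlink dev param set pci/0000:18:00.0 \
 *           name msix_vec_per_pf_max value 128 cmode driverinit
 *   devlink dev reload pci/0000:18:00.0
 *
 * Driverinit values only take effect on reload/reinit, which is why they
 * are read back here in the reinit path.
 */
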
/**
* ice_devlink_reinit_up - do reinit of the given PF
* @pf: pointer to the PF struct
@@ -1220,6 +1239,9 @@ static int ice_devlink_reinit_up(struct ice_pf *pf)
return err;
}
+ /* load MSI-X values */
+ ice_set_min_max_msix(pf);
+
err = ice_init_dev(pf);
if (err)
goto unroll_hw_init;
@@ -1533,6 +1555,43 @@ static int ice_devlink_local_fwd_validate(struct devlink *devlink, u32 id,
return 0;
}
+static int
+ice_devlink_msix_max_pf_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+
+ if (val.vu32 > pf->hw.func_caps.common_cap.num_msix_vectors)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+ice_devlink_msix_min_pf_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ if (val.vu32 < ICE_MIN_MSIX)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ice_devlink_enable_rdma_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+ bool new_state = val.vbool;
+
+ if (new_state && !test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
enum ice_param_id {
ICE_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS,
@@ -1548,6 +1607,17 @@ static const struct devlink_param ice_dvl_rdma_params[] = {
ice_devlink_enable_iw_get,
ice_devlink_enable_iw_set,
ice_devlink_enable_iw_validate),
+ DEVLINK_PARAM_GENERIC(ENABLE_RDMA, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, ice_devlink_enable_rdma_validate),
+};
+
+static const struct devlink_param ice_dvl_msix_params[] = {
+ DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MAX,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, ice_devlink_msix_max_pf_validate),
+ DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MIN,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, ice_devlink_msix_min_pf_validate),
};
static const struct devlink_param ice_dvl_sched_params[] = {
@@ -1651,6 +1721,7 @@ void ice_devlink_unregister(struct ice_pf *pf)
int ice_devlink_register_params(struct ice_pf *pf)
{
struct devlink *devlink = priv_to_devlink(pf);
+ union devlink_param_value value;
struct ice_hw *hw = &pf->hw;
int status;
@@ -1659,10 +1730,39 @@ int ice_devlink_register_params(struct ice_pf *pf)
if (status)
return status;
+ status = devl_params_register(devlink, ice_dvl_msix_params,
+ ARRAY_SIZE(ice_dvl_msix_params));
+ if (status)
+ goto unregister_rdma_params;
+
if (hw->func_caps.common_cap.tx_sched_topo_comp_mode_en)
status = devl_params_register(devlink, ice_dvl_sched_params,
ARRAY_SIZE(ice_dvl_sched_params));
+ if (status)
+ goto unregister_msix_params;
+
+ value.vu32 = pf->msix.max;
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
+ value);
+ value.vu32 = pf->msix.min;
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
+ value);
+
+ value.vbool = test_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
+ value);
+
+ return 0;
+unregister_msix_params:
+ devl_params_unregister(devlink, ice_dvl_msix_params,
+ ARRAY_SIZE(ice_dvl_msix_params));
+unregister_rdma_params:
+ devl_params_unregister(devlink, ice_dvl_rdma_params,
+ ARRAY_SIZE(ice_dvl_rdma_params));
return status;
}
@@ -1673,6 +1773,8 @@ void ice_devlink_unregister_params(struct ice_pf *pf)
devl_params_unregister(devlink, ice_dvl_rdma_params,
ARRAY_SIZE(ice_dvl_rdma_params));
+ devl_params_unregister(devlink, ice_dvl_msix_params,
+ ARRAY_SIZE(ice_dvl_msix_params));
if (hw->func_caps.common_cap.tx_sched_topo_comp_mode_en)
devl_params_unregister(devlink, ice_dvl_sched_params,
diff --git a/drivers/net/ethernet/intel/ice/devlink/health.c b/drivers/net/ethernet/intel/ice/devlink/health.c
index ea40f7941259..19c3d37aa768 100644
--- a/drivers/net/ethernet/intel/ice/devlink/health.c
+++ b/drivers/net/ethernet/intel/ice/devlink/health.c
@@ -25,10 +25,10 @@ struct ice_health_status {
 * The lookup table below must be kept sorted by code.
*/
-static const char *const ice_common_port_solutions =
+static const char ice_common_port_solutions[] =
"Check your cable connection. Change or replace the module or cable. Manually set speed and duplex.";
-static const char *const ice_port_number_label = "Port Number";
-static const char *const ice_update_nvm_solution = "Update to the latest NVM image.";
+static const char ice_port_number_label[] = "Port Number";
+static const char ice_update_nvm_solution[] = "Update to the latest NVM image.";
static const struct ice_health_status ice_health_status_lookup[] = {
{ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_STRICT, "An unsupported module was detected.",
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 71e05d30f0fd..fd083647c14a 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -97,9 +97,6 @@
#define ICE_MIN_LAN_OICR_MSIX 1
#define ICE_MIN_MSIX (ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX)
#define ICE_FDIR_MSIX 2
-#define ICE_RDMA_NUM_AEQ_MSIX 4
-#define ICE_MIN_RDMA_MSIX 2
-#define ICE_ESWITCH_MSIX 1
#define ICE_NO_VSI 0xffff
#define ICE_VSI_MAP_CONTIG 0
#define ICE_VSI_MAP_SCATTER 1
@@ -204,6 +201,7 @@ enum ice_feature {
ICE_F_SMA_CTRL,
ICE_F_CGU,
ICE_F_GNSS,
+ ICE_F_GCS,
ICE_F_ROCE_LAG,
ICE_F_SRIOV_LAG,
ICE_F_MBX_LIMIT,
@@ -478,9 +476,6 @@ struct ice_q_vector {
struct ice_ring_container rx;
struct ice_ring_container tx;
- cpumask_t affinity_mask;
- struct irq_affinity_notify affinity_notify;
-
struct ice_channel *ch;
char name[ICE_INT_NAME_STR_LEN];
@@ -542,6 +537,14 @@ struct ice_agg_node {
u8 valid;
};
+struct ice_pf_msix {
+ u32 cur;
+ u32 min;
+ u32 max;
+ u32 total;
+ u32 rest;
+};
+
struct ice_pf {
struct pci_dev *pdev;
struct ice_adapter *adapter;
@@ -556,13 +559,7 @@ struct ice_pf {
/* OS reserved IRQ details */
struct msix_entry *msix_entries;
struct ice_irq_tracker irq_tracker;
- /* First MSIX vector used by SR-IOV VFs. Calculated by subtracting the
- * number of MSIX vectors needed for all SR-IOV VFs from the number of
- * MSIX vectors allowed on this PF.
- */
- u16 sriov_base_vector;
- unsigned long *sriov_irq_bm; /* bitmap to track irq usage */
- u16 sriov_irq_size; /* size of the irq_bm bitmap */
+ struct ice_virt_irq_tracker virt_irq_tracker;
u16 ctrl_vsi_idx; /* control VSI index in pf->vsi array */
@@ -612,7 +609,7 @@ struct ice_pf {
struct msi_map ll_ts_irq; /* LL_TS interrupt MSIX vector */
u16 max_pf_txqs; /* Total Tx queues PF wide */
u16 max_pf_rxqs; /* Total Rx queues PF wide */
- u16 num_lan_msix; /* Total MSIX vectors for base driver */
+ struct ice_pf_msix msix;
u16 num_lan_tx; /* num LAN Tx queues setup */
u16 num_lan_rx; /* num LAN Rx queues setup */
u16 next_vsi; /* Next free slot in pf->vsi[] - 0-based! */
@@ -1047,10 +1044,5 @@ static inline void ice_clear_rdma_cap(struct ice_pf *pf)
clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
}
-static inline enum ice_phy_model ice_get_phy_model(const struct ice_hw *hw)
-{
- return hw->ptp.phy_model;
-}
-
extern const struct xdp_metadata_ops ice_xdp_md_ops;
#endif /* _ICE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c
index 405ddd17de1b..2bc5c7f59844 100644
--- a/drivers/net/ethernet/intel/ice/ice_arfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.c
@@ -571,25 +571,6 @@ void ice_clear_arfs(struct ice_vsi *vsi)
}
/**
- * ice_free_cpu_rx_rmap - free setup CPU reverse map
- * @vsi: the VSI to be forwarded to
- */
-void ice_free_cpu_rx_rmap(struct ice_vsi *vsi)
-{
- struct net_device *netdev;
-
- if (!vsi || vsi->type != ICE_VSI_PF)
- return;
-
- netdev = vsi->netdev;
- if (!netdev || !netdev->rx_cpu_rmap)
- return;
-
- free_irq_cpu_rmap(netdev->rx_cpu_rmap);
- netdev->rx_cpu_rmap = NULL;
-}
-
-/**
* ice_set_cpu_rx_rmap - setup CPU reverse map for each queue
* @vsi: the VSI to be forwarded to
*/
@@ -597,7 +578,6 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
{
struct net_device *netdev;
struct ice_pf *pf;
- int i;
if (!vsi || vsi->type != ICE_VSI_PF)
return 0;
@@ -610,18 +590,7 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
netdev_dbg(netdev, "Setup CPU RMAP: vsi type 0x%x, ifname %s, q_vectors %d\n",
vsi->type, netdev->name, vsi->num_q_vectors);
- netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(vsi->num_q_vectors);
- if (unlikely(!netdev->rx_cpu_rmap))
- return -EINVAL;
-
- ice_for_each_q_vector(vsi, i)
- if (irq_cpu_rmap_add(netdev->rx_cpu_rmap,
- vsi->q_vectors[i]->irq.virq)) {
- ice_free_cpu_rx_rmap(vsi);
- return -EINVAL;
- }
-
- return 0;
+ return netif_enable_cpu_rmap(netdev, vsi->num_q_vectors);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.h b/drivers/net/ethernet/intel/ice/ice_arfs.h
index 9669ad9bf7b5..9706293128c3 100644
--- a/drivers/net/ethernet/intel/ice/ice_arfs.h
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.h
@@ -45,7 +45,6 @@ int
ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb,
u16 rxq_idx, u32 flow_id);
void ice_clear_arfs(struct ice_vsi *vsi);
-void ice_free_cpu_rx_rmap(struct ice_vsi *vsi);
void ice_init_arfs(struct ice_vsi *vsi);
void ice_sync_arfs_fltrs(struct ice_pf *pf);
int ice_set_cpu_rx_rmap(struct ice_vsi *vsi);
@@ -56,7 +55,6 @@ ice_is_arfs_using_perfect_flow(struct ice_hw *hw,
enum ice_fltr_ptype flow_type);
#else
static inline void ice_clear_arfs(struct ice_vsi *vsi) { }
-static inline void ice_free_cpu_rx_rmap(struct ice_vsi *vsi) { }
static inline void ice_init_arfs(struct ice_vsi *vsi) { }
static inline void ice_sync_arfs_fltrs(struct ice_pf *pf) { }
static inline void ice_remove_arfs(struct ice_pf *pf) { }
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index b2af8e3586f7..6db4ad8fc70b 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -147,10 +147,6 @@ skip_alloc:
q_vector->reg_idx = q_vector->irq.index;
q_vector->vf_reg_idx = q_vector->irq.index;
- /* only set affinity_mask if the CPU is online */
- if (cpu_online(v_idx))
- cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
-
/* This will not be called in the driver load path because the netdev
* will not be created yet. All other cases with register the NAPI
* handler here (i.e. resume, reset/rebuild, etc.)
@@ -276,7 +272,8 @@ static void ice_cfg_xps_tx_ring(struct ice_tx_ring *ring)
if (test_and_set_bit(ICE_TX_XPS_INIT_DONE, ring->xps_state))
return;
- netif_set_xps_queue(ring->netdev, &ring->q_vector->affinity_mask,
+ netif_set_xps_queue(ring->netdev,
+ &ring->q_vector->napi.config->affinity_mask,
ring->q_index);
}
@@ -473,9 +470,6 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
*/
if (vsi->type != ICE_VSI_VF)
ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true);
- else
- ice_write_qrxflxp_cntxt(hw, pf_q, ICE_RXDID_LEGACY_1, 0x3,
- false);
/* Absolute queue number out of 2K needs to be passed */
err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
@@ -801,13 +795,11 @@ int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
return 0;
err_out:
- while (v_idx--)
- ice_free_q_vector(vsi, v_idx);
- dev_err(dev, "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
- vsi->num_q_vectors, vsi->vsi_num, err);
- vsi->num_q_vectors = 0;
- return err;
+ dev_info(dev, "Failed to allocate %d q_vectors for VSI %d, new value %d",
+ vsi->num_q_vectors, vsi->vsi_num, v_idx);
+ vsi->num_q_vectors = v_idx;
+ return v_idx ? 0 : err;
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 7a2a2e8da8fa..59df31c2c83f 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -186,7 +186,7 @@ static int ice_set_mac_type(struct ice_hw *hw)
* ice_is_generic_mac - check if device's mac_type is generic
* @hw: pointer to the hardware structure
*
- * Return: true if mac_type is generic (with SBQ support), false if not
+ * Return: true if mac_type is ICE_MAC_GENERIC*, false otherwise.
*/
bool ice_is_generic_mac(struct ice_hw *hw)
{
@@ -195,120 +195,6 @@ bool ice_is_generic_mac(struct ice_hw *hw)
}
/**
- * ice_is_e810
- * @hw: pointer to the hardware structure
- *
- * returns true if the device is E810 based, false if not.
- */
-bool ice_is_e810(struct ice_hw *hw)
-{
- return hw->mac_type == ICE_MAC_E810;
-}
-
-/**
- * ice_is_e810t
- * @hw: pointer to the hardware structure
- *
- * returns true if the device is E810T based, false if not.
- */
-bool ice_is_e810t(struct ice_hw *hw)
-{
- switch (hw->device_id) {
- case ICE_DEV_ID_E810C_SFP:
- switch (hw->subsystem_device_id) {
- case ICE_SUBDEV_ID_E810T:
- case ICE_SUBDEV_ID_E810T2:
- case ICE_SUBDEV_ID_E810T3:
- case ICE_SUBDEV_ID_E810T4:
- case ICE_SUBDEV_ID_E810T6:
- case ICE_SUBDEV_ID_E810T7:
- return true;
- }
- break;
- case ICE_DEV_ID_E810C_QSFP:
- switch (hw->subsystem_device_id) {
- case ICE_SUBDEV_ID_E810T2:
- case ICE_SUBDEV_ID_E810T3:
- case ICE_SUBDEV_ID_E810T5:
- return true;
- }
- break;
- default:
- break;
- }
-
- return false;
-}
-
-/**
- * ice_is_e822 - Check if a device is E822 family device
- * @hw: pointer to the hardware structure
- *
- * Return: true if the device is E822 based, false if not.
- */
-bool ice_is_e822(struct ice_hw *hw)
-{
- switch (hw->device_id) {
- case ICE_DEV_ID_E822C_BACKPLANE:
- case ICE_DEV_ID_E822C_QSFP:
- case ICE_DEV_ID_E822C_SFP:
- case ICE_DEV_ID_E822C_10G_BASE_T:
- case ICE_DEV_ID_E822C_SGMII:
- case ICE_DEV_ID_E822L_BACKPLANE:
- case ICE_DEV_ID_E822L_SFP:
- case ICE_DEV_ID_E822L_10G_BASE_T:
- case ICE_DEV_ID_E822L_SGMII:
- return true;
- default:
- return false;
- }
-}
-
-/**
- * ice_is_e823
- * @hw: pointer to the hardware structure
- *
- * returns true if the device is E823-L or E823-C based, false if not.
- */
-bool ice_is_e823(struct ice_hw *hw)
-{
- switch (hw->device_id) {
- case ICE_DEV_ID_E823L_BACKPLANE:
- case ICE_DEV_ID_E823L_SFP:
- case ICE_DEV_ID_E823L_10G_BASE_T:
- case ICE_DEV_ID_E823L_1GBE:
- case ICE_DEV_ID_E823L_QSFP:
- case ICE_DEV_ID_E823C_BACKPLANE:
- case ICE_DEV_ID_E823C_QSFP:
- case ICE_DEV_ID_E823C_SFP:
- case ICE_DEV_ID_E823C_10G_BASE_T:
- case ICE_DEV_ID_E823C_SGMII:
- return true;
- default:
- return false;
- }
-}
-
-/**
- * ice_is_e825c - Check if a device is E825C family device
- * @hw: pointer to the hardware structure
- *
- * Return: true if the device is E825-C based, false if not.
- */
-bool ice_is_e825c(struct ice_hw *hw)
-{
- switch (hw->device_id) {
- case ICE_DEV_ID_E825C_BACKPLANE:
- case ICE_DEV_ID_E825C_QSFP:
- case ICE_DEV_ID_E825C_SFP:
- case ICE_DEV_ID_E825C_SGMII:
- return true;
- default:
- return false;
- }
-}
-
-/**
* ice_is_pf_c827 - check if pf contains c827 phy
* @hw: pointer to the hw struct
*
@@ -2271,7 +2157,8 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
caps->nvm_unified_update);
break;
case ICE_AQC_CAPS_RDMA:
- caps->rdma = (number == 1);
+ if (IS_ENABLED(CONFIG_INFINIBAND_IRDMA))
+ caps->rdma = (number == 1);
ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma);
break;
case ICE_AQC_CAPS_MAX_MTU:
@@ -2408,7 +2295,7 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0);
info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0);
- if (!ice_is_e825c(hw)) {
+ if (hw->mac_type != ICE_MAC_GENERIC_3K_E825) {
info->clk_freq = FIELD_GET(ICE_TS_CLK_FREQ_M, number);
info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
} else {
@@ -5765,6 +5652,96 @@ ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
}
/**
+ * ice_get_pca9575_handle - find and return the PCA9575 controller
+ * @hw: pointer to the hw struct
+ * @pca9575_handle: GPIO controller's handle
+ *
+ * Find and return the GPIO controller's handle in the netlist.
+ * Once found, the handle is cached in the hw structure and subsequent
+ * calls return the cached value.
+ *
+ * Return: 0 on success, -ENXIO when there's no PCA9575 present.
+ */
+int ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
+{
+ struct ice_aqc_get_link_topo *cmd;
+ struct ice_aq_desc desc;
+ int err;
+ u8 idx;
+
+ /* If handle was read previously return cached value */
+ if (hw->io_expander_handle) {
+ *pca9575_handle = hw->io_expander_handle;
+ return 0;
+ }
+
+#define SW_PCA9575_SFP_TOPO_IDX 2
+#define SW_PCA9575_QSFP_TOPO_IDX 1
+
+ /* Check if the SW IO expander controlling SMA exists in the netlist. */
+ if (hw->device_id == ICE_DEV_ID_E810C_SFP)
+ idx = SW_PCA9575_SFP_TOPO_IDX;
+ else if (hw->device_id == ICE_DEV_ID_E810C_QSFP)
+ idx = SW_PCA9575_QSFP_TOPO_IDX;
+ else
+ return -ENXIO;
+
+ /* If handle was not detected read it from the netlist */
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
+ cmd = &desc.params.get_link_topo;
+ cmd->addr.topo_params.node_type_ctx =
+ ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL;
+ cmd->addr.topo_params.index = idx;
+
+ err = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (err)
+ return -ENXIO;
+
+ /* Verify if we found the right IO expander type */
+ if (desc.params.get_link_topo.node_part_num !=
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575)
+ return -ENXIO;
+
+ /* If present save the handle and return it */
+ hw->io_expander_handle =
+ le16_to_cpu(desc.params.get_link_topo.addr.handle);
+ *pca9575_handle = hw->io_expander_handle;
+
+ return 0;
+}
+
+/**
+ * ice_read_pca9575_reg - read the register from the PCA9575 controller
+ * @hw: pointer to the hw struct
+ * @offset: GPIO controller register offset
+ * @data: pointer to data to be read from the GPIO controller
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data)
+{
+ struct ice_aqc_link_topo_addr link_topo;
+ __le16 addr;
+ u16 handle;
+ int err;
+
+ memset(&link_topo, 0, sizeof(link_topo));
+
+ err = ice_get_pca9575_handle(hw, &handle);
+ if (err)
+ return err;
+
+ link_topo.handle = cpu_to_le16(handle);
+ link_topo.topo_params.node_type_ctx =
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
+ ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED);
+
+ addr = cpu_to_le16((u16)offset);
+
+ return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL);
+}
+
+/**
* ice_aq_set_gpio
* @hw: pointer to the hw struct
* @gpio_ctrl_handle: GPIO controller node handle
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 15ba38543738..9b00aa0ddf10 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -131,7 +131,6 @@ int
ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd);
bool ice_is_generic_mac(struct ice_hw *hw);
-bool ice_is_e810(struct ice_hw *hw);
int ice_clear_pf_cfg(struct ice_hw *hw);
int
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
@@ -276,10 +275,6 @@ ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
-bool ice_is_e810t(struct ice_hw *hw);
-bool ice_is_e822(struct ice_hw *hw);
-bool ice_is_e823(struct ice_hw *hw);
-bool ice_is_e825c(struct ice_hw *hw);
int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf);
@@ -306,5 +301,7 @@ int
ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
u16 bus_addr, __le16 addr, u8 params, const u8 *data,
struct ice_sq_cd *cd);
+int ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle);
+int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data);
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw);
#endif /* _ICE_COMMON_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
index 03988be03729..69d5b1a28491 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -2345,14 +2345,14 @@ ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
cmd->set_flags |= ICE_AQC_TX_TOPO_FLAGS_SRC_RAM |
ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW;
- if (ice_is_e825c(hw))
+ if (hw->mac_type == ICE_MAC_GENERIC_3K_E825)
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
} else {
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_tx_topo);
cmd->get_flags = ICE_AQC_TX_TOPO_GET_RAM;
}
- if (!ice_is_e825c(hw))
+ if (hw->mac_type != ICE_MAC_GENERIC_3K_E825)
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c
index 8d806d8ad761..bce3ad6ca2a6 100644
--- a/drivers/net/ethernet/intel/ice/ice_dpll.c
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.c
@@ -95,7 +95,7 @@ ice_dpll_pin_freq_set(struct ice_pf *pf, struct ice_dpll_pin *pin,
}
if (ret) {
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to set pin freq:%u on pin:%u\n",
+ "err:%d %s failed to set pin freq:%u on pin:%u",
ret,
ice_aq_str(pf->hw.adminq.sq_last_status),
freq, pin->idx);
@@ -322,7 +322,7 @@ ice_dpll_pin_enable(struct ice_hw *hw, struct ice_dpll_pin *pin,
}
if (ret)
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to enable %s pin:%u\n",
+ "err:%d %s failed to enable %s pin:%u",
ret, ice_aq_str(hw->adminq.sq_last_status),
pin_type_name[pin_type], pin->idx);
@@ -367,7 +367,7 @@ ice_dpll_pin_disable(struct ice_hw *hw, struct ice_dpll_pin *pin,
}
if (ret)
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to disable %s pin:%u\n",
+ "err:%d %s failed to disable %s pin:%u",
ret, ice_aq_str(hw->adminq.sq_last_status),
pin_type_name[pin_type], pin->idx);
@@ -479,7 +479,7 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin,
err:
if (extack)
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to update %s pin:%u\n",
+ "err:%d %s failed to update %s pin:%u",
ret,
ice_aq_str(pf->hw.adminq.sq_last_status),
pin_type_name[pin_type], pin->idx);
@@ -518,7 +518,7 @@ ice_dpll_hw_input_prio_set(struct ice_pf *pf, struct ice_dpll *dpll,
(u8)prio);
if (ret)
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to set pin prio:%u on pin:%u\n",
+ "err:%d %s failed to set pin prio:%u on pin:%u",
ret,
ice_aq_str(pf->hw.adminq.sq_last_status),
prio, pin->idx);
@@ -1004,7 +1004,7 @@ ice_dpll_pin_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv,
mutex_unlock(&pf->dplls.lock);
if (ret)
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to set pin phase_adjust:%d for pin:%u on dpll:%u\n",
+ "err:%d %s failed to set pin phase_adjust:%d for pin:%u on dpll:%u",
ret,
ice_aq_str(pf->hw.adminq.sq_last_status),
phase_adjust, p->idx, d->dpll_idx);
@@ -1362,7 +1362,7 @@ ice_dpll_rclk_state_on_pin_set(const struct dpll_pin *pin, void *pin_priv,
&p->freq);
if (ret)
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to set pin state:%u for pin:%u on parent:%u\n",
+ "err:%d %s failed to set pin state:%u for pin:%u on parent:%u",
ret,
ice_aq_str(pf->hw.adminq.sq_last_status),
state, p->idx, parent->idx);
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index f241493a6ac8..7c2dc347e4e5 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -3788,8 +3788,7 @@ ice_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info)
*/
static int ice_get_max_txq(struct ice_pf *pf)
{
- return min3(pf->num_lan_msix, (u16)num_online_cpus(),
- (u16)pf->hw.func_caps.common_cap.num_txq);
+ return min(num_online_cpus(), pf->hw.func_caps.common_cap.num_txq);
}
/**
@@ -3798,8 +3797,7 @@ static int ice_get_max_txq(struct ice_pf *pf)
*/
static int ice_get_max_rxq(struct ice_pf *pf)
{
- return min3(pf->num_lan_msix, (u16)num_online_cpus(),
- (u16)pf->hw.func_caps.common_cap.num_rxq);
+ return min(num_online_cpus(), pf->hw.func_caps.common_cap.num_rxq);
}
/**
@@ -3817,8 +3815,7 @@ static u32 ice_get_combined_cnt(struct ice_vsi *vsi)
ice_for_each_q_vector(vsi, q_idx) {
struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
- if (q_vector->rx.rx_ring && q_vector->tx.tx_ring)
- combined++;
+ combined += min(q_vector->num_ring_tx, q_vector->num_ring_rx);
}
return combined;
@@ -4773,7 +4770,7 @@ static const struct ethtool_ops ice_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE |
ETHTOOL_COALESCE_RX_USECS_HIGH,
- .cap_rss_sym_xor_supported = true,
+ .supported_input_xfrm = RXH_XFRM_SYM_XOR,
.rxfh_per_ctx_key = true,
.get_link_ksettings = ice_get_link_ksettings,
.set_link_ksettings = ice_set_link_ksettings,
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
index ee9862ddfe15..1d118171de37 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -1605,22 +1605,19 @@ void ice_fdir_replay_fltrs(struct ice_pf *pf)
*/
int ice_fdir_create_dflt_rules(struct ice_pf *pf)
{
+ const enum ice_fltr_ptype dflt_rules[] = {
+ ICE_FLTR_PTYPE_NONF_IPV4_TCP, ICE_FLTR_PTYPE_NONF_IPV4_UDP,
+ ICE_FLTR_PTYPE_NONF_IPV6_TCP, ICE_FLTR_PTYPE_NONF_IPV6_UDP,
+ };
int err;
/* Create perfect TCP and UDP rules in hardware. */
- err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_TCP);
- if (err)
- return err;
-
- err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_UDP);
- if (err)
- return err;
+ for (int i = 0; i < ARRAY_SIZE(dflt_rules); i++) {
+ err = ice_create_init_fdir_rule(pf, dflt_rules[i]);
- err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_TCP);
- if (err)
- return err;
-
- err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_UDP);
+ if (err)
+ break;
+ }
return err;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c
index b2148dbe49b2..6b26290452d4 100644
--- a/drivers/net/ethernet/intel/ice/ice_gnss.c
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.c
@@ -381,32 +381,23 @@ void ice_gnss_exit(struct ice_pf *pf)
}
/**
- * ice_gnss_is_gps_present - Check if GPS HW is present
+ * ice_gnss_is_module_present - Check if GNSS HW is present
* @hw: pointer to HW struct
+ *
+ * Return: true when GNSS is present, false otherwise.
*/
-bool ice_gnss_is_gps_present(struct ice_hw *hw)
+bool ice_gnss_is_module_present(struct ice_hw *hw)
{
- if (!hw->func_caps.ts_func_info.src_tmr_owned)
- return false;
+ int err;
+ u8 data;
- if (!ice_is_gps_in_netlist(hw))
+ if (!hw->func_caps.ts_func_info.src_tmr_owned ||
+ !ice_is_gps_in_netlist(hw))
return false;
-#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
- if (ice_is_e810t(hw)) {
- int err;
- u8 data;
-
- err = ice_read_pca9575_reg(hw, ICE_PCA9575_P0_IN, &data);
- if (err || !!(data & ICE_P0_GNSS_PRSNT_N))
- return false;
- } else {
- return false;
- }
-#else
- if (!ice_is_e810t(hw))
+ err = ice_read_pca9575_reg(hw, ICE_PCA9575_P0_IN, &data);
+ if (err || !!(data & ICE_P0_GNSS_PRSNT_N))
return false;
-#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
return true;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.h b/drivers/net/ethernet/intel/ice/ice_gnss.h
index 75e567ad7059..15daf603ed7b 100644
--- a/drivers/net/ethernet/intel/ice/ice_gnss.h
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.h
@@ -37,11 +37,11 @@ struct gnss_serial {
#if IS_ENABLED(CONFIG_GNSS)
void ice_gnss_init(struct ice_pf *pf);
void ice_gnss_exit(struct ice_pf *pf);
-bool ice_gnss_is_gps_present(struct ice_hw *hw);
+bool ice_gnss_is_module_present(struct ice_hw *hw);
#else
static inline void ice_gnss_init(struct ice_pf *pf) { }
static inline void ice_gnss_exit(struct ice_pf *pf) { }
-static inline bool ice_gnss_is_gps_present(struct ice_hw *hw)
+static inline bool ice_gnss_is_module_present(struct ice_hw *hw)
{
return false;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index dc88aea9f473..aa4bfbcf85d2 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -541,10 +541,22 @@
#define PFPM_WUS_MAG_M BIT(1)
#define PFPM_WUS_MNG_M BIT(3)
#define PFPM_WUS_FW_RST_WK_M BIT(31)
+#define E830_PRTMAC_TS_TX_MEM_VALID_H 0x001E2020
+#define E830_PRTMAC_TS_TX_MEM_VALID_L 0x001E2000
#define E830_PRTMAC_CL01_PS_QNT 0x001E32A0
#define E830_PRTMAC_CL01_PS_QNT_CL0_M GENMASK(15, 0)
#define E830_PRTMAC_CL01_QNT_THR 0x001E3320
#define E830_PRTMAC_CL01_QNT_THR_CL0_M GENMASK(15, 0)
+#define E830_PRTTSYN_TXTIME_H(_i) (0x001E5800 + ((_i) * 32))
+#define E830_PRTTSYN_TXTIME_L(_i) (0x001E5000 + ((_i) * 32))
+#define E830_GLPTM_ART_CTL 0x00088B50
+#define E830_GLPTM_ART_CTL_ACTIVE_M BIT(0)
+#define E830_GLPTM_ART_TIME_H 0x00088B54
+#define E830_GLPTM_ART_TIME_L 0x00088B58
+#define E830_GLTSYN_PTMTIME_H(_i) (0x00088B48 + ((_i) * 4))
+#define E830_GLTSYN_PTMTIME_L(_i) (0x00088B40 + ((_i) * 4))
+#define E830_PFPTM_SEM 0x00088B00
+#define E830_PFPTM_SEM_BUSY_M BIT(0)
#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4))
#define VFINT_DYN_CTLN_CLEARPBA_M BIT(1)
#define E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH 0x00234000
diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c
index 145b27f2a4ce..bab3e81cad5d 100644
--- a/drivers/net/ethernet/intel/ice/ice_idc.c
+++ b/drivers/net/ethernet/intel/ice/ice_idc.c
@@ -228,61 +228,34 @@ void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos)
}
EXPORT_SYMBOL_GPL(ice_get_qos_params);
-/**
- * ice_alloc_rdma_qvectors - Allocate vector resources for RDMA driver
- * @pf: board private structure to initialize
- */
-static int ice_alloc_rdma_qvectors(struct ice_pf *pf)
+int ice_alloc_rdma_qvector(struct ice_pf *pf, struct msix_entry *entry)
{
- if (ice_is_rdma_ena(pf)) {
- int i;
-
- pf->msix_entries = kcalloc(pf->num_rdma_msix,
- sizeof(*pf->msix_entries),
- GFP_KERNEL);
- if (!pf->msix_entries)
- return -ENOMEM;
+ struct msi_map map = ice_alloc_irq(pf, true);
- /* RDMA is the only user of pf->msix_entries array */
- pf->rdma_base_vector = 0;
-
- for (i = 0; i < pf->num_rdma_msix; i++) {
- struct msix_entry *entry = &pf->msix_entries[i];
- struct msi_map map;
+ if (map.index < 0)
+ return -ENOMEM;
- map = ice_alloc_irq(pf, false);
- if (map.index < 0)
- break;
+ entry->entry = map.index;
+ entry->vector = map.virq;
- entry->entry = map.index;
- entry->vector = map.virq;
- }
- }
return 0;
}
+EXPORT_SYMBOL_GPL(ice_alloc_rdma_qvector);
/**
* ice_free_rdma_qvector - free vector resources reserved for RDMA driver
* @pf: board private structure to initialize
+ * @entry: MSI-X entry to be removed
*/
-static void ice_free_rdma_qvector(struct ice_pf *pf)
+void ice_free_rdma_qvector(struct ice_pf *pf, struct msix_entry *entry)
{
- int i;
-
- if (!pf->msix_entries)
- return;
-
- for (i = 0; i < pf->num_rdma_msix; i++) {
- struct msi_map map;
+ struct msi_map map;
- map.index = pf->msix_entries[i].entry;
- map.virq = pf->msix_entries[i].vector;
- ice_free_irq(pf, map);
- }
-
- kfree(pf->msix_entries);
- pf->msix_entries = NULL;
+ map.index = entry->entry;
+ map.virq = entry->vector;
+ ice_free_irq(pf, map);
}
+EXPORT_SYMBOL_GPL(ice_free_rdma_qvector);
/**
* ice_adev_release - function to be mapped to AUX dev's release op
@@ -382,12 +355,6 @@ int ice_init_rdma(struct ice_pf *pf)
return -ENOMEM;
}
- /* Reserve vector resources */
- ret = ice_alloc_rdma_qvectors(pf);
- if (ret < 0) {
- dev_err(dev, "failed to reserve vectors for RDMA\n");
- goto err_reserve_rdma_qvector;
- }
pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2;
ret = ice_plug_aux_dev(pf);
if (ret)
@@ -395,8 +362,6 @@ int ice_init_rdma(struct ice_pf *pf)
return 0;
err_plug_aux_dev:
- ice_free_rdma_qvector(pf);
-err_reserve_rdma_qvector:
pf->adev = NULL;
xa_erase(&ice_aux_id, pf->aux_idx);
return ret;
@@ -412,6 +377,5 @@ void ice_deinit_rdma(struct ice_pf *pf)
return;
ice_unplug_aux_dev(pf);
- ice_free_rdma_qvector(pf);
xa_erase(&ice_aux_id, pf->aux_idx);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_irq.c b/drivers/net/ethernet/intel/ice/ice_irq.c
index ad82ff7d1995..30801fd375f0 100644
--- a/drivers/net/ethernet/intel/ice/ice_irq.c
+++ b/drivers/net/ethernet/intel/ice/ice_irq.c
@@ -20,6 +20,19 @@ ice_init_irq_tracker(struct ice_pf *pf, unsigned int max_vectors,
xa_init_flags(&pf->irq_tracker.entries, XA_FLAGS_ALLOC);
}
+static int
+ice_init_virt_irq_tracker(struct ice_pf *pf, u32 base, u32 num_entries)
+{
+ pf->virt_irq_tracker.bm = bitmap_zalloc(num_entries, GFP_KERNEL);
+ if (!pf->virt_irq_tracker.bm)
+ return -ENOMEM;
+
+ pf->virt_irq_tracker.num_entries = num_entries;
+ pf->virt_irq_tracker.base = base;
+
+ return 0;
+}
+
/**
* ice_deinit_irq_tracker - free xarray tracker
* @pf: board private structure
@@ -29,6 +42,11 @@ static void ice_deinit_irq_tracker(struct ice_pf *pf)
xa_destroy(&pf->irq_tracker.entries);
}
+static void ice_deinit_virt_irq_tracker(struct ice_pf *pf)
+{
+ bitmap_free(pf->virt_irq_tracker.bm);
+}
+
/**
* ice_free_irq_res - free a block of resources
* @pf: board private structure
@@ -45,7 +63,7 @@ static void ice_free_irq_res(struct ice_pf *pf, u16 index)
/**
* ice_get_irq_res - get an interrupt resource
* @pf: board private structure
- * @dyn_only: force entry to be dynamically allocated
+ * @dyn_allowed: allow entry to be dynamically allocated
*
* Allocate new irq entry in the free slot of the tracker. Since xarray
* is used, always allocate new entry at the lowest possible index. Set
@@ -53,11 +71,12 @@ static void ice_free_irq_res(struct ice_pf *pf, u16 index)
*
* Returns allocated irq entry or NULL on failure.
*/
-static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only)
+static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf,
+ bool dyn_allowed)
{
- struct xa_limit limit = { .max = pf->irq_tracker.num_entries,
+ struct xa_limit limit = { .max = pf->irq_tracker.num_entries - 1,
.min = 0 };
- unsigned int num_static = pf->irq_tracker.num_static;
+ unsigned int num_static = pf->irq_tracker.num_static - 1;
struct ice_irq_entry *entry;
unsigned int index;
int ret;
@@ -66,9 +85,9 @@ static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only)
if (!entry)
return NULL;
- /* skip preallocated entries if the caller says so */
- if (dyn_only)
- limit.min = num_static;
+	/* restrict to preallocated entries if dynamic allocation is not allowed */
+ if (!dyn_allowed)
+ limit.max = num_static;
ret = xa_alloc(&pf->irq_tracker.entries, &index, entry, limit,
GFP_KERNEL);
@@ -78,161 +97,18 @@ static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only)
entry = NULL;
} else {
entry->index = index;
- entry->dynamic = index >= num_static;
+ entry->dynamic = index > num_static;
}
return entry;
}
-/**
- * ice_reduce_msix_usage - Reduce usage of MSI-X vectors
- * @pf: board private structure
- * @v_remain: number of remaining MSI-X vectors to be distributed
- *
- * Reduce the usage of MSI-X vectors when entire request cannot be fulfilled.
- * pf->num_lan_msix and pf->num_rdma_msix values are set based on number of
- * remaining vectors.
- */
-static void ice_reduce_msix_usage(struct ice_pf *pf, int v_remain)
-{
- int v_rdma;
-
- if (!ice_is_rdma_ena(pf)) {
- pf->num_lan_msix = v_remain;
- return;
- }
-
- /* RDMA needs at least 1 interrupt in addition to AEQ MSIX */
- v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
-
- if (v_remain < ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_RDMA_MSIX) {
- dev_warn(ice_pf_to_dev(pf), "Not enough MSI-X vectors to support RDMA.\n");
- clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
-
- pf->num_rdma_msix = 0;
- pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
- } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
- (v_remain - v_rdma < v_rdma)) {
- /* Support minimum RDMA and give remaining vectors to LAN MSIX
- */
- pf->num_rdma_msix = ICE_MIN_RDMA_MSIX;
- pf->num_lan_msix = v_remain - ICE_MIN_RDMA_MSIX;
- } else {
- /* Split remaining MSIX with RDMA after accounting for AEQ MSIX
- */
- pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
- ICE_RDMA_NUM_AEQ_MSIX;
- pf->num_lan_msix = v_remain - pf->num_rdma_msix;
- }
-}
-
-/**
- * ice_ena_msix_range - Request a range of MSIX vectors from the OS
- * @pf: board private structure
- *
- * Compute the number of MSIX vectors wanted and request from the OS. Adjust
- * device usage if there are not enough vectors. Return the number of vectors
- * reserved or negative on failure.
- */
-static int ice_ena_msix_range(struct ice_pf *pf)
+#define ICE_RDMA_AEQ_MSIX 1
+static int ice_get_default_msix_amount(struct ice_pf *pf)
{
- int num_cpus, hw_num_msix, v_other, v_wanted, v_actual;
- struct device *dev = ice_pf_to_dev(pf);
- int err;
-
- hw_num_msix = pf->hw.func_caps.common_cap.num_msix_vectors;
- num_cpus = num_online_cpus();
-
- /* LAN miscellaneous handler */
- v_other = ICE_MIN_LAN_OICR_MSIX;
-
- /* Flow Director */
- if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
- v_other += ICE_FDIR_MSIX;
-
- /* switchdev */
- v_other += ICE_ESWITCH_MSIX;
-
- v_wanted = v_other;
-
- /* LAN traffic */
- pf->num_lan_msix = num_cpus;
- v_wanted += pf->num_lan_msix;
-
- /* RDMA auxiliary driver */
- if (ice_is_rdma_ena(pf)) {
- pf->num_rdma_msix = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
- v_wanted += pf->num_rdma_msix;
- }
-
- if (v_wanted > hw_num_msix) {
- int v_remain;
-
- dev_warn(dev, "not enough device MSI-X vectors. wanted = %d, available = %d\n",
- v_wanted, hw_num_msix);
-
- if (hw_num_msix < ICE_MIN_MSIX) {
- err = -ERANGE;
- goto exit_err;
- }
-
- v_remain = hw_num_msix - v_other;
- if (v_remain < ICE_MIN_LAN_TXRX_MSIX) {
- v_other = ICE_MIN_MSIX - ICE_MIN_LAN_TXRX_MSIX;
- v_remain = ICE_MIN_LAN_TXRX_MSIX;
- }
-
- ice_reduce_msix_usage(pf, v_remain);
- v_wanted = pf->num_lan_msix + pf->num_rdma_msix + v_other;
-
- dev_notice(dev, "Reducing request to %d MSI-X vectors for LAN traffic.\n",
- pf->num_lan_msix);
- if (ice_is_rdma_ena(pf))
- dev_notice(dev, "Reducing request to %d MSI-X vectors for RDMA.\n",
- pf->num_rdma_msix);
- }
-
- /* actually reserve the vectors */
- v_actual = pci_alloc_irq_vectors(pf->pdev, ICE_MIN_MSIX, v_wanted,
- PCI_IRQ_MSIX);
- if (v_actual < 0) {
- dev_err(dev, "unable to reserve MSI-X vectors\n");
- err = v_actual;
- goto exit_err;
- }
-
- if (v_actual < v_wanted) {
- dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
- v_wanted, v_actual);
-
- if (v_actual < ICE_MIN_MSIX) {
- /* error if we can't get minimum vectors */
- pci_free_irq_vectors(pf->pdev);
- err = -ERANGE;
- goto exit_err;
- } else {
- int v_remain = v_actual - v_other;
-
- if (v_remain < ICE_MIN_LAN_TXRX_MSIX)
- v_remain = ICE_MIN_LAN_TXRX_MSIX;
-
- ice_reduce_msix_usage(pf, v_remain);
-
- dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
- pf->num_lan_msix);
-
- if (ice_is_rdma_ena(pf))
- dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n",
- pf->num_rdma_msix);
- }
- }
-
- return v_actual;
-
-exit_err:
- pf->num_rdma_msix = 0;
- pf->num_lan_msix = 0;
- return err;
+ return ICE_MIN_LAN_OICR_MSIX + num_online_cpus() +
+ (test_bit(ICE_FLAG_FD_ENA, pf->flags) ? ICE_FDIR_MSIX : 0) +
+ (ice_is_rdma_ena(pf) ? num_online_cpus() + ICE_RDMA_AEQ_MSIX : 0);
}
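
/*
 * [Editor's note -- not part of this patch] Worked example of the default
 * above: with 16 online CPUs, flow director enabled and RDMA enabled, the
 * PF requests 1 (OICR) + 16 (LAN) + 2 (FDIR) + 16 + 1 (RDMA + AEQ) = 36
 * MSI-X vectors.
 */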
/**
@@ -243,6 +119,7 @@ void ice_clear_interrupt_scheme(struct ice_pf *pf)
{
pci_free_irq_vectors(pf->pdev);
ice_deinit_irq_tracker(pf);
+ ice_deinit_virt_irq_tracker(pf);
}
/**
@@ -252,27 +129,38 @@ void ice_clear_interrupt_scheme(struct ice_pf *pf)
int ice_init_interrupt_scheme(struct ice_pf *pf)
{
int total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
- int vectors, max_vectors;
+ int vectors;
- vectors = ice_ena_msix_range(pf);
+ /* load default PF MSI-X range */
+ if (!pf->msix.min)
+ pf->msix.min = ICE_MIN_MSIX;
- if (vectors < 0)
- return -ENOMEM;
+ if (!pf->msix.max)
+ pf->msix.max = min(total_vectors,
+ ice_get_default_msix_amount(pf));
+
+ pf->msix.total = total_vectors;
+ pf->msix.rest = total_vectors - pf->msix.max;
if (pci_msix_can_alloc_dyn(pf->pdev))
- max_vectors = total_vectors;
+ vectors = pf->msix.min;
else
- max_vectors = vectors;
+ vectors = pf->msix.max;
+
+ vectors = pci_alloc_irq_vectors(pf->pdev, pf->msix.min, vectors,
+ PCI_IRQ_MSIX);
+ if (vectors < 0)
+ return vectors;
- ice_init_irq_tracker(pf, max_vectors, vectors);
+ ice_init_irq_tracker(pf, pf->msix.max, vectors);
- return 0;
+ return ice_init_virt_irq_tracker(pf, pf->msix.max, pf->msix.rest);
}
/**
* ice_alloc_irq - Allocate new interrupt vector
* @pf: board private structure
- * @dyn_only: force dynamic allocation of the interrupt
+ * @dyn_allowed: allow dynamic allocation of the interrupt
*
* Allocate new interrupt vector for a given owner id.
* return struct msi_map with interrupt details and track
@@ -285,27 +173,22 @@ int ice_init_interrupt_scheme(struct ice_pf *pf)
* interrupt will be allocated with pci_msix_alloc_irq_at.
*
* Some callers may only support dynamically allocated interrupts.
- * This is indicated with dyn_only flag.
+ * This is indicated with dyn_allowed flag.
*
* On failure, return map with negative .index. The caller
* is expected to check returned map index.
*
*/
-struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_only)
+struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_allowed)
{
- int sriov_base_vector = pf->sriov_base_vector;
struct msi_map map = { .index = -ENOENT };
struct device *dev = ice_pf_to_dev(pf);
struct ice_irq_entry *entry;
- entry = ice_get_irq_res(pf, dyn_only);
+ entry = ice_get_irq_res(pf, dyn_allowed);
if (!entry)
return map;
- /* fail if we're about to violate SRIOV vectors space */
- if (sriov_base_vector && entry->index >= sriov_base_vector)
- goto exit_free_res;
-
if (pci_msix_can_alloc_dyn(pf->pdev) && entry->dynamic) {
map = pci_msix_alloc_irq_at(pf->pdev, entry->index, NULL);
if (map.index < 0)
@@ -353,26 +236,40 @@ void ice_free_irq(struct ice_pf *pf, struct msi_map map)
}
/**
- * ice_get_max_used_msix_vector - Get the max used interrupt vector
- * @pf: board private structure
+ * ice_virt_get_irqs - get irqs for SR-IOV use case
+ * @pf: pointer to PF structure
+ * @needed: number of irqs to get
*
- * Return index of maximum used interrupt vectors with respect to the
- * beginning of the MSIX table. Take into account that some interrupts
- * may have been dynamically allocated after MSIX was initially enabled.
+ * Reserve a contiguous block of @needed entries in the SR-IOV interrupt
+ * bitmap. The returned value is the first MSI-X vector index in PF space
+ * that the VF may use; it is the index used when accessing PF relative
+ * registers such as GLINT_VECT2FUNC and GLINT_DYN_CTL.
+ *
+ * Return: first reserved MSI-X vector index on success, -ENOENT when no
+ * free block of @needed entries exists.
*/
-int ice_get_max_used_msix_vector(struct ice_pf *pf)
+int ice_virt_get_irqs(struct ice_pf *pf, u32 needed)
{
- unsigned long start, index, max_idx;
- void *entry;
+ int res = bitmap_find_next_zero_area(pf->virt_irq_tracker.bm,
+ pf->virt_irq_tracker.num_entries,
+ 0, needed, 0);
- /* Treat all preallocated interrupts as used */
- start = pf->irq_tracker.num_static;
- max_idx = start - 1;
+ if (res >= pf->virt_irq_tracker.num_entries)
+ return -ENOENT;
- xa_for_each_start(&pf->irq_tracker.entries, index, entry, start) {
- if (index > max_idx)
- max_idx = index;
- }
+ bitmap_set(pf->virt_irq_tracker.bm, res, needed);
+
+ /* conversion from number in bitmap to global irq index */
+ return res + pf->virt_irq_tracker.base;
+}
- return max_idx;
+/**
+ * ice_virt_free_irqs - free irqs used by the VF
+ * @pf: pointer to PF structure
+ * @index: first index to be free
+ * @irqs: number of irqs to free
+ */
+void ice_virt_free_irqs(struct ice_pf *pf, u32 index, u32 irqs)
+{
+ bitmap_clear(pf->virt_irq_tracker.bm, index - pf->virt_irq_tracker.base,
+ irqs);
}
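
/*
 * [Editor's sketch -- not part of this patch] Expected pairing of the two
 * helpers above in the SR-IOV flow; vf->first_vector_idx is named in the
 * kernel-doc, the surrounding shape is an assumption:
 */
vf->first_vector_idx = ice_virt_get_irqs(pf, vf->num_msix);
if (vf->first_vector_idx < 0)
	return -ENOSPC;

/* ... VF uses the reserved vectors ... */

ice_virt_free_irqs(pf, vf->first_vector_idx, vf->num_msix);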
diff --git a/drivers/net/ethernet/intel/ice/ice_irq.h b/drivers/net/ethernet/intel/ice/ice_irq.h
index f35efc08575e..b2f9dbafd57e 100644
--- a/drivers/net/ethernet/intel/ice/ice_irq.h
+++ b/drivers/net/ethernet/intel/ice/ice_irq.h
@@ -15,11 +15,22 @@ struct ice_irq_tracker {
u16 num_static; /* preallocated entries */
};
+struct ice_virt_irq_tracker {
+ unsigned long *bm; /* bitmap to track irq usage */
+ u32 num_entries;
+ /* First MSIX vector used by SR-IOV VFs. Calculated by subtracting the
+ * number of MSIX vectors needed for all SR-IOV VFs from the number of
+ * MSIX vectors allowed on this PF.
+ */
+ u32 base;
+};
+
int ice_init_interrupt_scheme(struct ice_pf *pf);
void ice_clear_interrupt_scheme(struct ice_pf *pf);
struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_only);
void ice_free_irq(struct ice_pf *pf, struct msi_map map);
-int ice_get_max_used_msix_vector(struct ice_pf *pf);
+int ice_virt_get_irqs(struct ice_pf *pf, u32 needed);
+void ice_virt_free_irqs(struct ice_pf *pf, u32 index, u32 irqs);
#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 1479b45738af..77ba26538b07 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -229,7 +229,7 @@ struct ice_32b_rx_flex_desc_nic {
__le16 status_error1;
u8 flexi_flags2;
u8 ts_low;
- __le16 l2tag2_1st;
+ __le16 raw_csum;
__le16 l2tag2_2nd;
/* Qword 3 */
@@ -478,10 +478,15 @@ enum ice_tx_desc_len_fields {
struct ice_tx_ctx_desc {
__le32 tunneling_params;
__le16 l2tag2;
- __le16 rsvd;
+ __le16 gcs;
__le64 qw1;
};
+#define ICE_TX_GCS_DESC_START_M GENMASK(7, 0)
+#define ICE_TX_GCS_DESC_OFFSET_M GENMASK(11, 8)
+#define ICE_TX_GCS_DESC_TYPE_M GENMASK(14, 12)
+#define ICE_TX_GCS_DESC_CSUM_PSH 1
+
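/*
 * [Editor's sketch -- not part of this patch] Plausible composition of the
 * new gcs word in the Tx context descriptor using the masks above; the
 * exact units of start/offset are an assumption:
 */
u16 gcs = FIELD_PREP(ICE_TX_GCS_DESC_START_M, csum_start) |
	  FIELD_PREP(ICE_TX_GCS_DESC_OFFSET_M, csum_offset) |
	  FIELD_PREP(ICE_TX_GCS_DESC_TYPE_M, ICE_TX_GCS_DESC_CSUM_PSH);

ctx_desc->gcs = cpu_to_le16(gcs);
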
#define ICE_TXD_CTX_QW1_CMD_S 4
#define ICE_TXD_CTX_QW1_CMD_M (0x7FUL << ICE_TXD_CTX_QW1_CMD_S)
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index d0faa087793d..0bcf9d127ac9 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -157,6 +157,16 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
}
}
+static u16 ice_get_rxq_count(struct ice_pf *pf)
+{
+ return min(ice_get_avail_rxq_count(pf), num_online_cpus());
+}
+
+static u16 ice_get_txq_count(struct ice_pf *pf)
+{
+ return min(ice_get_avail_txq_count(pf), num_online_cpus());
+}
+
/**
* ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
* @vsi: the VSI being configured
@@ -178,9 +188,7 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
vsi->alloc_txq = vsi->req_txq;
vsi->num_txq = vsi->req_txq;
} else {
- vsi->alloc_txq = min3(pf->num_lan_msix,
- ice_get_avail_txq_count(pf),
- (u16)num_online_cpus());
+ vsi->alloc_txq = ice_get_txq_count(pf);
}
pf->num_lan_tx = vsi->alloc_txq;
@@ -193,17 +201,13 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
vsi->alloc_rxq = vsi->req_rxq;
vsi->num_rxq = vsi->req_rxq;
} else {
- vsi->alloc_rxq = min3(pf->num_lan_msix,
- ice_get_avail_rxq_count(pf),
- (u16)num_online_cpus());
+ vsi->alloc_rxq = ice_get_rxq_count(pf);
}
}
pf->num_lan_rx = vsi->alloc_rxq;
- vsi->num_q_vectors = min_t(int, pf->num_lan_msix,
- max_t(int, vsi->alloc_rxq,
- vsi->alloc_txq));
+ vsi->num_q_vectors = max(vsi->alloc_rxq, vsi->alloc_txq);
break;
case ICE_VSI_SF:
vsi->alloc_txq = 1;
@@ -567,6 +571,8 @@ ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch)
return -ENOMEM;
}
+ vsi->irq_dyn_alloc = pci_msix_can_alloc_dyn(vsi->back->pdev);
+
switch (vsi->type) {
case ICE_VSI_PF:
case ICE_VSI_SF:
@@ -827,7 +833,13 @@ bool ice_is_safe_mode(struct ice_pf *pf)
*/
bool ice_is_rdma_ena(struct ice_pf *pf)
{
- return test_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+ union devlink_param_value value;
+ int err;
+
+ err = devl_param_driverinit_value_get(priv_to_devlink(pf),
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
+ &value);
+ return err ? test_bit(ICE_FLAG_RDMA_ENA, pf->flags) : value.vbool;
}
/**
@@ -1173,12 +1185,11 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
static void
ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
- struct ice_pf *pf = vsi->back;
u16 qcount, qmap;
u8 offset = 0;
int pow;
- qcount = min_t(int, vsi->num_rxq, pf->num_lan_msix);
+ qcount = vsi->num_rxq;
pow = order_base_2(qcount);
qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, offset);
@@ -1420,6 +1431,10 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->dev = dev;
ring->count = vsi->num_rx_desc;
ring->cached_phctime = pf->ptp.cached_phc_time;
+
+ if (ice_is_feature_supported(pf, ICE_F_GCS))
+ ring->flags |= ICE_RX_FLAGS_RING_GCS;
+
WRITE_ONCE(vsi->rx_rings[i], ring);
}
@@ -1764,9 +1779,8 @@ void ice_update_eth_stats(struct ice_vsi *vsi)
* @prio: priority for the RXDID for this queue
* @ena_ts: true to enable timestamp and false to disable timestamp
*/
-void
-ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
- bool ena_ts)
+void ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
+ bool ena_ts)
{
int regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
@@ -2582,7 +2596,6 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
return;
vsi->irqs_ready = false;
- ice_free_cpu_rx_rmap(vsi);
ice_for_each_q_vector(vsi, i) {
int irq_num;
@@ -2595,12 +2608,6 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
vsi->q_vectors[i]->num_ring_rx))
continue;
- /* clear the affinity notifier in the IRQ descriptor */
- if (!IS_ENABLED(CONFIG_RFS_ACCEL))
- irq_set_affinity_notifier(irq_num, NULL);
-
- /* clear the affinity_hint in the IRQ descriptor */
- irq_update_affinity_hint(irq_num, NULL);
synchronize_irq(irq_num);
devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]);
}
@@ -2755,11 +2762,18 @@ void ice_vsi_set_napi_queues(struct ice_vsi *vsi)
void ice_vsi_clear_napi_queues(struct ice_vsi *vsi)
{
struct net_device *netdev = vsi->netdev;
- int q_idx;
+ int q_idx, v_idx;
if (!netdev)
return;
+ /* Clear the NAPI's interrupt number */
+ ice_for_each_q_vector(vsi, v_idx) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+
+ netif_napi_set_irq(&q_vector->napi, -1);
+ }
+
ice_for_each_txq(vsi, q_idx)
netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_TX, NULL);
@@ -3882,15 +3896,17 @@ void ice_init_feature_support(struct ice_pf *pf)
ice_set_feature_support(pf, ICE_F_CGU);
if (ice_is_clock_mux_in_netlist(&pf->hw))
ice_set_feature_support(pf, ICE_F_SMA_CTRL);
- if (ice_gnss_is_gps_present(&pf->hw))
+ if (ice_gnss_is_module_present(&pf->hw))
ice_set_feature_support(pf, ICE_F_GNSS);
break;
default:
break;
}
- if (pf->hw.mac_type == ICE_MAC_E830)
+ if (pf->hw.mac_type == ICE_MAC_E830) {
ice_set_feature_support(pf, ICE_F_MBX_LIMIT);
+ ice_set_feature_support(pf, ICE_F_GCS);
+ }
}
/**
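Taken together with the ice_main.c hunks below, the GCS plumbing added here is a three-step chain: E830 advertises ICE_F_GCS, every Rx ring gets ICE_RX_FLAGS_RING_GCS, and the netdev gains NETIF_F_HW_CSUM. A condensed sketch; the wrapper function is hypothetical, the individual calls are the ones this patch adds:

/* Hypothetical condensation of the GCS enable path in this patch */
static void ice_gcs_enable_sketch(struct ice_pf *pf, struct ice_rx_ring *ring,
				  struct net_device *netdev)
{
	if (pf->hw.mac_type == ICE_MAC_E830)
		ice_set_feature_support(pf, ICE_F_GCS);

	if (ice_is_feature_supported(pf, ICE_F_GCS)) {
		/* Rx: the flex descriptor now carries raw_csum */
		ring->flags |= ICE_RX_FLAGS_RING_GCS;
		/* Tx: generic HW checksum, mutually exclusive with TSO */
		netdev->hw_features |= NETIF_F_HW_CSUM;
	}
}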
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index e13bd5a6cb6c..049edeb60104 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -2528,34 +2528,6 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
}
/**
- * ice_irq_affinity_notify - Callback for affinity changes
- * @notify: context as to what irq was changed
- * @mask: the new affinity mask
- *
- * This is a callback function used by the irq_set_affinity_notifier function
- * so that we may register to receive changes to the irq affinity masks.
- */
-static void
-ice_irq_affinity_notify(struct irq_affinity_notify *notify,
- const cpumask_t *mask)
-{
- struct ice_q_vector *q_vector =
- container_of(notify, struct ice_q_vector, affinity_notify);
-
- cpumask_copy(&q_vector->affinity_mask, mask);
-}
-
-/**
- * ice_irq_affinity_release - Callback for affinity notifier release
- * @ref: internal core kernel usage
- *
- * This is a callback function used by the irq_set_affinity_notifier function
- * to inform the current notification subscriber that they will no longer
- * receive notifications.
- */
-static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
-
-/**
* ice_vsi_ena_irq - Enable IRQ for the given VSI
* @vsi: the VSI being configured
*/
@@ -2618,19 +2590,6 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
err);
goto free_q_irqs;
}
-
- /* register for affinity change notifications */
- if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
- struct irq_affinity_notify *affinity_notify;
-
- affinity_notify = &q_vector->affinity_notify;
- affinity_notify->notify = ice_irq_affinity_notify;
- affinity_notify->release = ice_irq_affinity_release;
- irq_set_affinity_notifier(irq_num, affinity_notify);
- }
-
- /* assign the mask for this irq */
- irq_update_affinity_hint(irq_num, &q_vector->affinity_mask);
}
err = ice_set_cpu_rx_rmap(vsi);
@@ -2646,9 +2605,6 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
free_q_irqs:
while (vector--) {
irq_num = vsi->q_vectors[vector]->irq.virq;
- if (!IS_ENABLED(CONFIG_RFS_ACCEL))
- irq_set_affinity_notifier(irq_num, NULL);
- irq_update_affinity_hint(irq_num, NULL);
devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]);
}
return err;
@@ -3304,22 +3260,8 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
if (oicr & PFINT_OICR_TSYN_TX_M) {
ena_mask &= ~PFINT_OICR_TSYN_TX_M;
- if (ice_pf_state_is_nominal(pf) &&
- pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) {
- struct ice_ptp_tx *tx = &pf->ptp.port.tx;
- unsigned long flags;
- u8 idx;
-
- spin_lock_irqsave(&tx->lock, flags);
- idx = find_next_bit_wrap(tx->in_use, tx->len,
- tx->last_ll_ts_idx_read + 1);
- if (idx != tx->len)
- ice_ptp_req_tx_single_tstamp(tx, idx);
- spin_unlock_irqrestore(&tx->lock, flags);
- } else if (ice_ptp_pf_handles_tx_interrupt(pf)) {
- set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
- ret = IRQ_WAKE_THREAD;
- }
+
+ ret = ice_ptp_ts_irq(pf);
}
if (oicr & PFINT_OICR_TSYN_EVNT_M) {
@@ -3689,6 +3631,15 @@ void ice_set_netdev_features(struct net_device *netdev)
*/
netdev->hw_features |= NETIF_F_RXFCS;
+ /* Allow core to manage IRQs affinity */
+ netif_set_affinity_auto(netdev);
+
+ /* Mutual exclusivity for TSO and GCS is enforced by the set features
+ * ndo callback.
+ */
+ if (ice_is_feature_supported(pf, ICE_F_GCS))
+ netdev->hw_features |= NETIF_F_HW_CSUM;
+
netif_set_tso_max_size(netdev, ICE_MAX_TSO_SIZE);
}
@@ -4066,8 +4017,7 @@ static void ice_set_pf_caps(struct ice_pf *pf)
}
clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
- if (func_caps->common_cap.ieee_1588 &&
- !(pf->hw.mac_type == ICE_MAC_E830))
+ if (func_caps->common_cap.ieee_1588)
set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
pf->max_pf_txqs = func_caps->common_cap.num_txq;
@@ -5087,6 +5037,12 @@ static int ice_init(struct ice_pf *pf)
if (err)
return err;
+ if (pf->hw.mac_type == ICE_MAC_E830) {
+ err = pci_enable_ptm(pf->pdev, NULL);
+ if (err)
+ dev_dbg(ice_pf_to_dev(pf), "PCIe PTM not supported by PCIe bus/controller\n");
+ }
+
err = ice_alloc_vsis(pf);
if (err)
goto err_alloc_vsis;
@@ -5186,11 +5142,12 @@ int ice_load(struct ice_pf *pf)
ice_napi_add(vsi);
+ ice_init_features(pf);
+
err = ice_init_rdma(pf);
if (err)
goto err_init_rdma;
- ice_init_features(pf);
ice_service_task_restart(pf);
clear_bit(ICE_DOWN, pf->state);
@@ -5198,6 +5155,7 @@ int ice_load(struct ice_pf *pf)
return 0;
err_init_rdma:
+ ice_deinit_features(pf);
ice_tc_indir_block_unregister(vsi);
err_tc_indir_block_register:
ice_unregister_netdev(vsi);
@@ -5221,8 +5179,8 @@ void ice_unload(struct ice_pf *pf)
devl_assert_locked(priv_to_devlink(pf));
- ice_deinit_features(pf);
ice_deinit_rdma(pf);
+ ice_deinit_features(pf);
ice_tc_indir_block_unregister(vsi);
ice_unregister_netdev(vsi);
ice_devlink_destroy_pf_port(pf);
@@ -6597,6 +6555,18 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
if (changed & NETIF_F_LOOPBACK)
ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));
+ /* Due to E830 hardware limitations, TSO (NETIF_F_ALL_TSO) with GCS
+ * (NETIF_F_HW_CSUM) is not supported.
+ */
+ if (ice_is_feature_supported(pf, ICE_F_GCS) &&
+ ((features & NETIF_F_HW_CSUM) && (features & NETIF_F_ALL_TSO))) {
+ if (netdev->features & NETIF_F_HW_CSUM)
+ dev_err(ice_pf_to_dev(pf), "To enable TSO, you must first disable HW checksum.\n");
+ else
+ dev_err(ice_pf_to_dev(pf), "To enable HW checksum, you must first disable TSO.\n");
+ return -EIO;
+ }
+
return ret;
}
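With the Tx timestamp policy moved into ice_ptp_ts_irq() (added in the ice_ptp.c hunks below), the misc interrupt top half reduces to a dispatch. A simplified sketch of the resulting flow, omitting the other OICR causes and the real function's bookkeeping:

/* Simplified sketch of the misc interrupt top half after this change */
static irqreturn_t ice_misc_intr_sketch(int __always_unused irq, void *data)
{
	struct ice_pf *pf = data;
	struct ice_hw *hw = &pf->hw;
	irqreturn_t ret = IRQ_HANDLED;
	u32 oicr = rd32(hw, PFINT_OICR);

	if (oicr & PFINT_OICR_TSYN_TX_M) {
		/* Per-MAC policy: handled inline (E810 LL TS, E830) or
		 * deferred to the thread (E82x and friends).
		 */
		ret = ice_ptp_ts_irq(pf);
	}

	return ret;
}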
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index e26320ce52ca..1fd1ae03eb90 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -298,8 +298,8 @@ void ice_ptp_restore_timestamp_mode(struct ice_pf *pf)
* @sts: Optional parameter for holding a pair of system timestamps from
* the system clock. Will be ignored if NULL is given.
*/
-static u64
-ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
+u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf,
+ struct ptp_system_timestamp *sts)
{
struct ice_hw *hw = &pf->hw;
u32 hi, lo, lo2;
@@ -310,6 +310,15 @@ ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
/* Read the system timestamp pre PHC read */
ptp_read_system_prets(sts);
+ if (hw->mac_type == ICE_MAC_E830) {
+ u64 clk_time = rd64(hw, E830_GLTSYN_TIME_L(tmr_idx));
+
+ /* Read the system timestamp post PHC read */
+ ptp_read_system_postts(sts);
+
+ return clk_time;
+ }
+
lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
/* Read the system timestamp post PHC read */
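For context on the E830 early return above: the path kept for the other MACs must guard its split 32-bit reads against a low-word rollover, roughly as sketched below (system-timestamp bookkeeping omitted); E830's atomic 64-bit rd64() of E830_GLTSYN_TIME_L makes the dance unnecessary.

/* Rollover-safe split read that rd64() replaces on E830 (sketch) */
lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
lo2 = rd32(hw, GLTSYN_TIME_L(tmr_idx));
if (lo2 < lo) {
	/* TIME_L wrapped between reads; TIME_H may be stale */
	lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
	hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
}
return ((u64)hi << 32) | lo;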
@@ -972,28 +981,6 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
}
/**
- * ice_ptp_init_tx_eth56g - Initialize tracking for Tx timestamps
- * @pf: Board private structure
- * @tx: the Tx tracking structure to initialize
- * @port: the port this structure tracks
- *
- * Initialize the Tx timestamp tracker for this port. ETH56G PHYs
- * have independent memory blocks for all ports.
- *
- * Return: 0 for success, -ENOMEM when failed to allocate Tx tracker
- */
-static int ice_ptp_init_tx_eth56g(struct ice_pf *pf, struct ice_ptp_tx *tx,
- u8 port)
-{
- tx->block = port;
- tx->offset = 0;
- tx->len = INDEX_PER_PORT_ETH56G;
- tx->has_ready_bitmap = 1;
-
- return ice_ptp_alloc_tx_tracker(tx);
-}
-
-/**
* ice_ptp_init_tx_e82x - Initialize tracking for Tx timestamps
* @pf: Board private structure
* @tx: the Tx tracking structure to initialize
@@ -1003,9 +990,11 @@ static int ice_ptp_init_tx_eth56g(struct ice_pf *pf, struct ice_ptp_tx *tx,
* the timestamp block is shared for all ports in the same quad. To avoid
* ports using the same timestamp index, logically break the block of
* registers into chunks based on the port number.
+ *
+ * Return: 0 on success, -ENOMEM when out of memory
*/
-static int
-ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
+static int ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx,
+ u8 port)
{
tx->block = ICE_GET_QUAD_NUM(port);
tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X;
@@ -1016,24 +1005,27 @@ ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
}
/**
- * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
+ * ice_ptp_init_tx - Initialize tracking for Tx timestamps
* @pf: Board private structure
* @tx: the Tx tracking structure to initialize
+ * @port: the port this structure tracks
+ *
+ * Initialize the Tx timestamp tracker for this PF. For all PHYs except E82X,
+ * each port has its own block of timestamps, independent of the other ports.
*
- * Initialize the Tx timestamp tracker for this PF. For E810 devices, each
- * port has its own block of timestamps, independent of the other ports.
+ * Return: 0 on success, -ENOMEM when out of memory
*/
-static int
-ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
+static int ice_ptp_init_tx(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
{
- tx->block = pf->hw.port_info->lport;
+ tx->block = port;
tx->offset = 0;
- tx->len = INDEX_PER_PORT_E810;
+ tx->len = INDEX_PER_PORT;
+
/* The E810 PHY does not provide a timestamp ready bitmap. Instead,
* verify new timestamps against cached copy of the last read
* timestamp.
*/
- tx->has_ready_bitmap = 0;
+ tx->has_ready_bitmap = pf->hw.mac_type != ICE_MAC_E810;
return ice_ptp_alloc_tx_tracker(tx);
}
@@ -1318,20 +1310,21 @@ ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
struct ice_hw *hw = &pf->hw;
int err;
- if (ice_is_e810(hw))
- return 0;
-
mutex_lock(&ptp_port->ps_lock);
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- err = ice_stop_phy_timer_eth56g(hw, port, true);
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
+ err = 0;
break;
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
err = ice_stop_phy_timer_e82x(hw, port, true);
break;
+ case ICE_MAC_GENERIC_3K_E825:
+ err = ice_stop_phy_timer_eth56g(hw, port, true);
+ break;
default:
err = -ENODEV;
}
@@ -1361,19 +1354,17 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
unsigned long flags;
int err;
- if (ice_is_e810(hw))
- return 0;
-
if (!ptp_port->link_up)
return ice_ptp_port_phy_stop(ptp_port);
mutex_lock(&ptp_port->ps_lock);
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- err = ice_start_phy_timer_eth56g(hw, port);
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
+ err = 0;
break;
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
/* Start the PHY timer in Vernier mode */
kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
@@ -1398,6 +1389,9 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work,
0);
break;
+ case ICE_MAC_GENERIC_3K_E825:
+ err = ice_start_phy_timer_eth56g(hw, port);
+ break;
default:
err = -ENODEV;
}
@@ -1432,12 +1426,14 @@ void ice_ptp_link_change(struct ice_pf *pf, bool linkup)
/* Skip HW writes if reset is in progress */
if (pf->hw.reset_ongoing)
return;
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_E810:
- /* Do not reconfigure E810 PHY */
+
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
+ /* Do not reconfigure E810 or E830 PHY */
return;
- case ICE_PHY_ETH56G:
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
+ case ICE_MAC_GENERIC_3K_E825:
ice_ptp_port_phy_restart(ptp_port);
return;
default:
@@ -1465,46 +1461,45 @@ static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold)
ice_ptp_reset_ts_memory(hw);
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G: {
- int port;
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
+ return 0;
+ case ICE_MAC_GENERIC: {
+ int quad;
- for (port = 0; port < hw->ptp.num_lports; port++) {
+ for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports);
+ quad++) {
int err;
- err = ice_phy_cfg_intr_eth56g(hw, port, ena, threshold);
+ err = ice_phy_cfg_intr_e82x(hw, quad, ena, threshold);
if (err) {
- dev_err(dev, "Failed to configure PHY interrupt for port %d, err %d\n",
- port, err);
+ dev_err(dev, "Failed to configure PHY interrupt for quad %d, err %d\n",
+ quad, err);
return err;
}
}
return 0;
}
- case ICE_PHY_E82X: {
- int quad;
+ case ICE_MAC_GENERIC_3K_E825: {
+ int port;
- for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports);
- quad++) {
+ for (port = 0; port < hw->ptp.num_lports; port++) {
int err;
- err = ice_phy_cfg_intr_e82x(hw, quad, ena, threshold);
+ err = ice_phy_cfg_intr_eth56g(hw, port, ena, threshold);
if (err) {
- dev_err(dev, "Failed to configure PHY interrupt for quad %d, err %d\n",
- quad, err);
+ dev_err(dev, "Failed to configure PHY interrupt for port %d, err %d\n",
+ port, err);
return err;
}
}
return 0;
}
- case ICE_PHY_E810:
- return 0;
- case ICE_PHY_UNSUP:
+ case ICE_MAC_UNKNOWN:
default:
- dev_warn(dev, "%s: Unexpected PHY model %d\n", __func__,
- ice_get_phy_model(hw));
return -EOPNOTSUPP;
}
}
@@ -1740,7 +1735,7 @@ static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan,
/* 0. Reset mode & out_en in AUX_OUT */
wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0);
- if (ice_is_e825c(hw)) {
+ if (hw->mac_type == ICE_MAC_GENERIC_3K_E825) {
int err;
/* Enable/disable CGU 1PPS output for E825C */
@@ -1783,6 +1778,7 @@ static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan,
8 + chan + (tmr_idx * 4));
wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);
+ ice_flush(hw);
return 0;
}
@@ -1824,7 +1820,7 @@ static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
return ice_ptp_write_perout(hw, rq->index, gpio_pin, 0, 0);
if (strncmp(pf->ptp.pin_desc[pin_desc_idx].name, "1PPS", 64) == 0 &&
- period != NSEC_PER_SEC && hw->ptp.phy_model == ICE_PHY_E82X) {
+ period != NSEC_PER_SEC && hw->mac_type == ICE_MAC_GENERIC) {
dev_err(ice_pf_to_dev(pf), "1PPS pin supports only 1 s period\n");
return -EOPNOTSUPP;
}
@@ -1843,9 +1839,10 @@ static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
div64_u64_rem(start, period, &phase);
/* If we have only phase or start time is in the past, start the timer
- * at the next multiple of period, maintaining phase.
+ * at the next multiple of period, maintaining phase at least 0.5 second
+ * from now, so we have time to write it to HW.
*/
- clk = ice_ptp_read_src_clk_reg(pf, NULL);
+ clk = ice_ptp_read_src_clk_reg(pf, NULL) + NSEC_PER_MSEC * 500;
if (rq->flags & PTP_PEROUT_PHASE || start <= clk - prop_delay_ns)
start = div64_u64(clk + period - 1, period) * period + phase;
@@ -2078,7 +2075,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
/* For Vernier mode on E82X, we need to recalibrate after new settime.
* Start with marking timestamps as invalid.
*/
- if (ice_get_phy_model(hw) == ICE_PHY_E82X) {
+ if (hw->mac_type == ICE_MAC_GENERIC) {
err = ice_ptp_clear_phy_offset_ready_e82x(hw);
if (err)
dev_warn(ice_pf_to_dev(pf), "Failed to mark timestamps as invalid before settime\n");
@@ -2102,7 +2099,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
ice_ptp_enable_all_perout(pf);
/* Recalibrate and re-enable timestamp blocks for E822/E823 */
- if (ice_get_phy_model(hw) == ICE_PHY_E82X)
+ if (hw->mac_type == ICE_MAC_GENERIC)
ice_ptp_restart_all_phy(pf);
exit:
if (err) {
@@ -2180,93 +2177,157 @@ static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
return 0;
}
+/**
+ * struct ice_crosststamp_cfg - Device cross timestamp configuration
+ * @lock_reg: The hardware semaphore lock to use
+ * @lock_busy: Bit in the semaphore lock indicating the lock is busy
+ * @ctl_reg: The hardware register to request cross timestamp
+ * @ctl_active: Bit in the control register to request cross timestamp
+ * @art_time_l: Lower 32-bits of ART system time
+ * @art_time_h: Upper 32-bits of ART system time
+ * @dev_time_l: Lower 32-bits of device time (per timer index)
+ * @dev_time_h: Upper 32-bits of device time (per timer index)
+ */
+struct ice_crosststamp_cfg {
+ /* HW semaphore lock register */
+ u32 lock_reg;
+ u32 lock_busy;
+
+ /* Capture control register */
+ u32 ctl_reg;
+ u32 ctl_active;
+
+ /* Time storage */
+ u32 art_time_l;
+ u32 art_time_h;
+ u32 dev_time_l[2];
+ u32 dev_time_h[2];
+};
+
+static const struct ice_crosststamp_cfg ice_crosststamp_cfg_e82x = {
+ .lock_reg = PFHH_SEM,
+ .lock_busy = PFHH_SEM_BUSY_M,
+ .ctl_reg = GLHH_ART_CTL,
+ .ctl_active = GLHH_ART_CTL_ACTIVE_M,
+ .art_time_l = GLHH_ART_TIME_L,
+ .art_time_h = GLHH_ART_TIME_H,
+ .dev_time_l[0] = GLTSYN_HHTIME_L(0),
+ .dev_time_h[0] = GLTSYN_HHTIME_H(0),
+ .dev_time_l[1] = GLTSYN_HHTIME_L(1),
+ .dev_time_h[1] = GLTSYN_HHTIME_H(1),
+};
+
#ifdef CONFIG_ICE_HWTS
+static const struct ice_crosststamp_cfg ice_crosststamp_cfg_e830 = {
+ .lock_reg = E830_PFPTM_SEM,
+ .lock_busy = E830_PFPTM_SEM_BUSY_M,
+ .ctl_reg = E830_GLPTM_ART_CTL,
+ .ctl_active = E830_GLPTM_ART_CTL_ACTIVE_M,
+ .art_time_l = E830_GLPTM_ART_TIME_L,
+ .art_time_h = E830_GLPTM_ART_TIME_H,
+ .dev_time_l[0] = E830_GLTSYN_PTMTIME_L(0),
+ .dev_time_h[0] = E830_GLTSYN_PTMTIME_H(0),
+ .dev_time_l[1] = E830_GLTSYN_PTMTIME_L(1),
+ .dev_time_h[1] = E830_GLTSYN_PTMTIME_H(1),
+};
+
+#endif /* CONFIG_ICE_HWTS */
+/**
+ * struct ice_crosststamp_ctx - Device cross timestamp context
+ * @snapshot: snapshot of system clocks for historic interpolation
+ * @pf: pointer to the PF private structure
+ * @cfg: pointer to hardware configuration for cross timestamp
+ */
+struct ice_crosststamp_ctx {
+ struct system_time_snapshot snapshot;
+ struct ice_pf *pf;
+ const struct ice_crosststamp_cfg *cfg;
+};
+
/**
- * ice_ptp_get_syncdevicetime - Get the cross time stamp info
+ * ice_capture_crosststamp - Capture a device/system cross timestamp
* @device: Current device time
* @system: System counter value read synchronously with device time
- * @ctx: Context provided by timekeeping code
+ * @__ctx: Context passed from ice_ptp_getcrosststamp
*
* Read device and system (ART) clock simultaneously and return the corrected
* clock values in ns.
+ *
+ * Return: zero on success, or a negative error code on failure.
*/
-static int
-ice_ptp_get_syncdevicetime(ktime_t *device,
- struct system_counterval_t *system,
- void *ctx)
+static int ice_capture_crosststamp(ktime_t *device,
+ struct system_counterval_t *system,
+ void *__ctx)
{
- struct ice_pf *pf = (struct ice_pf *)ctx;
- struct ice_hw *hw = &pf->hw;
- u32 hh_lock, hh_art_ctl;
- int i;
+ struct ice_crosststamp_ctx *ctx = __ctx;
+ const struct ice_crosststamp_cfg *cfg;
+ u32 lock, ctl, ts_lo, ts_hi, tmr_idx;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ int err;
+ u64 ts;
-#define MAX_HH_HW_LOCK_TRIES 5
-#define MAX_HH_CTL_LOCK_TRIES 100
+ cfg = ctx->cfg;
+ pf = ctx->pf;
+ hw = &pf->hw;
- for (i = 0; i < MAX_HH_HW_LOCK_TRIES; i++) {
- /* Get the HW lock */
- hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
- if (hh_lock & PFHH_SEM_BUSY_M) {
- usleep_range(10000, 15000);
- continue;
- }
- break;
- }
- if (hh_lock & PFHH_SEM_BUSY_M) {
- dev_err(ice_pf_to_dev(pf), "PTP failed to get hh lock\n");
+ tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
+ if (tmr_idx > 1)
+ return -EINVAL;
+
+ /* Poll until we obtain the cross-timestamp hardware semaphore */
+ err = rd32_poll_timeout(hw, cfg->lock_reg, lock,
+ !(lock & cfg->lock_busy),
+ 10 * USEC_PER_MSEC, 50 * USEC_PER_MSEC);
+ if (err) {
+ dev_err(ice_pf_to_dev(pf), "PTP failed to get cross timestamp lock\n");
return -EBUSY;
}
+ /* Snapshot system time for historic interpolation */
+ ktime_get_snapshot(&ctx->snapshot);
+
/* Program cmd to master timer */
ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME);
/* Start the ART and device clock sync sequence */
- hh_art_ctl = rd32(hw, GLHH_ART_CTL);
- hh_art_ctl = hh_art_ctl | GLHH_ART_CTL_ACTIVE_M;
- wr32(hw, GLHH_ART_CTL, hh_art_ctl);
-
- for (i = 0; i < MAX_HH_CTL_LOCK_TRIES; i++) {
- /* Wait for sync to complete */
- hh_art_ctl = rd32(hw, GLHH_ART_CTL);
- if (hh_art_ctl & GLHH_ART_CTL_ACTIVE_M) {
- udelay(1);
- continue;
- } else {
- u32 hh_ts_lo, hh_ts_hi, tmr_idx;
- u64 hh_ts;
-
- tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
- /* Read ART time */
- hh_ts_lo = rd32(hw, GLHH_ART_TIME_L);
- hh_ts_hi = rd32(hw, GLHH_ART_TIME_H);
- hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
- system->cycles = hh_ts;
- system->cs_id = CSID_X86_ART;
- /* Read Device source clock time */
- hh_ts_lo = rd32(hw, GLTSYN_HHTIME_L(tmr_idx));
- hh_ts_hi = rd32(hw, GLTSYN_HHTIME_H(tmr_idx));
- hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
- *device = ns_to_ktime(hh_ts);
- break;
- }
- }
+ ctl = rd32(hw, cfg->ctl_reg);
+ ctl |= cfg->ctl_active;
+ wr32(hw, cfg->ctl_reg, ctl);
+ /* Poll until hardware completes the capture */
+ err = rd32_poll_timeout(hw, cfg->ctl_reg, ctl, !(ctl & cfg->ctl_active),
+ 5, 20 * USEC_PER_MSEC);
+ if (err)
+ goto err_timeout;
+
+ /* Read ART system time */
+ ts_lo = rd32(hw, cfg->art_time_l);
+ ts_hi = rd32(hw, cfg->art_time_h);
+ ts = ((u64)ts_hi << 32) | ts_lo;
+ system->cycles = ts;
+ system->cs_id = CSID_X86_ART;
+
+ /* Read Device source clock time */
+ ts_lo = rd32(hw, cfg->dev_time_l[tmr_idx]);
+ ts_hi = rd32(hw, cfg->dev_time_h[tmr_idx]);
+ ts = ((u64)ts_hi << 32) | ts_lo;
+ *device = ns_to_ktime(ts);
+
+err_timeout:
/* Clear the master timer */
ice_ptp_src_cmd(hw, ICE_PTP_NOP);
/* Release HW lock */
- hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
- hh_lock = hh_lock & ~PFHH_SEM_BUSY_M;
- wr32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), hh_lock);
-
- if (i == MAX_HH_CTL_LOCK_TRIES)
- return -ETIMEDOUT;
+ lock = rd32(hw, cfg->lock_reg);
+ lock &= ~cfg->lock_busy;
+ wr32(hw, cfg->lock_reg, lock);
- return 0;
+ return err;
}
/**
- * ice_ptp_getcrosststamp_e82x - Capture a device cross timestamp
+ * ice_ptp_getcrosststamp - Capture a device cross timestamp
* @info: the driver's PTP info structure
* @cts: The memory to fill the cross timestamp info
*
@@ -2274,22 +2335,36 @@ ice_ptp_get_syncdevicetime(ktime_t *device,
* clock. Fill the cross timestamp information and report it back to the
* caller.
*
- * This is only valid for E822 and E823 devices which have support for
- * generating the cross timestamp via PCIe PTM.
- *
* In order to correctly correlate the ART timestamp back to the TSC time, the
* CPU must have X86_FEATURE_TSC_KNOWN_FREQ.
+ *
+ * Return: zero on success, or a negative error code on failure.
*/
-static int
-ice_ptp_getcrosststamp_e82x(struct ptp_clock_info *info,
- struct system_device_crosststamp *cts)
+static int ice_ptp_getcrosststamp(struct ptp_clock_info *info,
+ struct system_device_crosststamp *cts)
{
struct ice_pf *pf = ptp_info_to_pf(info);
+ struct ice_crosststamp_ctx ctx = {
+ .pf = pf,
+ };
+
+ switch (pf->hw.mac_type) {
+ case ICE_MAC_GENERIC:
+ case ICE_MAC_GENERIC_3K_E825:
+ ctx.cfg = &ice_crosststamp_cfg_e82x;
+ break;
+#ifdef CONFIG_ICE_HWTS
+ case ICE_MAC_E830:
+ ctx.cfg = &ice_crosststamp_cfg_e830;
+ break;
+#endif /* CONFIG_ICE_HWTS */
+ default:
+ return -EOPNOTSUPP;
+ }
- return get_device_system_crosststamp(ice_ptp_get_syncdevicetime,
- pf, NULL, cts);
+ return get_device_system_crosststamp(ice_capture_crosststamp, &ctx,
+ &ctx.snapshot, cts);
}
-#endif /* CONFIG_ICE_HWTS */
/**
* ice_ptp_get_ts_config - ioctl interface to read the timestamping config
@@ -2550,13 +2625,9 @@ static int ice_ptp_parse_sdp_entries(struct ice_pf *pf, __le16 *entries,
*/
static void ice_ptp_set_funcs_e82x(struct ice_pf *pf)
{
-#ifdef CONFIG_ICE_HWTS
- if (boot_cpu_has(X86_FEATURE_ART) &&
- boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ))
- pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp_e82x;
+ pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp;
-#endif /* CONFIG_ICE_HWTS */
- if (ice_is_e825c(&pf->hw)) {
+ if (pf->hw.mac_type == ICE_MAC_GENERIC_3K_E825) {
pf->ptp.ice_pin_desc = ice_pin_desc_e825c;
pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e825c);
} else {
@@ -2623,6 +2694,28 @@ err:
}
/**
+ * ice_ptp_set_funcs_e830 - Set specialized functions for E830 support
+ * @pf: Board private structure
+ *
+ * Assign functions to the PTP capabilities structure for E830 devices.
+ * Functions which operate across all device families should be set directly
+ * in ice_ptp_set_caps. Only add functions here which are distinct for E830
+ * devices.
+ */
+static void ice_ptp_set_funcs_e830(struct ice_pf *pf)
+{
+#ifdef CONFIG_ICE_HWTS
+ if (pcie_ptm_enabled(pf->pdev) && boot_cpu_has(X86_FEATURE_ART))
+ pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp;
+
+#endif /* CONFIG_ICE_HWTS */
+ /* Rest of the config is the same as base E810 */
+ pf->ptp.ice_pin_desc = ice_pin_desc_e810;
+ pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810);
+ ice_ptp_setup_pin_cfg(pf);
+}
+
+/**
* ice_ptp_set_caps - Set PTP capabilities
* @pf: Board private structure
*/
@@ -2644,10 +2737,20 @@ static void ice_ptp_set_caps(struct ice_pf *pf)
info->enable = ice_ptp_gpio_enable;
info->verify = ice_verify_pin;
- if (ice_is_e810(&pf->hw))
+ switch (pf->hw.mac_type) {
+ case ICE_MAC_E810:
ice_ptp_set_funcs_e810(pf);
- else
+ return;
+ case ICE_MAC_E830:
+ ice_ptp_set_funcs_e830(pf);
+ return;
+ case ICE_MAC_GENERIC:
+ case ICE_MAC_GENERIC_3K_E825:
ice_ptp_set_funcs_e82x(pf);
+ return;
+ default:
+ return;
+ }
}
/**
@@ -2758,6 +2861,65 @@ enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf)
}
/**
+ * ice_ptp_ts_irq - Process the PTP Tx timestamps in IRQ context
+ * @pf: Board private structure
+ *
+ * Return: IRQ_WAKE_THREAD if Tx timestamp read has to be handled in the bottom
+ * half of the interrupt and IRQ_HANDLED otherwise.
+ */
+irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ /* An E810 device capable of low latency timestamping over
+ * interrupt can request a single timestamp in the top half and
+ * wait for a second LL TS interrupt from the FW when it is ready.
+ */
+ if (hw->dev_caps.ts_dev_info.ts_ll_int_read) {
+ struct ice_ptp_tx *tx = &pf->ptp.port.tx;
+ u8 idx;
+
+ if (!ice_pf_state_is_nominal(pf))
+ return IRQ_HANDLED;
+
+ spin_lock(&tx->lock);
+ idx = find_next_bit_wrap(tx->in_use, tx->len,
+ tx->last_ll_ts_idx_read + 1);
+ if (idx != tx->len)
+ ice_ptp_req_tx_single_tstamp(tx, idx);
+ spin_unlock(&tx->lock);
+
+ return IRQ_HANDLED;
+ }
+ fallthrough; /* non-LL_TS E810 */
+ case ICE_MAC_GENERIC:
+ case ICE_MAC_GENERIC_3K_E825:
+ /* All other devices process timestamps in the bottom half due
+ * to sleeping or polling.
+ */
+ if (!ice_ptp_pf_handles_tx_interrupt(pf))
+ return IRQ_HANDLED;
+
+ set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
+ return IRQ_WAKE_THREAD;
+ case ICE_MAC_E830:
+ /* E830 can read timestamps in the top half using rd32() */
+ if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) {
+ /* Process outstanding Tx timestamps. If there
+ * is more work, re-arm the interrupt to trigger again.
+ */
+ wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
+ ice_flush(hw);
+ }
+ return IRQ_HANDLED;
+ default:
+ return IRQ_HANDLED;
+ }
+}
+
+/**
 * ice_ptp_maybe_trigger_tx_interrupt - Trigger Tx timestamp interrupt
* @pf: Board private structure
*
@@ -2777,7 +2939,7 @@ static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf)
bool trigger_oicr = false;
unsigned int i;
- if (ice_is_e810(hw))
+ if (!pf->ptp.port.tx.has_ready_bitmap)
return;
if (!ice_pf_src_tmr_owned(pf))
@@ -2912,14 +3074,12 @@ static int ice_ptp_rebuild_owner(struct ice_pf *pf)
*/
ice_ptp_flush_all_tx_tracker(pf);
- if (!ice_is_e810(hw)) {
- /* Enable quad interrupts */
- err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
- if (err)
- return err;
+ /* Enable quad interrupts */
+ err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
+ if (err)
+ return err;
- ice_ptp_restart_all_phy(pf);
- }
+ ice_ptp_restart_all_phy(pf);
/* Re-enable all periodic outputs and external timestamp events */
ice_ptp_enable_all_perout(pf);
@@ -2971,8 +3131,9 @@ err:
static bool ice_is_primary(struct ice_hw *hw)
{
- return ice_is_e825c(hw) && ice_is_dual(hw) ?
- !!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) : true;
+ return hw->mac_type == ICE_MAC_GENERIC_3K_E825 && ice_is_dual(hw) ?
+ !!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) :
+ true;
}
static int ice_ptp_setup_adapter(struct ice_pf *pf)
@@ -2990,7 +3151,7 @@ static int ice_ptp_setup_pf(struct ice_pf *pf)
struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf);
struct ice_ptp *ptp = &pf->ptp;
- if (WARN_ON(!ctrl_ptp) || ice_get_phy_model(&pf->hw) == ICE_PHY_UNSUP)
+ if (WARN_ON(!ctrl_ptp) || pf->hw.mac_type == ICE_MAC_UNKNOWN)
return -ENODEV;
INIT_LIST_HEAD(&ptp->port.list_node);
@@ -3007,7 +3168,7 @@ static void ice_ptp_cleanup_pf(struct ice_pf *pf)
{
struct ice_ptp *ptp = &pf->ptp;
- if (ice_get_phy_model(&pf->hw) != ICE_PHY_UNSUP) {
+ if (pf->hw.mac_type != ICE_MAC_UNKNOWN) {
mutex_lock(&pf->adapter->ports.lock);
list_del(&ptp->port.list_node);
mutex_unlock(&pf->adapter->ports.lock);
@@ -3127,6 +3288,8 @@ static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
* ice_ptp_init_port - Initialize PTP port structure
* @pf: Board private structure
* @ptp_port: PTP port structure
+ *
+ * Return: 0 on success, -ENODEV on invalid MAC type, -ENOMEM on failed alloc.
*/
static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
{
@@ -3134,16 +3297,14 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
mutex_init(&ptp_port->ps_lock);
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- return ice_ptp_init_tx_eth56g(pf, &ptp_port->tx,
- ptp_port->port_num);
- case ICE_PHY_E810:
- return ice_ptp_init_tx_e810(pf, &ptp_port->tx);
- case ICE_PHY_E82X:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_ptp_init_tx(pf, &ptp_port->tx, ptp_port->port_num);
+ case ICE_MAC_GENERIC:
kthread_init_delayed_work(&ptp_port->ov_work,
ice_ptp_wait_for_offsets);
-
return ice_ptp_init_tx_e82x(pf, &ptp_port->tx,
ptp_port->port_num);
default:
@@ -3162,8 +3323,8 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
*/
static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf)
{
- switch (ice_get_phy_model(&pf->hw)) {
- case ICE_PHY_E82X:
+ switch (pf->hw.mac_type) {
+ case ICE_MAC_GENERIC:
/* E822 based PHY has the clock owner process the interrupt
* for all ports.
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index a1d0e988c084..3b769a0cad00 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -128,8 +128,7 @@ struct ice_ptp_tx {
/* Quad and port information for initializing timestamp blocks */
#define INDEX_PER_QUAD 64
#define INDEX_PER_PORT_E82X 16
-#define INDEX_PER_PORT_E810 64
-#define INDEX_PER_PORT_ETH56G 64
+#define INDEX_PER_PORT 64
/**
* struct ice_ptp_port - data used to initialize an external port for PTP
@@ -304,6 +303,9 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb);
void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx);
void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx);
enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf);
+irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf);
+u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf,
+ struct ptp_system_timestamp *sts);
u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
const struct ice_pkt_ctx *pkt_ctx);
@@ -342,6 +344,17 @@ static inline bool ice_ptp_process_ts(struct ice_pf *pf)
return true;
}
+static inline irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf)
+{
+ return IRQ_HANDLED;
+}
+
+static inline u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf,
+ struct ptp_system_timestamp *sts)
+{
+ return 0;
+}
+
static inline u64
ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
const struct ice_pkt_ctx *pkt_ctx)
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
index ac46d1183300..003cdfada3ca 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
@@ -10,70 +10,25 @@
/* Constants defined for the PTP 1588 clock hardware. */
const struct ice_phy_reg_info_eth56g eth56g_phy_res[NUM_ETH56G_PHY_RES] = {
- /* ETH56G_PHY_REG_PTP */
- {
- /* base_addr */
- {
- 0x092000,
- 0x126000,
- 0x1BA000,
- 0x24E000,
- 0x2E2000,
- },
- /* step */
- 0x98,
+ [ETH56G_PHY_REG_PTP] = {
+ .base_addr = 0x092000,
+ .step = 0x98,
},
- /* ETH56G_PHY_MEM_PTP */
- {
- /* base_addr */
- {
- 0x093000,
- 0x127000,
- 0x1BB000,
- 0x24F000,
- 0x2E3000,
- },
- /* step */
- 0x200,
+ [ETH56G_PHY_MEM_PTP] = {
+ .base_addr = 0x093000,
+ .step = 0x200,
},
- /* ETH56G_PHY_REG_XPCS */
- {
- /* base_addr */
- {
- 0x000000,
- 0x009400,
- 0x128000,
- 0x1BC000,
- 0x250000,
- },
- /* step */
- 0x21000,
+ [ETH56G_PHY_REG_XPCS] = {
+ .base_addr = 0x000000,
+ .step = 0x21000,
},
- /* ETH56G_PHY_REG_MAC */
- {
- /* base_addr */
- {
- 0x085000,
- 0x119000,
- 0x1AD000,
- 0x241000,
- 0x2D5000,
- },
- /* step */
- 0x1000,
+ [ETH56G_PHY_REG_MAC] = {
+ .base_addr = 0x085000,
+ .step = 0x1000,
},
- /* ETH56G_PHY_REG_GPCS */
- {
- /* base_addr */
- {
- 0x084000,
- 0x118000,
- 0x1AC000,
- 0x240000,
- 0x2D4000,
- },
- /* step */
- 0x400,
+ [ETH56G_PHY_REG_GPCS] = {
+ .base_addr = 0x084000,
+ .step = 0x400,
},
};
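The flattened table works because every ETH56G resource is now addressed relative to a single per-PHY base: ice_phy_res_address_eth56g() (see the ice_ptp_hw.c hunk below) folds lanes 4..7 onto the second PHY and computes base_addr + lane * step + offset. A worked example using the values above, with lane and offset chosen purely for illustration:

u8 lane = 2;		/* after lane %= hw->ptp.ports_per_phy */
u16 offset = 0x10;
u32 addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base_addr +
	   lane * eth56g_phy_res[ETH56G_PHY_REG_PTP].step +
	   offset;	/* 0x092000 + 2 * 0x98 + 0x10 = 0x092140 */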
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index ec91822e9280..89bb8461284a 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -746,7 +746,7 @@ static int ice_init_cgu_e82x(struct ice_hw *hw)
int err;
/* Disable sticky lock detection so lock err reported is accurate */
- if (ice_is_e825c(hw))
+ if (hw->mac_type == ICE_MAC_GENERIC_3K_E825)
err = ice_cfg_cgu_pll_dis_sticky_bits_e825c(hw);
else
err = ice_cfg_cgu_pll_dis_sticky_bits_e82x(hw);
@@ -756,7 +756,7 @@ static int ice_init_cgu_e82x(struct ice_hw *hw)
/* Configure the CGU PLL using the parameters from the function
* capabilities.
*/
- if (ice_is_e825c(hw))
+ if (hw->mac_type == ICE_MAC_GENERIC_3K_E825)
err = ice_cfg_cgu_pll_e825c(hw, ts_info->time_ref,
(enum ice_clk_src)ts_info->clk_src);
else
@@ -827,8 +827,9 @@ static u32 ice_ptp_tmr_cmd_to_port_reg(struct ice_hw *hw,
/* Certain hardware families share the same register values for the
* port register and source timer register.
*/
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
return ice_ptp_tmr_cmd_to_src_reg(hw, cmd) & TS_CMD_MASK_E810;
default:
break;
@@ -895,6 +896,17 @@ static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
ice_flush(hw);
}
+/**
+ * ice_ptp_cfg_sync_delay - Configure PHC to PHY synchronization delay
+ * @hw: pointer to HW struct
+ * @delay: delay between PHC and PHY SYNC command execution in nanoseconds
+ */
+static void ice_ptp_cfg_sync_delay(const struct ice_hw *hw, u32 delay)
+{
+ wr32(hw, GLTSYN_SYNC_DLAY, delay);
+ ice_flush(hw);
+}
+
/* 56G PHY device functions
*
* The following functions operate on devices with the ETH 56G PHY.
@@ -998,7 +1010,7 @@ static int ice_phy_res_address_eth56g(struct ice_hw *hw, u8 lane,
/* Lanes 4..7 are in fact 0..3 on a second PHY */
lane %= hw->ptp.ports_per_phy;
- *addr = eth56g_phy_res[res_type].base[0] +
+ *addr = eth56g_phy_res[res_type].base_addr +
lane * eth56g_phy_res[res_type].step + offset;
return 0;
@@ -1228,7 +1240,7 @@ static int ice_write_quad_ptp_reg_eth56g(struct ice_hw *hw, u8 port,
if (port >= hw->ptp.num_lports)
return -EIO;
- addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base[0] + offset;
+ addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base_addr + offset;
return ice_write_phy_eth56g(hw, port, addr, val);
}
@@ -1253,7 +1265,7 @@ static int ice_read_quad_ptp_reg_eth56g(struct ice_hw *hw, u8 port,
if (port >= hw->ptp.num_lports)
return -EIO;
- addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base[0] + offset;
+ addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base_addr + offset;
return ice_read_phy_eth56g(hw, port, addr, val);
}
@@ -1576,9 +1588,8 @@ static int ice_read_ptp_tstamp_eth56g(struct ice_hw *hw, u8 port, u8 idx,
* lower 8 bits in the low register, and the upper 32 bits in the high
* register.
*/
- *tstamp = FIELD_PREP(TS_PHY_HIGH_M, hi) |
- FIELD_PREP(TS_PHY_LOW_M, lo);
-
+ *tstamp = FIELD_PREP(PHY_40B_HIGH_M, hi) |
+ FIELD_PREP(PHY_40B_LOW_M, lo);
return 0;
}
@@ -2639,18 +2650,17 @@ static void ice_sb_access_ena_eth56g(struct ice_hw *hw, bool enable)
}
/**
- * ice_ptp_init_phc_eth56g - Perform E82X specific PHC initialization
+ * ice_ptp_init_phc_e825 - Perform E825 specific PHC initialization
* @hw: pointer to HW struct
*
- * Perform PHC initialization steps specific to E82X devices.
+ * Perform E825-specific PTP hardware clock initialization steps.
*
- * Return:
- * * %0 - success
- * * %other - failed to initialize CGU
+ * Return: 0 on success, negative error code otherwise.
*/
-static int ice_ptp_init_phc_eth56g(struct ice_hw *hw)
+static int ice_ptp_init_phc_e825(struct ice_hw *hw)
{
ice_sb_access_ena_eth56g(hw, true);
+
/* Initialize the Clock Generation Unit */
return ice_init_cgu_e82x(hw);
}
@@ -2729,10 +2739,7 @@ static void ice_ptp_init_phy_e825(struct ice_hw *hw)
{
struct ice_ptp_hw *ptp = &hw->ptp;
struct ice_eth56g_params *params;
- u32 phy_rev;
- int err;
- ptp->phy_model = ICE_PHY_ETH56G;
params = &ptp->phy.eth56g;
params->onestep_ena = false;
params->peer_delay = 0;
@@ -2742,9 +2749,6 @@ static void ice_ptp_init_phy_e825(struct ice_hw *hw)
ptp->num_lports = params->num_phys * ptp->ports_per_phy;
ice_sb_access_ena_eth56g(hw, true);
- err = ice_read_phy_eth56g(hw, hw->pf_id, PHY_REG_REVISION, &phy_rev);
- if (err || phy_rev != PHY_REVISION_ETH56G)
- ptp->phy_model = ICE_PHY_UNSUP;
}
/* E822 family functions
@@ -3219,7 +3223,8 @@ ice_read_phy_tstamp_e82x(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
* lower 8 bits in the low register, and the upper 32 bits in the high
* register.
*/
- *tstamp = FIELD_PREP(TS_PHY_HIGH_M, hi) | FIELD_PREP(TS_PHY_LOW_M, lo);
+ *tstamp = FIELD_PREP(PHY_40B_HIGH_M, hi) |
+ FIELD_PREP(PHY_40B_LOW_M, lo);
return 0;
}
@@ -4792,7 +4797,6 @@ int ice_phy_cfg_intr_e82x(struct ice_hw *hw, u8 quad, bool ena, u8 threshold)
*/
static void ice_ptp_init_phy_e82x(struct ice_ptp_hw *ptp)
{
- ptp->phy_model = ICE_PHY_E82X;
ptp->num_lports = 8;
ptp->ports_per_phy = 8;
}
@@ -4986,7 +4990,8 @@ ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp)
/* For E810 devices, the timestamp is reported with the lower 32 bits
* in the low register, and the upper 8 bits in the high register.
*/
- *tstamp = ((u64)hi) << TS_HIGH_S | ((u64)lo & TS_LOW_M);
+ *tstamp = FIELD_PREP(PHY_EXT_40B_HIGH_M, hi) |
+ FIELD_PREP(PHY_EXT_40B_LOW_M, lo);
return 0;
}
@@ -5049,8 +5054,7 @@ static int ice_ptp_init_phc_e810(struct ice_hw *hw)
u8 tmr_idx;
int err;
- /* Ensure synchronization delay is zero */
- wr32(hw, GLTSYN_SYNC_DLAY, 0);
+ ice_ptp_cfg_sync_delay(hw, ICE_E810_E830_SYNC_DELAY);
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_ENA(tmr_idx),
@@ -5316,68 +5320,6 @@ ice_get_phy_tx_tstamp_ready_e810(struct ice_hw *hw, u8 port, u64 *tstamp_ready)
*/
/**
- * ice_get_pca9575_handle
- * @hw: pointer to the hw struct
- * @pca9575_handle: GPIO controller's handle
- *
- * Find and return the GPIO controller's handle in the netlist.
- * When found - the value will be cached in the hw structure and following calls
- * will return cached value
- */
-static int
-ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
-{
- struct ice_aqc_get_link_topo *cmd;
- struct ice_aq_desc desc;
- int status;
- u8 idx;
-
- /* If handle was read previously return cached value */
- if (hw->io_expander_handle) {
- *pca9575_handle = hw->io_expander_handle;
- return 0;
- }
-
- /* If handle was not detected read it from the netlist */
- cmd = &desc.params.get_link_topo;
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
-
- /* Set node type to GPIO controller */
- cmd->addr.topo_params.node_type_ctx =
- (ICE_AQC_LINK_TOPO_NODE_TYPE_M &
- ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL);
-
-#define SW_PCA9575_SFP_TOPO_IDX 2
-#define SW_PCA9575_QSFP_TOPO_IDX 1
-
- /* Check if the SW IO expander controlling SMA exists in the netlist. */
- if (hw->device_id == ICE_DEV_ID_E810C_SFP)
- idx = SW_PCA9575_SFP_TOPO_IDX;
- else if (hw->device_id == ICE_DEV_ID_E810C_QSFP)
- idx = SW_PCA9575_QSFP_TOPO_IDX;
- else
- return -EOPNOTSUPP;
-
- cmd->addr.topo_params.index = idx;
-
- status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
- if (status)
- return -EOPNOTSUPP;
-
- /* Verify if we found the right IO expander type */
- if (desc.params.get_link_topo.node_part_num !=
- ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575)
- return -EOPNOTSUPP;
-
- /* If present save the handle and return it */
- hw->io_expander_handle =
- le16_to_cpu(desc.params.get_link_topo.addr.handle);
- *pca9575_handle = hw->io_expander_handle;
-
- return 0;
-}
-
-/**
* ice_read_sma_ctrl
* @hw: pointer to the hw struct
* @data: pointer to data to be read from the GPIO controller
@@ -5442,37 +5384,6 @@ int ice_write_sma_ctrl(struct ice_hw *hw, u8 data)
}
/**
- * ice_read_pca9575_reg
- * @hw: pointer to the hw struct
- * @offset: GPIO controller register offset
- * @data: pointer to data to be read from the GPIO controller
- *
- * Read the register from the GPIO controller
- */
-int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data)
-{
- struct ice_aqc_link_topo_addr link_topo;
- __le16 addr;
- u16 handle;
- int err;
-
- memset(&link_topo, 0, sizeof(link_topo));
-
- err = ice_get_pca9575_handle(hw, &handle);
- if (err)
- return err;
-
- link_topo.handle = cpu_to_le16(handle);
- link_topo.topo_params.node_type_ctx =
- FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
- ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED);
-
- addr = cpu_to_le16((u16)offset);
-
- return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL);
-}
-
-/**
* ice_ptp_read_sdp_ac - read SDP available connections section from NVM
* @hw: pointer to the HW struct
* @entries: returns the SDP available connections section from NVM
@@ -5538,18 +5449,138 @@ exit:
*/
static void ice_ptp_init_phy_e810(struct ice_ptp_hw *ptp)
{
- ptp->phy_model = ICE_PHY_E810;
ptp->num_lports = 8;
ptp->ports_per_phy = 4;
init_waitqueue_head(&ptp->phy.e810.atqbal_wq);
}
+/* E830 functions
+ *
+ * The following functions operate on the E830 series devices.
+ *
+ */
+
+/**
+ * ice_ptp_init_phc_e830 - Perform E830 specific PHC initialization
+ * @hw: pointer to HW struct
+ *
+ * Perform E830-specific PTP hardware clock initialization steps.
+ */
+static void ice_ptp_init_phc_e830(const struct ice_hw *hw)
+{
+ ice_ptp_cfg_sync_delay(hw, ICE_E810_E830_SYNC_DELAY);
+}
+
+/**
+ * ice_ptp_write_direct_incval_e830 - Program the PHC increment value directly
+ * @hw: pointer to HW struct
+ * @incval: The new 40 bit increment value to program
+ *
+ * Program the new increment value directly into the PHC GLTSYN_INCVAL_L
+ * and GLTSYN_INCVAL_H registers. The actual change is applied by FW
+ * automatically; no shadow-register command is required.
+ */
+static void ice_ptp_write_direct_incval_e830(const struct ice_hw *hw,
+ u64 incval)
+{
+ u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+
+ wr32(hw, GLTSYN_INCVAL_L(tmr_idx), lower_32_bits(incval));
+ wr32(hw, GLTSYN_INCVAL_H(tmr_idx), upper_32_bits(incval));
+}
+
+/**
+ * ice_ptp_write_direct_phc_time_e830 - Program the PHC with initial time
+ * @hw: Board private structure
+ * @time: Time to initialize the PHC to
+ *
+ * Program the PHC GLTSYN_TIME registers directly with the initial clock
+ * time. On E830 there is no shadow-register staging: the new time takes
+ * effect automatically, so no ICE_PTP_INIT_TIME command is issued.
+ *
+ * The 64 bit time value is in nanoseconds; the sub-nanosecond register
+ * (GLTSYN_TIME_0) is cleared to zero.
+ */
+static void ice_ptp_write_direct_phc_time_e830(const struct ice_hw *hw,
+ u64 time)
+{
+ u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+
+ wr32(hw, GLTSYN_TIME_0(tmr_idx), 0);
+ wr32(hw, GLTSYN_TIME_L(tmr_idx), lower_32_bits(time));
+ wr32(hw, GLTSYN_TIME_H(tmr_idx), upper_32_bits(time));
+}
+
+/**
+ * ice_ptp_port_cmd_e830 - Prepare all external PHYs for a timer command
+ * @hw: pointer to HW struct
+ * @cmd: Command to be sent to the port
+ *
+ * Prepare the external PHYs connected to this device for a timer sync
+ * command.
+ *
+ * Return: 0 on success, negative error code when PHY write failed
+ */
+static int ice_ptp_port_cmd_e830(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+{
+ u32 val = ice_ptp_tmr_cmd_to_port_reg(hw, cmd);
+
+ return ice_write_phy_reg_e810(hw, E830_ETH_GLTSYN_CMD, val);
+}
+
+/**
+ * ice_read_phy_tstamp_e830 - Read a PHY timestamp out of the external PHY
+ * @hw: pointer to the HW struct
+ * @idx: the timestamp index to read
+ * @tstamp: on return, the 40bit timestamp value
+ *
+ * Read a 40bit timestamp value out of the timestamp block of the external PHY
+ * on the E830 device.
+ */
+static void ice_read_phy_tstamp_e830(const struct ice_hw *hw, u8 idx,
+ u64 *tstamp)
+{
+ u32 hi, lo;
+
+ hi = rd32(hw, E830_PRTTSYN_TXTIME_H(idx));
+ lo = rd32(hw, E830_PRTTSYN_TXTIME_L(idx));
+
+ /* For E830 devices, the timestamp is reported with the lower 32 bits
+ * in the low register, and the upper 8 bits in the high register.
+ */
+ *tstamp = FIELD_PREP(PHY_EXT_40B_HIGH_M, hi) |
+ FIELD_PREP(PHY_EXT_40B_LOW_M, lo);
+}
+
+/**
+ * ice_get_phy_tx_tstamp_ready_e830 - Read Tx memory status register
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to read
+ * @tstamp_ready: contents of the Tx memory status register
+ */
+static void ice_get_phy_tx_tstamp_ready_e830(const struct ice_hw *hw, u8 port,
+ u64 *tstamp_ready)
+{
+ *tstamp_ready = rd32(hw, E830_PRTMAC_TS_TX_MEM_VALID_H);
+ *tstamp_ready <<= 32;
+ *tstamp_ready |= rd32(hw, E830_PRTMAC_TS_TX_MEM_VALID_L);
+}
+
+/**
+ * ice_ptp_init_phy_e830 - initialize PHY parameters
+ * @ptp: pointer to the PTP HW struct
+ */
+static void ice_ptp_init_phy_e830(struct ice_ptp_hw *ptp)
+{
+ ptp->num_lports = 8;
+ ptp->ports_per_phy = 4;
+}
+
/* Device agnostic functions
*
- * The following functions implement shared behavior common to both E822 and
- * E810 devices, possibly calling a device specific implementation where
- * necessary.
+ * The following functions implement shared behavior common to all devices,
+ * possibly calling a device specific implementation where necessary.
*/
/**
@@ -5612,14 +5643,22 @@ void ice_ptp_init_hw(struct ice_hw *hw)
{
struct ice_ptp_hw *ptp = &hw->ptp;
- if (ice_is_e822(hw) || ice_is_e823(hw))
- ice_ptp_init_phy_e82x(ptp);
- else if (ice_is_e810(hw))
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
ice_ptp_init_phy_e810(ptp);
- else if (ice_is_e825c(hw))
+ break;
+ case ICE_MAC_E830:
+ ice_ptp_init_phy_e830(ptp);
+ break;
+ case ICE_MAC_GENERIC:
+ ice_ptp_init_phy_e82x(ptp);
+ break;
+ case ICE_MAC_GENERIC_3K_E825:
ice_ptp_init_phy_e825(hw);
- else
- ptp->phy_model = ICE_PHY_UNSUP;
+ break;
+ default:
+ return;
+ }
}
/**
@@ -5640,11 +5679,11 @@ void ice_ptp_init_hw(struct ice_hw *hw)
static int ice_ptp_write_port_cmd(struct ice_hw *hw, u8 port,
enum ice_ptp_tmr_cmd cmd)
{
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- return ice_ptp_write_port_cmd_eth56g(hw, port, cmd);
- case ICE_PHY_E82X:
+ switch (hw->mac_type) {
+ case ICE_MAC_GENERIC:
return ice_ptp_write_port_cmd_e82x(hw, port, cmd);
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_ptp_write_port_cmd_eth56g(hw, port, cmd);
default:
return -EOPNOTSUPP;
}
@@ -5705,9 +5744,11 @@ static int ice_ptp_port_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
u32 port;
/* PHY models which can program all ports simultaneously */
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
return ice_ptp_port_cmd_e810(hw, cmd);
+ case ICE_MAC_E830:
+ return ice_ptp_port_cmd_e830(hw, cmd);
default:
break;
}
@@ -5778,23 +5819,29 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time)
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
/* Source timers */
+ /* For E830 we don't need to use shadow registers; it's automatic */
+ if (hw->mac_type == ICE_MAC_E830) {
+ ice_ptp_write_direct_phc_time_e830(hw, time);
+ return 0;
+ }
+
wr32(hw, GLTSYN_SHTIME_L(tmr_idx), lower_32_bits(time));
wr32(hw, GLTSYN_SHTIME_H(tmr_idx), upper_32_bits(time));
wr32(hw, GLTSYN_SHTIME_0(tmr_idx), 0);
/* PHY timers */
/* Fill Rx and Tx ports and send msg to PHY */
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- err = ice_ptp_prep_phy_time_eth56g(hw,
- (u32)(time & 0xFFFFFFFF));
- break;
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF);
break;
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
err = ice_ptp_prep_phy_time_e82x(hw, time & 0xFFFFFFFF);
break;
+ case ICE_MAC_GENERIC_3K_E825:
+ err = ice_ptp_prep_phy_time_eth56g(hw,
+ (u32)(time & 0xFFFFFFFF));
+ break;
default:
err = -EOPNOTSUPP;
}
@@ -5826,20 +5873,26 @@ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+ /* For E830 we don't need to use shadow registers; it's automatic */
+ if (hw->mac_type == ICE_MAC_E830) {
+ ice_ptp_write_direct_incval_e830(hw, incval);
+ return 0;
+ }
+
/* Shadow Adjust */
wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval));
wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval));
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- err = ice_ptp_prep_phy_incval_eth56g(hw, incval);
- break;
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
err = ice_ptp_prep_phy_incval_e810(hw, incval);
break;
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
err = ice_ptp_prep_phy_incval_e82x(hw, incval);
break;
+ case ICE_MAC_GENERIC_3K_E825:
+ err = ice_ptp_prep_phy_incval_eth56g(hw, incval);
+ break;
default:
err = -EOPNOTSUPP;
}
@@ -5899,16 +5952,19 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0);
wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj);
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- err = ice_ptp_prep_phy_adj_eth56g(hw, adj);
- break;
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
err = ice_ptp_prep_phy_adj_e810(hw, adj);
break;
- case ICE_PHY_E82X:
+ case ICE_MAC_E830:
+ /* E830 syncs PHYs automatically after setting GLTSYN_SHADJ */
+ return 0;
+ case ICE_MAC_GENERIC:
err = ice_ptp_prep_phy_adj_e82x(hw, adj);
break;
+ case ICE_MAC_GENERIC_3K_E825:
+ err = ice_ptp_prep_phy_adj_eth56g(hw, adj);
+ break;
default:
err = -EOPNOTSUPP;
}
@@ -5932,13 +5988,16 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
*/
int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
{
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- return ice_read_ptp_tstamp_eth56g(hw, block, idx, tstamp);
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
return ice_read_phy_tstamp_e810(hw, block, idx, tstamp);
- case ICE_PHY_E82X:
+ case ICE_MAC_E830:
+ ice_read_phy_tstamp_e830(hw, idx, tstamp);
+ return 0;
+ case ICE_MAC_GENERIC:
return ice_read_phy_tstamp_e82x(hw, block, idx, tstamp);
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_read_ptp_tstamp_eth56g(hw, block, idx, tstamp);
default:
return -EOPNOTSUPP;
}
@@ -5962,13 +6021,13 @@ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
*/
int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
{
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- return ice_clear_ptp_tstamp_eth56g(hw, block, idx);
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
return ice_clear_phy_tstamp_e810(hw, block, idx);
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
return ice_clear_phy_tstamp_e82x(hw, block, idx);
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_clear_ptp_tstamp_eth56g(hw, block, idx);
default:
return -EOPNOTSUPP;
}
@@ -6025,14 +6084,14 @@ static int ice_get_pf_c827_idx(struct ice_hw *hw, u8 *idx)
*/
void ice_ptp_reset_ts_memory(struct ice_hw *hw)
{
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- ice_ptp_reset_ts_memory_eth56g(hw);
- break;
- case ICE_PHY_E82X:
+ switch (hw->mac_type) {
+ case ICE_MAC_GENERIC:
ice_ptp_reset_ts_memory_e82x(hw);
break;
- case ICE_PHY_E810:
+ case ICE_MAC_GENERIC_3K_E825:
+ ice_ptp_reset_ts_memory_eth56g(hw);
+ break;
+ case ICE_MAC_E810:
default:
return;
}
@@ -6054,13 +6113,16 @@ int ice_ptp_init_phc(struct ice_hw *hw)
/* Clear event err indications for auxiliary pins */
(void)rd32(hw, GLTSYN_STAT(src_idx));
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- return ice_ptp_init_phc_eth56g(hw);
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
return ice_ptp_init_phc_e810(hw);
- case ICE_PHY_E82X:
+ case ICE_MAC_E830:
+ ice_ptp_init_phc_e830(hw);
+ return 0;
+ case ICE_MAC_GENERIC:
return ice_ptp_init_phc_e82x(hw);
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_ptp_init_phc_e825(hw);
default:
return -EOPNOTSUPP;
}
@@ -6079,17 +6141,19 @@ int ice_ptp_init_phc(struct ice_hw *hw)
*/
int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready)
{
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- return ice_get_phy_tx_tstamp_ready_eth56g(hw, block,
- tstamp_ready);
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
return ice_get_phy_tx_tstamp_ready_e810(hw, block,
tstamp_ready);
- case ICE_PHY_E82X:
+ case ICE_MAC_E830:
+ ice_get_phy_tx_tstamp_ready_e830(hw, block, tstamp_ready);
+ return 0;
+ case ICE_MAC_GENERIC:
return ice_get_phy_tx_tstamp_ready_e82x(hw, block,
tstamp_ready);
- break;
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_get_phy_tx_tstamp_ready_eth56g(hw, block,
+ tstamp_ready);
default:
return -EOPNOTSUPP;
}
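A minimal sketch of how the new E830 helpers compose on the Tx path: snapshot the 64-bit memory-valid bitmap, then read each ready slot over plain MMIO, which is why E830 can complete timestamps in the IRQ top half. The loop and function name are illustrative; the real consumption happens in the Tx tracker code:

/* Illustrative only: drain ready E830 Tx timestamps for one port */
static void ice_e830_drain_tx_tstamps(const struct ice_hw *hw, u8 port)
{
	u64 ready, tstamp;
	u8 idx;

	ice_get_phy_tx_tstamp_ready_e830(hw, port, &ready);

	for (idx = 0; idx < 64; idx++) {
		if (!(ready & BIT_ULL(idx)))
			continue;

		ice_read_phy_tstamp_e830(hw, idx, &tstamp);
		/* ... extend to 64 bit ns and complete the matching skb ... */
	}
}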
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index 6779ce120515..e5925ccc2613 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -65,14 +65,14 @@ enum ice_eth56g_link_spd {
/**
* struct ice_phy_reg_info_eth56g - ETH56G PHY register parameters
- * @base: base address for each PHY block
+ * @base_addr: base address for each PHY block
* @step: step between PHY lanes
*
* Characteristic information for the various PHY register parameters in the
* ETH56G devices
*/
struct ice_phy_reg_info_eth56g {
- u32 base[NUM_ETH56G_PHY_RES];
+ u32 base_addr;
u32 step;
};
@@ -324,6 +324,7 @@ extern const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD];
*/
#define ICE_E810_PLL_FREQ 812500000
#define ICE_PTP_NOMINAL_INCVAL_E810 0x13b13b13bULL
+#define ICE_E810_E830_SYNC_DELAY 0
/* Device agnostic functions */
u8 ice_get_ptp_src_clock_index(struct ice_hw *hw);
@@ -395,7 +396,6 @@ int ice_phy_cfg_intr_e82x(struct ice_hw *hw, u8 quad, bool ena, u8 threshold);
/* E810 family functions */
int ice_read_sma_ctrl(struct ice_hw *hw, u8 *data);
int ice_write_sma_ctrl(struct ice_hw *hw, u8 data);
-int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data);
int ice_ptp_read_sdp_ac(struct ice_hw *hw, __le16 *entries, uint *num_entries);
int ice_cgu_get_num_pins(struct ice_hw *hw, bool input);
enum dpll_pin_type ice_cgu_get_pin_type(struct ice_hw *hw, u8 pin, bool input);
@@ -431,13 +431,14 @@ int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port);
*/
static inline u64 ice_get_base_incval(struct ice_hw *hw)
{
- switch (hw->ptp.phy_model) {
- case ICE_PHY_ETH56G:
- return ICE_ETH56G_NOMINAL_INCVAL;
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
return ICE_PTP_NOMINAL_INCVAL_E810;
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
return ice_e82x_nominal_incval(ice_e82x_time_ref(hw));
+ case ICE_MAC_GENERIC_3K_E825:
+ return ICE_ETH56G_NOMINAL_INCVAL;
default:
return 0;
}
@@ -650,18 +651,25 @@ static inline bool ice_is_dual(struct ice_hw *hw)
/* E810 timer command register */
#define E810_ETH_GLTSYN_CMD 0x03000344
+/* E830 timer command register */
+#define E830_ETH_GLTSYN_CMD 0x00088814
+
+/* E830 PHC time register */
+#define E830_GLTSYN_TIME_L(_tmr_idx) (0x0008A000 + 0x1000 * (_tmr_idx))
+
/* Source timer incval macros */
#define INCVAL_HIGH_M 0xFF
-/* Timestamp block macros */
+/* PHY 40b registers macros */
+#define PHY_EXT_40B_LOW_M GENMASK(31, 0)
+#define PHY_EXT_40B_HIGH_M GENMASK_ULL(39, 32)
+#define PHY_40B_LOW_M GENMASK(7, 0)
+#define PHY_40B_HIGH_M GENMASK_ULL(39, 8)
#define TS_VALID BIT(0)
#define TS_LOW_M 0xFFFFFFFF
#define TS_HIGH_M 0xFF
#define TS_HIGH_S 32
-#define TS_PHY_LOW_M GENMASK(7, 0)
-#define TS_PHY_HIGH_M GENMASK_ULL(39, 8)
-
#define BYTES_PER_IDX_ADDR_L_U 8
#define BYTES_PER_IDX_ADDR_L 4
@@ -772,36 +780,19 @@ static inline bool ice_is_dual(struct ice_hw *hw)
#define PHY_MAC_XIF_TS_SFD_ENA_M ICE_M(0x1, 20)
#define PHY_MAC_XIF_GMII_TS_SEL_M ICE_M(0x1, 21)
-/* GPCS config register */
-#define PHY_GPCS_CONFIG_REG0 0x268
-#define PHY_GPCS_CONFIG_REG0_TX_THR_M ICE_M(0xF, 24)
-#define PHY_GPCS_BITSLIP 0x5C
-
#define PHY_TS_INT_CONFIG_THRESHOLD_M ICE_M(0x3F, 0)
#define PHY_TS_INT_CONFIG_ENA_M BIT(6)
-/* 1-step PTP config */
-#define PHY_PTP_1STEP_CONFIG 0x270
-#define PHY_PTP_1STEP_T1S_UP64_M ICE_M(0xF, 4)
-#define PHY_PTP_1STEP_T1S_DELTA_M ICE_M(0xF, 8)
-#define PHY_PTP_1STEP_PEER_DELAY(_port) (0x274 + 4 * (_port))
-#define PHY_PTP_1STEP_PD_ADD_PD_M ICE_M(0x1, 0)
-#define PHY_PTP_1STEP_PD_DELAY_M ICE_M(0x3fffffff, 1)
-#define PHY_PTP_1STEP_PD_DLY_V_M ICE_M(0x1, 31)
-
/* Macros to derive offsets for TimeStampLow and TimeStampHigh */
#define PHY_TSTAMP_L(x) (((x) * 8) + 0)
#define PHY_TSTAMP_U(x) (((x) * 8) + 4)
-#define PHY_REG_REVISION 0x85000
-
#define PHY_REG_DESKEW_0 0x94
#define PHY_REG_DESKEW_0_RLEVEL GENMASK(6, 0)
#define PHY_REG_DESKEW_0_RLEVEL_FRAC GENMASK(9, 7)
#define PHY_REG_DESKEW_0_RLEVEL_FRAC_W 3
#define PHY_REG_DESKEW_0_VALID GENMASK(10, 10)
-#define PHY_REG_GPCS_BITSLIP 0x5C
#define PHY_REG_SD_BIT_SLIP(_port_offset) (0x29C + 4 * (_port_offset))
#define PHY_REVISION_ETH56G 0x10200
#define PHY_VENDOR_TXLANE_THRESH 0x2000C
@@ -821,7 +812,21 @@ static inline bool ice_is_dual(struct ice_hw *hw)
#define PHY_MAC_BLOCKTIME 0x50
#define PHY_MAC_MARKERTIME 0x54
#define PHY_MAC_TX_OFFSET 0x58
+#define PHY_GPCS_BITSLIP 0x5C
#define PHY_PTP_INT_STATUS 0x7FD140
+/* ETH56G registers shared per quad */
+/* GPCS config register */
+#define PHY_GPCS_CONFIG_REG0 0x268
+#define PHY_GPCS_CONFIG_REG0_TX_THR_M GENMASK(27, 24)
+/* 1-step PTP config */
+#define PHY_PTP_1STEP_CONFIG 0x270
+#define PHY_PTP_1STEP_T1S_UP64_M GENMASK(7, 4)
+#define PHY_PTP_1STEP_T1S_DELTA_M GENMASK(11, 8)
+#define PHY_PTP_1STEP_PEER_DELAY(_quad_lane) (0x274 + 4 * (_quad_lane))
+#define PHY_PTP_1STEP_PD_ADD_PD_M BIT(0)
+#define PHY_PTP_1STEP_PD_DELAY_M GENMASK(30, 1)
+#define PHY_PTP_1STEP_PD_DLY_V_M BIT(31)
+
#endif /* _ICE_PTP_HW_H_ */
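/* A hedged sketch (not taken from the driver) of how the new 40-bit
 * timestamp masks above could be combined, assuming the split mirrors
 * the existing pattern of merging two 32-bit register reads: hi
 * supplies bits 39:32, lo bits 31:0 in the extended layout.
 */
#include <linux/bitfield.h>

static inline u64 ex_assemble_ext_40b(u32 lo, u32 hi)
{
	/* FIELD_PREP() masks and shifts each half into place */
	return FIELD_PREP(PHY_EXT_40B_HIGH_M, hi) |
	       FIELD_PREP(PHY_EXT_40B_LOW_M, lo);
}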
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 8aabf7749aa5..f1648cf103b7 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -124,27 +124,6 @@ static void ice_dis_vf_mappings(struct ice_vf *vf)
}
/**
- * ice_sriov_free_msix_res - Reset/free any used MSIX resources
- * @pf: pointer to the PF structure
- *
- * Since no MSIX entries are taken from the pf->irq_tracker then just clear
- * the pf->sriov_base_vector.
- *
- * Returns 0 on success, and -EINVAL on error.
- */
-static int ice_sriov_free_msix_res(struct ice_pf *pf)
-{
- if (!pf)
- return -EINVAL;
-
- bitmap_free(pf->sriov_irq_bm);
- pf->sriov_irq_size = 0;
- pf->sriov_base_vector = 0;
-
- return 0;
-}
-
-/**
* ice_free_vfs - Free all VFs
* @pf: pointer to the PF structure
*/
@@ -178,6 +157,7 @@ void ice_free_vfs(struct ice_pf *pf)
ice_eswitch_detach_vf(pf, vf);
ice_dis_vf_qs(vf);
+ ice_virt_free_irqs(pf, vf->first_vector_idx, vf->num_msix);
if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
/* disable VF qp mappings and set VF disable state */
@@ -197,9 +177,6 @@ void ice_free_vfs(struct ice_pf *pf)
mutex_unlock(&vf->cfg_lock);
}
- if (ice_sriov_free_msix_res(pf))
- dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");
-
vfs->num_qps_per = 0;
ice_free_vf_entries(pf);
@@ -369,40 +346,6 @@ void ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
}
/**
- * ice_sriov_set_msix_res - Set any used MSIX resources
- * @pf: pointer to PF structure
- * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
- *
- * This function allows SR-IOV resources to be taken from the end of the PF's
- * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
- * just set the pf->sriov_base_vector and return success.
- *
- * If there are not enough resources available, return an error. This should
- * always be caught by ice_set_per_vf_res().
- *
- * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
- * in the PF's space available for SR-IOV.
- */
-static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
-{
- u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
- int vectors_used = ice_get_max_used_msix_vector(pf);
- int sriov_base_vector;
-
- sriov_base_vector = total_vectors - num_msix_needed;
-
- /* make sure we only grab irq_tracker entries from the list end and
- * that we have enough available MSIX vectors
- */
- if (sriov_base_vector < vectors_used)
- return -EINVAL;
-
- pf->sriov_base_vector = sriov_base_vector;
-
- return 0;
-}
-
-/**
* ice_set_per_vf_res - check if vectors and queues are available
* @pf: pointer to the PF structure
* @num_vfs: the number of SR-IOV VFs being configured
@@ -426,11 +369,9 @@ static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
*/
static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
{
- int vectors_used = ice_get_max_used_msix_vector(pf);
u16 num_msix_per_vf, num_txq, num_rxq, avail_qs;
int msix_avail_per_vf, msix_avail_for_sriov;
struct device *dev = ice_pf_to_dev(pf);
- int err;
lockdep_assert_held(&pf->vfs.table_lock);
@@ -438,8 +379,7 @@ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
return -EINVAL;
/* determine MSI-X resources per VF */
- msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
- vectors_used;
+ msix_avail_for_sriov = pf->virt_irq_tracker.num_entries;
msix_avail_per_vf = msix_avail_for_sriov / num_vfs;
if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
@@ -478,13 +418,6 @@ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
return -ENOSPC;
}
- err = ice_sriov_set_msix_res(pf, num_msix_per_vf * num_vfs);
- if (err) {
- dev_err(dev, "Unable to set MSI-X resources for %d VFs, err %d\n",
- num_vfs, err);
- return err;
- }
-
/* only allow equal Tx/Rx queue count (i.e. queue pairs) */
pf->vfs.num_qps_per = min_t(int, num_txq, num_rxq);
pf->vfs.num_msix_per = num_msix_per_vf;
@@ -495,52 +428,6 @@ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
}
/**
- * ice_sriov_get_irqs - get irqs for SR-IOV usacase
- * @pf: pointer to PF structure
- * @needed: number of irqs to get
- *
- * This returns the first MSI-X vector index in PF space that is used by this
- * VF. This index is used when accessing PF relative registers such as
- * GLINT_VECT2FUNC and GLINT_DYN_CTL.
- * This will always be the OICR index in the AVF driver so any functionality
- * using vf->first_vector_idx for queue configuration_id: id of VF which will
- * use this irqs
- *
- * Only SRIOV specific vectors are tracked in sriov_irq_bm. SRIOV vectors are
- * allocated from the end of global irq index. First bit in sriov_irq_bm means
- * last irq index etc. It simplifies extension of SRIOV vectors.
- * They will be always located from sriov_base_vector to the last irq
- * index. While increasing/decreasing sriov_base_vector can be moved.
- */
-static int ice_sriov_get_irqs(struct ice_pf *pf, u16 needed)
-{
- int res = bitmap_find_next_zero_area(pf->sriov_irq_bm,
- pf->sriov_irq_size, 0, needed, 0);
- /* conversion from number in bitmap to global irq index */
- int index = pf->sriov_irq_size - res - needed;
-
- if (res >= pf->sriov_irq_size || index < pf->sriov_base_vector)
- return -ENOENT;
-
- bitmap_set(pf->sriov_irq_bm, res, needed);
- return index;
-}
-
-/**
- * ice_sriov_free_irqs - free irqs used by the VF
- * @pf: pointer to PF structure
- * @vf: pointer to VF structure
- */
-static void ice_sriov_free_irqs(struct ice_pf *pf, struct ice_vf *vf)
-{
- /* Move back from first vector index to first index in bitmap */
- int bm_i = pf->sriov_irq_size - vf->first_vector_idx - vf->num_msix;
-
- bitmap_clear(pf->sriov_irq_bm, bm_i, vf->num_msix);
- vf->first_vector_idx = 0;
-}
-
-/**
* ice_init_vf_vsi_res - initialize/setup VF VSI resources
* @vf: VF to initialize/setup the VSI for
*
@@ -553,7 +440,7 @@ static int ice_init_vf_vsi_res(struct ice_vf *vf)
struct ice_vsi *vsi;
int err;
- vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix);
+ vf->first_vector_idx = ice_virt_get_irqs(pf, vf->num_msix);
if (vf->first_vector_idx < 0)
return -ENOMEM;
@@ -853,16 +740,10 @@ err_free_entries:
*/
static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
{
- int total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
int ret;
- pf->sriov_irq_bm = bitmap_zalloc(total_vectors, GFP_KERNEL);
- if (!pf->sriov_irq_bm)
- return -ENOMEM;
- pf->sriov_irq_size = total_vectors;
-
/* Disable global interrupt 0 so we don't try to handle the VFLR. */
wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index),
ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
@@ -915,7 +796,6 @@ err_unroll_intr:
/* rearm interrupts here */
ice_irq_dynamic_ena(hw, NULL, NULL);
clear_bit(ICE_OICR_INTR_DIS, pf->state);
- bitmap_free(pf->sriov_irq_bm);
return ret;
}
@@ -989,16 +869,7 @@ u32 ice_sriov_get_vf_total_msix(struct pci_dev *pdev)
{
struct ice_pf *pf = pci_get_drvdata(pdev);
- return pf->sriov_irq_size - ice_get_max_used_msix_vector(pf);
-}
-
-static int ice_sriov_move_base_vector(struct ice_pf *pf, int move)
-{
- if (pf->sriov_base_vector - move < ice_get_max_used_msix_vector(pf))
- return -ENOMEM;
-
- pf->sriov_base_vector -= move;
- return 0;
+ return pf->virt_irq_tracker.num_entries;
}
static void ice_sriov_remap_vectors(struct ice_pf *pf, u16 restricted_id)
@@ -1017,7 +888,8 @@ static void ice_sriov_remap_vectors(struct ice_pf *pf, u16 restricted_id)
continue;
ice_dis_vf_mappings(tmp_vf);
- ice_sriov_free_irqs(pf, tmp_vf);
+ ice_virt_free_irqs(pf, tmp_vf->first_vector_idx,
+ tmp_vf->num_msix);
vf_ids[to_remap] = tmp_vf->vf_id;
to_remap += 1;
@@ -1029,7 +901,7 @@ static void ice_sriov_remap_vectors(struct ice_pf *pf, u16 restricted_id)
continue;
tmp_vf->first_vector_idx =
- ice_sriov_get_irqs(pf, tmp_vf->num_msix);
+ ice_virt_get_irqs(pf, tmp_vf->num_msix);
/* there is no need to rebuild VSI as we are only changing the
* vector indexes not amount of MSI-X or queues
*/
@@ -1102,20 +974,15 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
prev_msix = vf->num_msix;
prev_queues = vf->num_vf_qs;
- if (ice_sriov_move_base_vector(pf, msix_vec_count - prev_msix)) {
- ice_put_vf(vf);
- return -ENOSPC;
- }
-
ice_dis_vf_mappings(vf);
- ice_sriov_free_irqs(pf, vf);
+ ice_virt_free_irqs(pf, vf->first_vector_idx, vf->num_msix);
/* Remap all VFs beside the one is now configured */
ice_sriov_remap_vectors(pf, vf->vf_id);
vf->num_msix = msix_vec_count;
vf->num_vf_qs = queues;
- vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix);
+ vf->first_vector_idx = ice_virt_get_irqs(pf, vf->num_msix);
if (vf->first_vector_idx < 0)
goto unroll;
@@ -1144,7 +1011,8 @@ unroll:
vf->num_msix = prev_msix;
vf->num_vf_qs = prev_queues;
- vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix);
+
+ vf->first_vector_idx = ice_virt_get_irqs(pf, vf->num_msix);
if (vf->first_vector_idx < 0) {
ice_put_vf(vf);
return -EINVAL;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 380ba1e8b3b2..1e4f6f6ee449 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -1809,6 +1809,7 @@ dma_error:
static
int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
+ const struct ice_tx_ring *tx_ring = off->tx_ring;
u32 l4_len = 0, l3_len = 0, l2_len = 0;
struct sk_buff *skb = first->skb;
union {
@@ -1958,6 +1959,30 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
l3_len = l4.hdr - ip.hdr;
offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;
+ if ((tx_ring->netdev->features & NETIF_F_HW_CSUM) &&
+ !(first->tx_flags & ICE_TX_FLAGS_TSO) &&
+ !skb_csum_is_sctp(skb)) {
+ /* Set GCS */
+ u16 csum_start = (skb->csum_start - skb->mac_header) / 2;
+ u16 csum_offset = skb->csum_offset / 2;
+ u16 gcs_params;
+
+ gcs_params = FIELD_PREP(ICE_TX_GCS_DESC_START_M, csum_start) |
+ FIELD_PREP(ICE_TX_GCS_DESC_OFFSET_M, csum_offset) |
+ FIELD_PREP(ICE_TX_GCS_DESC_TYPE_M,
+ ICE_TX_GCS_DESC_CSUM_PSH);
+
+ /* Unlike legacy HW checksums, GCS requires a context
+ * descriptor.
+ */
+ off->cd_qw1 |= ICE_TX_DESC_DTYPE_CTX;
+ off->cd_gcs_params = gcs_params;
+ /* Fill out CSO info in data descriptors */
+ off->td_offset |= offset;
+ off->td_cmd |= cmd;
+ return 1;
+ }
+
/* Enable L4 checksum offloads */
switch (l4_proto) {
case IPPROTO_TCP:
@@ -2441,7 +2466,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
/* setup context descriptor */
cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
- cdesc->rsvd = cpu_to_le16(0);
+ cdesc->gcs = cpu_to_le16(offload.cd_gcs_params);
cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 806bce701df3..a4b1e9514632 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -193,6 +193,7 @@ struct ice_tx_offload_params {
u32 td_l2tag1;
u32 cd_tunnel_params;
u16 cd_l2tag2;
+ u16 cd_gcs_params;
u8 header_len;
};
@@ -366,6 +367,7 @@ struct ice_rx_ring {
#define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1)
#define ICE_RX_FLAGS_CRC_STRIP_DIS BIT(2)
#define ICE_RX_FLAGS_MULTIDEV BIT(3)
+#define ICE_RX_FLAGS_RING_GCS BIT(4)
u8 flags;
/* CL5 - 5th cacheline starts here */
struct xdp_rxq_info xdp_rxq;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 2719f0e20933..45cfaabc41cb 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -81,6 +81,23 @@ ice_rx_hash_to_skb(const struct ice_rx_ring *rx_ring,
}
/**
+ * ice_rx_gcs - Set generic checksum in skb
+ * @skb: skb currently being received and modified
+ * @rx_desc: receive descriptor
+ */
+static void ice_rx_gcs(struct sk_buff *skb,
+ const union ice_32b_rx_flex_desc *rx_desc)
+{
+ const struct ice_32b_rx_flex_desc_nic *desc;
+ u16 csum;
+
+ desc = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ csum = (__force u16)desc->raw_csum;
+ skb->csum = csum_unfold((__force __sum16)swab16(csum));
+}
+
+/**
* ice_rx_csum - Indicate in skb if checksum is good
* @ring: the ring we care about
* @skb: skb currently being received and modified
@@ -107,6 +124,15 @@ ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb,
rx_status0 = le16_to_cpu(rx_desc->wb.status_error0);
rx_status1 = le16_to_cpu(rx_desc->wb.status_error1);
+ if ((ring->flags & ICE_RX_FLAGS_RING_GCS) &&
+ rx_desc->wb.rxdid == ICE_RXDID_FLEX_NIC &&
+ (decoded.inner_prot == LIBETH_RX_PT_INNER_TCP ||
+ decoded.inner_prot == LIBETH_RX_PT_INNER_UDP ||
+ decoded.inner_prot == LIBETH_RX_PT_INNER_ICMP)) {
+ ice_rx_gcs(skb, rx_desc);
+ return;
+ }
+
/* check if HW has decoded the packet and checksum */
if (!(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
return;
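/* Generic illustration (not driver code) of the ones'-complement fold
 * behind CHECKSUM_COMPLETE: ice_rx_gcs() hands the stack a raw 16-bit
 * checksum over the whole packet (byte-swapped from the descriptor),
 * and the core folds and verifies it for whatever L4 protocol follows.
 */
static inline u16 ex_csum_fold(u32 sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* fold carries */
	sum = (sum & 0xffff) + (sum >> 16);	/* fold the new carry */
	return ~sum & 0xffff;			/* ones' complement */
}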
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 33a1a5934c0d..0aab21113cc4 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -871,14 +871,6 @@ union ice_phy_params {
struct ice_eth56g_params eth56g;
};
-/* PHY model */
-enum ice_phy_model {
- ICE_PHY_UNSUP = -1,
- ICE_PHY_E810 = 1,
- ICE_PHY_E82X,
- ICE_PHY_ETH56G,
-};
-
/* Global Link Topology */
enum ice_global_link_topo {
ICE_LINK_TOPO_UP_TO_2_LINKS,
@@ -888,7 +880,6 @@ enum ice_global_link_topo {
};
struct ice_ptp_hw {
- enum ice_phy_model phy_model;
union ice_phy_params phy;
u8 num_lports;
u8 ports_per_phy;
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
index 4261fe1c2bcd..799b2c1f1184 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
@@ -124,6 +124,9 @@ struct ice_vf {
u8 spoofchk:1;
u8 link_forced:1;
u8 link_up:1; /* only valid if VF link is forced */
+
+ u32 ptp_caps;
+
unsigned int min_tx_rate; /* Minimum Tx bandwidth limit in Mbps */
unsigned int max_tx_rate; /* Maximum Tx bandwidth limit in Mbps */
DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index ff4ad788d96a..7c3006eb68dd 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -498,6 +498,9 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_QOS)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_QOS;
+ if (vf->driver_caps & VIRTCHNL_VF_CAP_PTP)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_PTP;
+
vfres->num_vsis = 1;
/* Tx and Rx queue are equal for VF */
vfres->num_queue_pairs = vsi->num_txq;
@@ -562,7 +565,7 @@ bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
*
* check for the valid queue ID
*/
-static bool ice_vc_isvalid_q_id(struct ice_vsi *vsi, u8 qid)
+static bool ice_vc_isvalid_q_id(struct ice_vsi *vsi, u16 qid)
{
/* allocated Tx and Rx queues should be always equal for VF VSI */
return qid < vsi->alloc_txq;
@@ -1862,15 +1865,33 @@ static int ice_vc_cfg_q_bw(struct ice_vf *vf, u8 *msg)
for (i = 0; i < qbw->num_queues; i++) {
if (qbw->cfg[i].shaper.peak != 0 && vf->max_tx_rate != 0 &&
- qbw->cfg[i].shaper.peak > vf->max_tx_rate)
+ qbw->cfg[i].shaper.peak > vf->max_tx_rate) {
dev_warn(ice_pf_to_dev(vf->pf), "The maximum queue %d rate limit configuration may not take effect because the maximum TX rate for VF-%d is %d\n",
qbw->cfg[i].queue_id, vf->vf_id,
vf->max_tx_rate);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
if (qbw->cfg[i].shaper.committed != 0 && vf->min_tx_rate != 0 &&
- qbw->cfg[i].shaper.committed < vf->min_tx_rate)
+ qbw->cfg[i].shaper.committed < vf->min_tx_rate) {
dev_warn(ice_pf_to_dev(vf->pf), "The minimum queue %d rate limit configuration may not take effect because the minimum TX rate for VF-%d is %d\n",
qbw->cfg[i].queue_id, vf->vf_id,
- vf->max_tx_rate);
+ vf->min_tx_rate);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+ if (qbw->cfg[i].queue_id > vf->num_vf_qs) {
+ dev_warn(ice_pf_to_dev(vf->pf), "VF-%d trying to configure invalid queue_id\n",
+ vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+ if (qbw->cfg[i].tc >= ICE_MAX_TRAFFIC_CLASS) {
+ dev_warn(ice_pf_to_dev(vf->pf), "VF-%d trying to configure a traffic class higher than allowed\n",
+ vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
}
for (i = 0; i < qbw->num_queues; i++) {
@@ -1900,13 +1921,21 @@ err:
*/
static int ice_vc_cfg_q_quanta(struct ice_vf *vf, u8 *msg)
{
+ u16 quanta_prof_id, quanta_size, start_qid, num_queues, end_qid, i;
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- u16 quanta_prof_id, quanta_size, start_qid, end_qid, i;
struct virtchnl_quanta_cfg *qquanta =
(struct virtchnl_quanta_cfg *)msg;
struct ice_vsi *vsi;
int ret;
+ start_qid = qquanta->queue_select.start_queue_id;
+ num_queues = qquanta->queue_select.num_queues;
+
+ if (check_add_overflow(start_qid, num_queues, &end_qid)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto err;
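/* Why the check_add_overflow() above is needed: start_queue_id and
 * num_queues are both u16 and come from an untrusted VF, so a plain
 * addition can wrap and slip past the end_qid bounds checks that
 * follow. Minimal usage sketch (helper name is hypothetical):
 */
#include <linux/overflow.h>

static inline bool ex_qid_range_valid(u16 start, u16 num)
{
	u16 end;

	/* check_add_overflow() returns true when start + num wraps */
	return !check_add_overflow(start, num, &end);
}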
@@ -1918,8 +1947,6 @@ static int ice_vc_cfg_q_quanta(struct ice_vf *vf, u8 *msg)
goto err;
}
- end_qid = qquanta->queue_select.start_queue_id +
- qquanta->queue_select.num_queues;
if (end_qid > ICE_MAX_RSS_QS_PER_VF ||
end_qid > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
dev_err(ice_pf_to_dev(vf->pf), "VF-%d trying to configure more than allocated number of queues: %d\n",
@@ -1948,7 +1975,6 @@ static int ice_vc_cfg_q_quanta(struct ice_vf *vf, u8 *msg)
goto err;
}
- start_qid = qquanta->queue_select.start_queue_id;
for (i = start_qid; i < end_qid; i++)
vsi->tx_rings[i]->quanta_prof_id = quanta_prof_id;
@@ -1975,6 +2001,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
struct ice_vsi *vsi;
u8 act_prt, pri_prt;
int i = -1, q_idx;
+ bool ena_ts;
lag = pf->lag;
mutex_lock(&pf->lag_mutex);
@@ -2104,9 +2131,14 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
rxdid = ICE_RXDID_LEGACY_1;
}
+ ena_ts = ((vf->driver_caps &
+ VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) &&
+ (vf->driver_caps & VIRTCHNL_VF_CAP_PTP) &&
+ (qpi->rxq.flags & VIRTCHNL_PTP_RX_TSTAMP));
+
ice_write_qrxflxp_cntxt(&vsi->back->hw,
- vsi->rxq_map[q_idx],
- rxdid, 0x03, false);
+ vsi->rxq_map[q_idx], rxdid,
+ ICE_RXDID_PRIO, ena_ts);
}
}
@@ -3031,8 +3063,8 @@ err:
static int ice_vc_query_rxdid(struct ice_vf *vf)
{
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_supported_rxdids rxdid = {};
struct ice_pf *pf = vf->pf;
+ u64 rxdid;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -3044,7 +3076,7 @@ static int ice_vc_query_rxdid(struct ice_vf *vf)
goto err;
}
- rxdid.supported_rxdids = pf->supported_rxdids;
+ rxdid = pf->supported_rxdids;
err:
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
@@ -4092,6 +4124,59 @@ out:
v_ret, NULL, 0);
}
+static int ice_vc_get_ptp_cap(struct ice_vf *vf,
+ const struct virtchnl_ptp_caps *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ u32 caps = VIRTCHNL_1588_PTP_CAP_RX_TSTAMP |
+ VIRTCHNL_1588_PTP_CAP_READ_PHC;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ goto err;
+
+ v_ret = VIRTCHNL_STATUS_SUCCESS;
+
+ if (msg->caps & caps)
+ vf->ptp_caps = caps;
+
+err:
+ /* send the response back to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_1588_PTP_GET_CAPS, v_ret,
+ (u8 *)&vf->ptp_caps,
+ sizeof(struct virtchnl_ptp_caps));
+}
+
+static int ice_vc_get_phc_time(struct ice_vf *vf)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ struct virtchnl_phc_time *phc_time = NULL;
+ struct ice_pf *pf = vf->pf;
+ u32 len = 0;
+ int ret;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ goto err;
+
+ v_ret = VIRTCHNL_STATUS_SUCCESS;
+
+ phc_time = kzalloc(sizeof(*phc_time), GFP_KERNEL);
+ if (!phc_time) {
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ goto err;
+ }
+
+ len = sizeof(*phc_time);
+
+ phc_time->time = ice_ptp_read_src_clk_reg(pf, NULL);
+
+err:
+ /* send the response back to the VF */
+ ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_1588_PTP_GET_TIME, v_ret,
+ (u8 *)phc_time, len);
+ kfree(phc_time);
+ return ret;
+}
+
static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {
.get_ver_msg = ice_vc_get_ver_msg,
.get_vf_res_msg = ice_vc_get_vf_res_msg,
@@ -4128,6 +4213,8 @@ static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {
.get_qos_caps = ice_vc_get_qos_caps,
.cfg_q_bw = ice_vc_cfg_q_bw,
.cfg_q_quanta = ice_vc_cfg_q_quanta,
+ .get_ptp_cap = ice_vc_get_ptp_cap,
+ .get_phc_time = ice_vc_get_phc_time,
/* If you add a new op here please make sure to add it to
* ice_virtchnl_repr_ops as well.
*/
@@ -4264,6 +4351,8 @@ static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = {
.get_qos_caps = ice_vc_get_qos_caps,
.cfg_q_bw = ice_vc_cfg_q_bw,
.cfg_q_quanta = ice_vc_cfg_q_quanta,
+ .get_ptp_cap = ice_vc_get_ptp_cap,
+ .get_phc_time = ice_vc_get_phc_time,
};
/**
@@ -4501,6 +4590,12 @@ error_handler:
case VIRTCHNL_OP_CONFIG_QUANTA:
err = ops->cfg_q_quanta(vf, msg);
break;
+ case VIRTCHNL_OP_1588_PTP_GET_CAPS:
+ err = ops->get_ptp_cap(vf, (const void *)msg);
+ break;
+ case VIRTCHNL_OP_1588_PTP_GET_TIME:
+ err = ops->get_phc_time(vf);
+ break;
case VIRTCHNL_OP_UNKNOWN:
default:
dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.h b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
index 0c629aef9baf..222990f229d5 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
@@ -26,6 +26,9 @@
#define ICE_MAX_MACADDR_PER_VF 18
#define ICE_FLEX_DESC_RXDID_MAX_NUM 64
+/* Priority to be compared against previous priority from the pipe */
+#define ICE_RXDID_PRIO 0x03
+
/* VFs only get a single VSI. For ice hardware, the VF does not need to know
* its VSI index. However, the virtchnl interface requires a VSI number,
* mainly due to legacy hardware.
@@ -72,6 +75,9 @@ struct ice_virtchnl_ops {
int (*cfg_q_tc_map)(struct ice_vf *vf, u8 *msg);
int (*cfg_q_bw)(struct ice_vf *vf, u8 *msg);
int (*cfg_q_quanta)(struct ice_vf *vf, u8 *msg);
+ int (*get_ptp_cap)(struct ice_vf *vf,
+ const struct virtchnl_ptp_caps *msg);
+ int (*get_phc_time)(struct ice_vf *vf);
};
#ifdef CONFIG_PCI_IOV
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
index c105a82ee136..a3d1579a619a 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
@@ -84,6 +84,12 @@ static const u32 fdir_pf_allowlist_opcodes[] = {
VIRTCHNL_OP_ADD_FDIR_FILTER, VIRTCHNL_OP_DEL_FDIR_FILTER,
};
+/* VIRTCHNL_VF_CAP_PTP */
+static const u32 ptp_allowlist_opcodes[] = {
+ VIRTCHNL_OP_1588_PTP_GET_CAPS,
+ VIRTCHNL_OP_1588_PTP_GET_TIME,
+};
+
static const u32 tc_allowlist_opcodes[] = {
VIRTCHNL_OP_GET_QOS_CAPS, VIRTCHNL_OP_CONFIG_QUEUE_BW,
VIRTCHNL_OP_CONFIG_QUANTA,
@@ -110,6 +116,7 @@ static const struct allowlist_opcode_info allowlist_opcodes[] = {
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_FDIR_PF, fdir_pf_allowlist_opcodes),
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_VLAN_V2, vlan_v2_allowlist_opcodes),
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_QOS, tc_allowlist_opcodes),
+ ALLOW_ITEM(VIRTCHNL_VF_CAP_PTP, ptp_allowlist_opcodes),
};
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
index 14e3f0f89c78..9be4bd717512 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
@@ -832,21 +832,27 @@ ice_vc_fdir_parse_raw(struct ice_vf *vf,
struct virtchnl_proto_hdrs *proto,
struct virtchnl_fdir_fltr_conf *conf)
{
- u8 *pkt_buf, *msk_buf __free(kfree);
+ u8 *pkt_buf, *msk_buf __free(kfree) = NULL;
struct ice_parser_result rslt;
struct ice_pf *pf = vf->pf;
+ u16 pkt_len, udp_port = 0;
struct ice_parser *psr;
int status = -ENOMEM;
struct ice_hw *hw;
- u16 udp_port = 0;
- pkt_buf = kzalloc(proto->raw.pkt_len, GFP_KERNEL);
- msk_buf = kzalloc(proto->raw.pkt_len, GFP_KERNEL);
+ pkt_len = proto->raw.pkt_len;
+
+ if (!pkt_len || pkt_len > VIRTCHNL_MAX_SIZE_RAW_PACKET)
+ return -EINVAL;
+
+ pkt_buf = kzalloc(pkt_len, GFP_KERNEL);
+ msk_buf = kzalloc(pkt_len, GFP_KERNEL);
+
if (!pkt_buf || !msk_buf)
goto err_mem_alloc;
- memcpy(pkt_buf, proto->raw.spec, proto->raw.pkt_len);
- memcpy(msk_buf, proto->raw.mask, proto->raw.pkt_len);
+ memcpy(pkt_buf, proto->raw.spec, pkt_len);
+ memcpy(msk_buf, proto->raw.mask, pkt_len);
hw = &pf->hw;
@@ -862,7 +868,7 @@ ice_vc_fdir_parse_raw(struct ice_vf *vf,
if (ice_get_open_tunnel_port(hw, &udp_port, TNL_VXLAN))
ice_parser_vxlan_tunnel_set(psr, udp_port, true);
- status = ice_parser_run(psr, pkt_buf, proto->raw.pkt_len, &rslt);
+ status = ice_parser_run(psr, pkt_buf, pkt_len, &rslt);
if (status)
goto err_parser_destroy;
@@ -876,7 +882,7 @@ ice_vc_fdir_parse_raw(struct ice_vf *vf,
}
status = ice_parser_profile_init(&rslt, pkt_buf, msk_buf,
- proto->raw.pkt_len, ICE_BLK_FD,
+ pkt_len, ICE_BLK_FD,
conf->prof);
if (status)
goto err_parser_profile_init;
@@ -885,7 +891,7 @@ ice_vc_fdir_parse_raw(struct ice_vf *vf,
ice_parser_profile_dump(hw, conf->prof);
/* Store raw flow info into @conf */
- conf->pkt_len = proto->raw.pkt_len;
+ conf->pkt_len = pkt_len;
conf->pkt_buf = pkt_buf;
conf->parser_ena = true;
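/* The "__free(kfree) = NULL" change above matters because scoped
 * cleanup runs on every exit path: had the function returned before
 * msk_buf was assigned (e.g. on the new pkt_len check), kfree() would
 * have run on an uninitialized pointer. A sketch of the safe pattern,
 * with a hypothetical bound EX_MAX_LEN:
 */
#include <linux/cleanup.h>
#include <linux/slab.h>

static int ex_parse(const u8 *src, u16 len)
{
	u8 *buf __free(kfree) = NULL;	/* kfree(NULL) is a no-op */

	if (!len || len > EX_MAX_LEN)
		return -EINVAL;		/* cleanup sees NULL here */

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	memcpy(buf, src, len);
	return 0;			/* buf freed automatically */
}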
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 8975d2971bc3..a3a4eaa17739 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2019, Intel Corporation. */
#include <linux/bpf_trace.h>
+#include <linux/unroll.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>
#include "ice.h"
@@ -989,7 +990,8 @@ static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring,
struct ice_tx_desc *tx_desc;
u32 i;
- loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
+ unrolled_count(PKTS_PER_BATCH)
+ for (i = 0; i < PKTS_PER_BATCH; i++) {
dma_addr_t dma;
dma = xsk_buff_raw_get_dma(xsk_pool, descs[i].addr);
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
index 45adeb513253..8dc5d55e26c5 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.h
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -7,14 +7,6 @@
#define PKTS_PER_BATCH 8
-#ifdef __clang__
-#define loop_unrolled_for _Pragma("clang loop unroll_count(8)") for
-#elif __GNUC__ >= 8
-#define loop_unrolled_for _Pragma("GCC unroll 8") for
-#else
-#define loop_unrolled_for for
-#endif
-
struct ice_vsi;
#ifdef CONFIG_XDP_SOCKETS
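/* unrolled_count() from <linux/unroll.h> centralizes the
 * compiler-specific pragmas that the deleted loop_unrolled_for macro
 * open-coded per driver. Usage sketch (the loop body is illustrative):
 */
#include <linux/unroll.h>

unrolled_count(PKTS_PER_BATCH)
for (i = 0; i < PKTS_PER_BATCH; i++)
	ex_process_desc(i);	/* hypothetical per-descriptor work */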
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index a3d6b8f198a8..aa755dedb41d 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -814,6 +814,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
netdev->hw_features |= dflt_features | offloads;
netdev->hw_enc_features |= dflt_features | offloads;
idpf_set_ethtool_ops(netdev);
+ netif_set_affinity_auto(netdev);
SET_NETDEV_DEV(netdev, &adapter->pdev->dev);
/* carrier off on init to avoid Tx hangs */
@@ -927,15 +928,19 @@ static int idpf_stop(struct net_device *netdev)
static void idpf_decfg_netdev(struct idpf_vport *vport)
{
struct idpf_adapter *adapter = vport->adapter;
+ u16 idx = vport->idx;
kfree(vport->rx_ptype_lkup);
vport->rx_ptype_lkup = NULL;
- unregister_netdev(vport->netdev);
- free_netdev(vport->netdev);
+ if (test_and_clear_bit(IDPF_VPORT_REG_NETDEV,
+ adapter->vport_config[idx]->flags)) {
+ unregister_netdev(vport->netdev);
+ free_netdev(vport->netdev);
+ }
vport->netdev = NULL;
- adapter->netdevs[vport->idx] = NULL;
+ adapter->netdevs[idx] = NULL;
}
/**
@@ -1536,13 +1541,22 @@ void idpf_init_task(struct work_struct *work)
}
for (index = 0; index < adapter->max_vports; index++) {
- if (adapter->netdevs[index] &&
- !test_bit(IDPF_VPORT_REG_NETDEV,
- adapter->vport_config[index]->flags)) {
- register_netdev(adapter->netdevs[index]);
- set_bit(IDPF_VPORT_REG_NETDEV,
- adapter->vport_config[index]->flags);
+ struct net_device *netdev = adapter->netdevs[index];
+ struct idpf_vport_config *vport_config;
+
+ vport_config = adapter->vport_config[index];
+
+ if (!netdev ||
+ test_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags))
+ continue;
+
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register netdev for vport %d: %pe\n",
+ index, ERR_PTR(err));
+ continue;
}
+ set_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags);
}
/* As all the required vports are created, clear the reset flag
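/* Aside on the "%pe" format used in the new error path above: passing
 * ERR_PTR(err) to %pe prints the symbolic errno name (e.g. "-ENOMEM")
 * rather than a raw integer, so the per-vport failure log is
 * self-explanatory. Generic sketch:
 */
if (err)
	pr_err("request failed: %pe\n", ERR_PTR(err));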
diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
index dfd7cf1d9aa0..eae1b6f474e6 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
@@ -595,7 +595,7 @@ static bool idpf_rx_singleq_is_non_eop(const union virtchnl2_rx_desc *rx_desc)
*/
static void idpf_rx_singleq_csum(struct idpf_rx_queue *rxq,
struct sk_buff *skb,
- struct idpf_rx_csum_decoded csum_bits,
+ struct libeth_rx_csum csum_bits,
struct libeth_rx_pt decoded)
{
bool ipv4, ipv6;
@@ -661,10 +661,10 @@ checksum_fail:
*
* Return: parsed checksum status.
**/
-static struct idpf_rx_csum_decoded
+static struct libeth_rx_csum
idpf_rx_singleq_base_csum(const union virtchnl2_rx_desc *rx_desc)
{
- struct idpf_rx_csum_decoded csum_bits = { };
+ struct libeth_rx_csum csum_bits = { };
u32 rx_error, rx_status;
u64 qword;
@@ -696,10 +696,10 @@ idpf_rx_singleq_base_csum(const union virtchnl2_rx_desc *rx_desc)
*
* Return: parsed checksum status.
**/
-static struct idpf_rx_csum_decoded
+static struct libeth_rx_csum
idpf_rx_singleq_flex_csum(const union virtchnl2_rx_desc *rx_desc)
{
- struct idpf_rx_csum_decoded csum_bits = { };
+ struct libeth_rx_csum csum_bits = { };
u16 rx_status0, rx_status1;
rx_status0 = le16_to_cpu(rx_desc->flex_nic_wb.status_error0);
@@ -798,7 +798,7 @@ idpf_rx_singleq_process_skb_fields(struct idpf_rx_queue *rx_q,
u16 ptype)
{
struct libeth_rx_pt decoded = rx_q->rx_ptype_lkup[ptype];
- struct idpf_rx_csum_decoded csum_bits;
+ struct libeth_rx_csum csum_bits;
/* modifies the skb - consumes the enet header */
skb->protocol = eth_type_trans(skb, rx_q->netdev);
@@ -891,6 +891,7 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rx_q,
* idpf_rx_singleq_extract_base_fields - Extract fields from the Rx descriptor
* @rx_desc: the descriptor to process
* @fields: storage for extracted values
+ * @ptype: pointer that will store packet type
*
* Decode the Rx descriptor and extract relevant information including the
* size and Rx packet type.
@@ -900,20 +901,21 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rx_q,
*/
static void
idpf_rx_singleq_extract_base_fields(const union virtchnl2_rx_desc *rx_desc,
- struct idpf_rx_extracted *fields)
+ struct libeth_rqe_info *fields, u32 *ptype)
{
u64 qword;
qword = le64_to_cpu(rx_desc->base_wb.qword1.status_error_ptype_len);
- fields->size = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_M, qword);
- fields->rx_ptype = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_M, qword);
+ fields->len = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_M, qword);
+ *ptype = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_M, qword);
}
/**
* idpf_rx_singleq_extract_flex_fields - Extract fields from the Rx descriptor
* @rx_desc: the descriptor to process
* @fields: storage for extracted values
+ * @ptype: pointer that will store packet type
*
* Decode the Rx descriptor and extract relevant information including the
* size and Rx packet type.
@@ -923,12 +925,12 @@ idpf_rx_singleq_extract_base_fields(const union virtchnl2_rx_desc *rx_desc,
*/
static void
idpf_rx_singleq_extract_flex_fields(const union virtchnl2_rx_desc *rx_desc,
- struct idpf_rx_extracted *fields)
+ struct libeth_rqe_info *fields, u32 *ptype)
{
- fields->size = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M,
- le16_to_cpu(rx_desc->flex_nic_wb.pkt_len));
- fields->rx_ptype = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PTYPE_M,
- le16_to_cpu(rx_desc->flex_nic_wb.ptype_flex_flags0));
+ fields->len = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M,
+ le16_to_cpu(rx_desc->flex_nic_wb.pkt_len));
+ *ptype = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PTYPE_M,
+ le16_to_cpu(rx_desc->flex_nic_wb.ptype_flex_flags0));
}
/**
@@ -936,17 +938,18 @@ idpf_rx_singleq_extract_flex_fields(const union virtchnl2_rx_desc *rx_desc,
* @rx_q: Rx descriptor queue
* @rx_desc: the descriptor to process
* @fields: storage for extracted values
+ * @ptype: pointer that will store packet type
*
*/
static void
idpf_rx_singleq_extract_fields(const struct idpf_rx_queue *rx_q,
const union virtchnl2_rx_desc *rx_desc,
- struct idpf_rx_extracted *fields)
+ struct libeth_rqe_info *fields, u32 *ptype)
{
if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M)
- idpf_rx_singleq_extract_base_fields(rx_desc, fields);
+ idpf_rx_singleq_extract_base_fields(rx_desc, fields, ptype);
else
- idpf_rx_singleq_extract_flex_fields(rx_desc, fields);
+ idpf_rx_singleq_extract_flex_fields(rx_desc, fields, ptype);
}
/**
@@ -966,9 +969,10 @@ static int idpf_rx_singleq_clean(struct idpf_rx_queue *rx_q, int budget)
/* Process Rx packets bounded by budget */
while (likely(total_rx_pkts < (unsigned int)budget)) {
- struct idpf_rx_extracted fields = { };
+ struct libeth_rqe_info fields = { };
union virtchnl2_rx_desc *rx_desc;
struct idpf_rx_buf *rx_buf;
+ u32 ptype;
/* get the Rx desc from Rx queue based on 'next_to_clean' */
rx_desc = &rx_q->rx[ntc];
@@ -989,16 +993,16 @@ static int idpf_rx_singleq_clean(struct idpf_rx_queue *rx_q, int budget)
*/
dma_rmb();
- idpf_rx_singleq_extract_fields(rx_q, rx_desc, &fields);
+ idpf_rx_singleq_extract_fields(rx_q, rx_desc, &fields, &ptype);
rx_buf = &rx_q->rx_buf[ntc];
- if (!libeth_rx_sync_for_cpu(rx_buf, fields.size))
+ if (!libeth_rx_sync_for_cpu(rx_buf, fields.len))
goto skip_data;
if (skb)
- idpf_rx_add_frag(rx_buf, skb, fields.size);
+ idpf_rx_add_frag(rx_buf, skb, fields.len);
else
- skb = idpf_rx_build_skb(rx_buf, fields.size);
+ skb = idpf_rx_build_skb(rx_buf, fields.len);
/* exit if we failed to retrieve a buffer */
if (!skb)
@@ -1033,8 +1037,7 @@ skip_data:
total_rx_bytes += skb->len;
/* protocol */
- idpf_rx_singleq_process_skb_fields(rx_q, skb,
- rx_desc, fields.rx_ptype);
+ idpf_rx_singleq_process_skb_fields(rx_q, skb, rx_desc, ptype);
/* send completed skb up the stack */
napi_gro_receive(rx_q->pp->p.napi, skb);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 977741c41498..bdf52cef3891 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -2895,7 +2895,7 @@ idpf_rx_hash(const struct idpf_rx_queue *rxq, struct sk_buff *skb,
* skb->protocol must be set before this function is called
*/
static void idpf_rx_csum(struct idpf_rx_queue *rxq, struct sk_buff *skb,
- struct idpf_rx_csum_decoded csum_bits,
+ struct libeth_rx_csum csum_bits,
struct libeth_rx_pt decoded)
{
bool ipv4, ipv6;
@@ -2923,7 +2923,7 @@ static void idpf_rx_csum(struct idpf_rx_queue *rxq, struct sk_buff *skb,
if (unlikely(csum_bits.l4e))
goto checksum_fail;
- if (csum_bits.raw_csum_inv ||
+ if (!csum_bits.raw_csum_valid ||
decoded.inner_prot == LIBETH_RX_PT_INNER_SCTP) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
return;
@@ -2946,10 +2946,10 @@ checksum_fail:
*
* Return: parsed checksum status.
**/
-static struct idpf_rx_csum_decoded
+static struct libeth_rx_csum
idpf_rx_splitq_extract_csum_bits(const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
{
- struct idpf_rx_csum_decoded csum = { };
+ struct libeth_rx_csum csum = { };
u8 qword0, qword1;
qword0 = rx_desc->status_err0_qw0;
@@ -2965,9 +2965,9 @@ idpf_rx_splitq_extract_csum_bits(const struct virtchnl2_rx_flex_desc_adv_nic_3 *
qword1);
csum.ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M,
qword0);
- csum.raw_csum_inv =
- le16_get_bits(rx_desc->ptype_err_fflags0,
- VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M);
+ csum.raw_csum_valid =
+ !le16_get_bits(rx_desc->ptype_err_fflags0,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M);
csum.raw_csum = le16_to_cpu(rx_desc->misc.raw_cs);
return csum;
@@ -3059,7 +3059,7 @@ static int
idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
{
- struct idpf_rx_csum_decoded csum_bits;
+ struct libeth_rx_csum csum_bits;
struct libeth_rx_pt decoded;
u16 rx_ptype;
@@ -3552,8 +3552,6 @@ void idpf_vport_intr_rel(struct idpf_vport *vport)
q_vector->tx = NULL;
kfree(q_vector->rx);
q_vector->rx = NULL;
-
- free_cpumask_var(q_vector->affinity_mask);
}
kfree(vport->q_vectors);
@@ -3580,8 +3578,6 @@ static void idpf_vport_intr_rel_irq(struct idpf_vport *vport)
vidx = vport->q_vector_idxs[vector];
irq_num = adapter->msix_entries[vidx].vector;
- /* clear the affinity_mask in the IRQ descriptor */
- irq_set_affinity_hint(irq_num, NULL);
kfree(free_irq(irq_num, q_vector));
}
}
@@ -3769,8 +3765,6 @@ static int idpf_vport_intr_req_irq(struct idpf_vport *vport)
"Request_irq failed, error: %d\n", err);
goto free_q_irqs;
}
- /* assign the mask for this irq */
- irq_set_affinity_hint(irq_num, q_vector->affinity_mask);
}
return 0;
@@ -4182,7 +4176,8 @@ static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport)
static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
{
int (*napi_poll)(struct napi_struct *napi, int budget);
- u16 v_idx;
+ u16 v_idx, qv_idx;
+ int irq_num;
if (idpf_is_queue_model_split(vport->txq_model))
napi_poll = idpf_vport_splitq_napi_poll;
@@ -4191,12 +4186,12 @@ static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
+ qv_idx = vport->q_vector_idxs[v_idx];
+ irq_num = vport->adapter->msix_entries[qv_idx].vector;
- netif_napi_add(vport->netdev, &q_vector->napi, napi_poll);
-
- /* only set affinity_mask if the CPU is online */
- if (cpu_online(v_idx))
- cpumask_set_cpu(v_idx, q_vector->affinity_mask);
+ netif_napi_add_config(vport->netdev, &q_vector->napi,
+ napi_poll, v_idx);
+ netif_napi_set_irq(&q_vector->napi, irq_num);
}
}
@@ -4240,9 +4235,6 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport)
q_vector->rx_intr_mode = IDPF_ITR_DYNAMIC;
q_vector->rx_itr_idx = VIRTCHNL2_ITR_IDX_0;
- if (!zalloc_cpumask_var(&q_vector->affinity_mask, GFP_KERNEL))
- goto error;
-
q_vector->tx = kcalloc(txqs_per_vector, sizeof(*q_vector->tx),
GFP_KERNEL);
if (!q_vector->tx)
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index 0f71a6f5557b..b029f566e57c 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -213,25 +213,6 @@ enum idpf_tx_ctx_desc_eipt_offload {
IDPF_TX_CTX_EXT_IP_IPV4 = 0x3
};
-/* Checksum offload bits decoded from the receive descriptor. */
-struct idpf_rx_csum_decoded {
- u32 l3l4p : 1;
- u32 ipe : 1;
- u32 eipe : 1;
- u32 eudpe : 1;
- u32 ipv6exadd : 1;
- u32 l4e : 1;
- u32 pprs : 1;
- u32 nat : 1;
- u32 raw_csum_inv : 1;
- u32 raw_csum : 16;
-};
-
-struct idpf_rx_extracted {
- unsigned int size;
- u16 rx_ptype;
-};
-
#define IDPF_TX_COMPLQ_CLEAN_BUDGET 256
#define IDPF_TX_MIN_PKT_LEN 17
#define IDPF_TX_DESCS_FOR_SKB_DATA_PTR 1
@@ -401,7 +382,6 @@ struct idpf_intr_reg {
* @rx_intr_mode: Dynamic ITR or not
* @rx_itr_idx: RX ITR index
* @v_idx: Vector index
- * @affinity_mask: CPU affinity mask
*/
struct idpf_q_vector {
__cacheline_group_begin_aligned(read_mostly);
@@ -438,13 +418,12 @@ struct idpf_q_vector {
__cacheline_group_begin_aligned(cold);
u16 v_idx;
- cpumask_var_t affinity_mask;
__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_q_vector, 120,
24 + sizeof(struct napi_struct) +
2 * sizeof(struct dim),
- 8 + sizeof(cpumask_var_t));
+ 8);
struct idpf_rx_queue_stats {
u64_stats_t packets;
@@ -940,7 +919,7 @@ static inline int idpf_q_vector_to_mem(const struct idpf_q_vector *q_vector)
if (!q_vector)
return NUMA_NO_NODE;
- cpu = cpumask_first(q_vector->affinity_mask);
+ cpu = cpumask_first(&q_vector->napi.config->affinity_mask);
return cpu < nr_cpu_ids ? cpu_to_mem(cpu) : NUMA_NO_NODE;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index f94570556120..f323e1c1989f 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -509,6 +509,12 @@ static int igb_ptp_feature_enable_82580(struct ptp_clock_info *ptp,
PTP_STRICT_FLAGS))
return -EOPNOTSUPP;
+ /* Both the rising and falling edges are timestamped */
+ if (rq->extts.flags & PTP_STRICT_FLAGS &&
+ (rq->extts.flags & PTP_ENABLE_FEATURE) &&
+ (rq->extts.flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES)
+ return -EOPNOTSUPP;
+
if (on) {
pin = ptp_find_pin(igb->ptp_clock, PTP_PF_EXTTS,
rq->extts.index);
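/* For context: in the PTP uapi, PTP_EXTTS_EDGES is the pair
 * (PTP_RISING_EDGE | PTP_FALLING_EDGE). Since 82580-class hardware
 * always stamps both edges, the new test rejects a strict request
 * unless userspace asked for exactly that pair.
 */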
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index b8111ad9a9a8..cd1d7b6c1782 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -579,6 +579,7 @@ struct igc_metadata_request {
struct xsk_tx_metadata *meta;
struct igc_ring *tx_ring;
u32 cmd_type;
+ u16 used_desc;
};
struct igc_q_vector {
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 84307bb7313e..491d942cefca 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -1092,7 +1092,8 @@ static int igc_init_empty_frame(struct igc_ring *ring,
dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE);
if (dma_mapping_error(ring->dev, dma)) {
- netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
+ net_err_ratelimited("%s: DMA mapping error for empty frame\n",
+ netdev_name(ring->netdev));
return -ENOMEM;
}
@@ -1108,20 +1109,12 @@ static int igc_init_empty_frame(struct igc_ring *ring,
return 0;
}
-static int igc_init_tx_empty_descriptor(struct igc_ring *ring,
- struct sk_buff *skb,
- struct igc_tx_buffer *first)
+static void igc_init_tx_empty_descriptor(struct igc_ring *ring,
+ struct sk_buff *skb,
+ struct igc_tx_buffer *first)
{
union igc_adv_tx_desc *desc;
u32 cmd_type, olinfo_status;
- int err;
-
- if (!igc_desc_unused(ring))
- return -EBUSY;
-
- err = igc_init_empty_frame(ring, first, skb);
- if (err)
- return err;
cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
@@ -1140,8 +1133,6 @@ static int igc_init_tx_empty_descriptor(struct igc_ring *ring,
ring->next_to_use++;
if (ring->next_to_use == ring->count)
ring->next_to_use = 0;
-
- return 0;
}
#define IGC_EMPTY_FRAME_SIZE 60
@@ -1567,6 +1558,40 @@ static bool igc_request_tx_tstamp(struct igc_adapter *adapter, struct sk_buff *s
return false;
}
+static int igc_insert_empty_frame(struct igc_ring *tx_ring)
+{
+ struct igc_tx_buffer *empty_info;
+ struct sk_buff *empty_skb;
+ void *data;
+ int ret;
+
+ empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ empty_skb = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC);
+ if (unlikely(!empty_skb)) {
+ net_err_ratelimited("%s: skb alloc error for empty frame\n",
+ netdev_name(tx_ring->netdev));
+ return -ENOMEM;
+ }
+
+ data = skb_put(empty_skb, IGC_EMPTY_FRAME_SIZE);
+ memset(data, 0, IGC_EMPTY_FRAME_SIZE);
+
+ /* Prepare DMA mapping and Tx buffer information */
+ ret = igc_init_empty_frame(tx_ring, empty_info, empty_skb);
+ if (unlikely(ret)) {
+ dev_kfree_skb_any(empty_skb);
+ return ret;
+ }
+
+ /* Prepare advanced context descriptor for empty packet */
+ igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0);
+
+ /* Prepare advanced data descriptor for empty packet */
+ igc_init_tx_empty_descriptor(tx_ring, empty_skb, empty_info);
+
+ return 0;
+}
+
static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
struct igc_ring *tx_ring)
{
@@ -1586,6 +1611,7 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
* + 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD,
* + 2 desc gap to keep tail from touching head,
* + 1 desc for context descriptor,
+ * + 2 desc for inserting an empty packet for launch time,
* otherwise try next time
*/
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
@@ -1605,24 +1631,16 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
launch_time = igc_tx_launchtime(tx_ring, txtime, &first_flag, &insert_empty);
if (insert_empty) {
- struct igc_tx_buffer *empty_info;
- struct sk_buff *empty;
- void *data;
-
- empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
- empty = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC);
- if (!empty)
- goto done;
-
- data = skb_put(empty, IGC_EMPTY_FRAME_SIZE);
- memset(data, 0, IGC_EMPTY_FRAME_SIZE);
-
- igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0);
-
- if (igc_init_tx_empty_descriptor(tx_ring,
- empty,
- empty_info) < 0)
- dev_kfree_skb_any(empty);
+ /* Reset the launch time if the required empty frame fails to
+ * be inserted. However, this packet is not dropped, so it
+ * "dirties" the current Qbv cycle. This ensures that the
+ * upcoming packet, which is scheduled in the next Qbv cycle,
+ * does not require an empty frame. This way, the launch time
+ * continues to function correctly despite the current failure
+ * to insert the empty frame.
+ */
+ if (igc_insert_empty_frame(tx_ring))
+ launch_time = 0;
}
done:
@@ -1650,7 +1668,8 @@ done:
if (igc_request_tx_tstamp(adapter, skb, &tstamp_flags)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= IGC_TX_FLAGS_TSTAMP | tstamp_flags;
- if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP_USE_CYCLES)
+ if (skb->sk &&
+ READ_ONCE(skb->sk->sk_tsflags) & SOF_TIMESTAMPING_BIND_PHC)
tx_flags |= IGC_TX_FLAGS_TSTAMP_TIMER_1;
} else {
adapter->tx_hwtstamp_skipped++;
@@ -2953,9 +2972,48 @@ static u64 igc_xsk_fill_timestamp(void *_priv)
return *(u64 *)_priv;
}
+static void igc_xsk_request_launch_time(u64 launch_time, void *_priv)
+{
+ struct igc_metadata_request *meta_req = _priv;
+ struct igc_ring *tx_ring = meta_req->tx_ring;
+ __le32 launch_time_offset;
+ bool insert_empty = false;
+ bool first_flag = false;
+ u16 used_desc = 0;
+
+ if (!tx_ring->launchtime_enable)
+ return;
+
+ launch_time_offset = igc_tx_launchtime(tx_ring,
+ ns_to_ktime(launch_time),
+ &first_flag, &insert_empty);
+ if (insert_empty) {
+ /* Disregard the launch time request if the required empty frame
+ * fails to be inserted.
+ */
+ if (igc_insert_empty_frame(tx_ring))
+ return;
+
+ meta_req->tx_buffer =
+ &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ /* Inserting an empty packet requires two descriptors:
+ * one data descriptor and one context descriptor.
+ */
+ used_desc += 2;
+ }
+
+ /* Use one context descriptor to specify launch time and first flag. */
+ igc_tx_ctxtdesc(tx_ring, launch_time_offset, first_flag, 0, 0, 0);
+ used_desc += 1;
+
+ /* Update the number of used descriptors in this request */
+ meta_req->used_desc += used_desc;
+}
+
const struct xsk_tx_metadata_ops igc_xsk_tx_metadata_ops = {
.tmo_request_timestamp = igc_xsk_request_timestamp,
.tmo_fill_timestamp = igc_xsk_fill_timestamp,
+ .tmo_request_launch_time = igc_xsk_request_launch_time,
};
static void igc_xdp_xmit_zc(struct igc_ring *ring)
@@ -2978,7 +3036,13 @@ static void igc_xdp_xmit_zc(struct igc_ring *ring)
ntu = ring->next_to_use;
budget = igc_desc_unused(ring);
- while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) {
+ /* Packets with launch time require one data descriptor and one context
+ * descriptor. When the launch time falls into the next Qbv cycle, we
+ * may need to insert an empty packet, which requires two more
+ * descriptors. Therefore, to be safe, we always ensure we have at least
+ * 4 descriptors available.
+ */
+ while (xsk_tx_peek_desc(pool, &xdp_desc) && budget >= 4) {
struct igc_metadata_request meta_req;
struct xsk_tx_metadata *meta = NULL;
struct igc_tx_buffer *bi;
@@ -2999,9 +3063,19 @@ static void igc_xdp_xmit_zc(struct igc_ring *ring)
meta_req.tx_ring = ring;
meta_req.tx_buffer = bi;
meta_req.meta = meta;
+ meta_req.used_desc = 0;
xsk_tx_metadata_request(meta, &igc_xsk_tx_metadata_ops,
&meta_req);
+ /* xsk_tx_metadata_request() may have updated next_to_use */
+ ntu = ring->next_to_use;
+
+ /* xsk_tx_metadata_request() may have updated Tx buffer info */
+ bi = meta_req.tx_buffer;
+
+ /* xsk_tx_metadata_request() may use a few descriptors */
+ budget -= meta_req.used_desc;
+
tx_desc = IGC_TX_DESC(ring, ntu);
tx_desc->read.cmd_type_len = cpu_to_le32(meta_req.cmd_type);
tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
@@ -3019,9 +3093,11 @@ static void igc_xdp_xmit_zc(struct igc_ring *ring)
ntu++;
if (ntu == ring->count)
ntu = 0;
+
+ ring->next_to_use = ntu;
+ budget--;
}
- ring->next_to_use = ntu;
if (tx_desc) {
igc_flush_tx_descriptors(ring);
xsk_tx_release(pool);
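/* Worked descriptor arithmetic behind the "budget >= 4" check above,
 * for one AF_XDP frame whose launch time rolls into the next Qbv cycle:
 *   1 data descriptor for the frame itself
 * + 1 context descriptor carrying the launch time
 * + 2 descriptors (context + data) for the inserted empty frame
 * = 4 descriptors worst case
 */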
@@ -7090,8 +7166,8 @@ static int igc_probe(struct pci_dev *pdev,
INIT_WORK(&adapter->reset_task, igc_reset_task);
INIT_WORK(&adapter->watchdog_task, igc_watchdog_task);
- hrtimer_init(&adapter->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- adapter->hrtimer.function = &igc_qbv_scheduling_timer;
+ hrtimer_setup(&adapter->hrtimer, &igc_qbv_scheduling_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
/* Initialize link properties that are user-changeable */
adapter->fc_autoneg = true;
diff --git a/drivers/net/ethernet/intel/igc/igc_xdp.c b/drivers/net/ethernet/intel/igc/igc_xdp.c
index 13bbd3346e01..c538e6b18aad 100644
--- a/drivers/net/ethernet/intel/igc/igc_xdp.c
+++ b/drivers/net/ethernet/intel/igc/igc_xdp.c
@@ -14,6 +14,7 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
bool if_running = netif_running(dev);
struct bpf_prog *old_prog;
bool need_update;
+ unsigned int i;
if (dev->mtu > ETH_DATA_LEN) {
/* For now, the driver doesn't support XDP functionality with
@@ -24,8 +25,13 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
}
need_update = !!adapter->xdp_prog != !!prog;
- if (if_running && need_update)
- igc_close(dev);
+ if (if_running && need_update) {
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ igc_disable_rx_ring(adapter->rx_ring[i]);
+ igc_disable_tx_ring(adapter->tx_ring[i]);
+ napi_disable(&adapter->rx_ring[i]->q_vector->napi);
+ }
+ }
old_prog = xchg(&adapter->xdp_prog, prog);
if (old_prog)
@@ -36,8 +42,13 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
else
xdp_features_clear_redirect_target(dev);
- if (if_running && need_update)
- igc_open(dev);
+ if (if_running && need_update) {
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ napi_enable(&adapter->rx_ring[i]->q_vector->napi);
+ igc_enable_tx_ring(adapter->tx_ring[i]);
+ igc_enable_rx_ring(adapter->rx_ring[i]);
+ }
+ }
return 0;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index da91c582d439..f03925c1f521 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -3185,6 +3185,7 @@ static int ixgbe_get_ts_info(struct net_device *dev,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
break;
case ixgbe_mac_X540:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index 866024f2b9ee..07ea1954a276 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -817,30 +817,9 @@ static void ixgbe_ipsec_del_sa(struct xfrm_state *xs)
}
}
-/**
- * ixgbe_ipsec_offload_ok - can this packet use the xfrm hw offload
- * @skb: current data packet
- * @xs: pointer to transformer state struct
- **/
-static bool ixgbe_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
-{
- if (xs->props.family == AF_INET) {
- /* Offload with IPv4 options is not supported yet */
- if (ip_hdr(skb)->ihl != 5)
- return false;
- } else {
- /* Offload with IPv6 extension headers is not support yet */
- if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
- return false;
- }
-
- return true;
-}
-
static const struct xfrmdev_ops ixgbe_xfrmdev_ops = {
.xdo_dev_state_add = ixgbe_ipsec_add_sa,
.xdo_dev_state_delete = ixgbe_ipsec_del_sa,
- .xdo_dev_offload_ok = ixgbe_ipsec_offload_ok,
};
/**
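
The deleted ixgbe callback (and its ixgbevf and cn10k twins further down) performed the same two checks everywhere: refuse IPv4 packets carrying header options and IPv6 packets with extension headers. Dropping .xdo_dev_offload_ok from these drivers only makes sense if that validation now happens centrally; the pattern of the series suggests it moved into the generic xfrm device-offload path (an assumption, not shown in this diff). For reference, the removed test reduces to:

    #include <linux/ip.h>
    #include <linux/ipv6.h>
    #include <linux/skbuff.h>
    #include <linux/socket.h>

    /* The per-driver check the hunks delete, kept here only as a sketch:
     * offload is refused for IPv4 options (ihl != 5) and for any IPv6
     * extension header following the fixed header. */
    static bool demo_ipsec_offload_ok(struct sk_buff *skb, int family)
    {
        if (family == AF_INET)
            return ip_hdr(skb)->ihl == 5;

        return !ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr);
    }
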
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 467f81239e12..481f917f7ed2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -3185,6 +3185,10 @@ static void ixgbe_handle_fw_event(struct ixgbe_adapter *adapter)
case ixgbe_aci_opc_get_link_status:
ixgbe_handle_link_status_event(adapter, &event);
break;
+ case ixgbe_aci_opc_temp_tca_event:
+ e_crit(drv, "%s\n", ixgbe_overheat_msg);
+ ixgbe_down(adapter);
+ break;
default:
e_warn(hw, "unknown FW async event captured\n");
break;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 9339edbd9082..eef25e11d938 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -140,6 +140,7 @@
* proper mult and shift to convert the cycles into nanoseconds of time.
*/
#define IXGBE_X550_BASE_PERIOD 0xC80000000ULL
+#define IXGBE_E610_BASE_PERIOD 0x333333333ULL
#define INCVALUE_MASK 0x7FFFFFFF
#define ISGN 0x80000000
@@ -415,6 +416,7 @@ static void ixgbe_ptp_convert_to_hwtstamp(struct ixgbe_adapter *adapter,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
/* Upper 32 bits represent billions of cycles, lower 32 bits
* represent cycles. However, we use timespec64_to_ns for the
* correct math even though the units haven't been corrected
@@ -492,11 +494,13 @@ static int ixgbe_ptp_adjfine_X550(struct ptp_clock_info *ptp, long scaled_ppm)
struct ixgbe_adapter *adapter =
container_of(ptp, struct ixgbe_adapter, ptp_caps);
struct ixgbe_hw *hw = &adapter->hw;
+ u64 rate, base;
bool neg_adj;
- u64 rate;
u32 inca;
- neg_adj = diff_by_scaled_ppm(IXGBE_X550_BASE_PERIOD, scaled_ppm, &rate);
+ base = hw->mac.type == ixgbe_mac_e610 ? IXGBE_E610_BASE_PERIOD :
+ IXGBE_X550_BASE_PERIOD;
+ neg_adj = diff_by_scaled_ppm(base, scaled_ppm, &rate);
/* warn if rate is too large */
if (rate >= INCVALUE_MASK)
@@ -559,6 +563,7 @@ static int ixgbe_ptp_gettimex(struct ptp_clock_info *ptp,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
/* Upper 32 bits represent billions of cycles, lower 32 bits
* represent cycles. However, we use timespec64_to_ns for the
* correct math even though the units haven't been corrected
@@ -1067,6 +1072,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
/* enable timestamping all packets only if at least some
* packets were requested. Otherwise, play nice and disable
* timestamping
@@ -1233,6 +1239,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
fallthrough;
case ixgbe_mac_x550em_a:
case ixgbe_mac_X550:
+ case ixgbe_mac_e610:
cc.read = ixgbe_ptp_read_X550;
break;
case ixgbe_mac_X540:
@@ -1280,6 +1287,7 @@ static void ixgbe_ptp_init_systime(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
case ixgbe_mac_X550:
+ case ixgbe_mac_e610:
tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
/* Reset SYSTIME registers to 0 */
@@ -1407,6 +1415,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name);
adapter->ptp_caps.owner = THIS_MODULE;
adapter->ptp_caps.max_adj = 30000000;
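
The E610 reuses the X550 adjfine path with its own base period. diff_by_scaled_ppm(base, scaled_ppm, &rate) fills rate with roughly base * |scaled_ppm| / (10^6 * 2^16) — scaled_ppm carries 16 fractional bits — and returns true for a negative adjustment, which the caller encodes in the ISGN bit. A hedged sketch of the selection and encoding (register writes omitted, DEMO_* masks mirror the defines above):

    #include <linux/ptp_clock_kernel.h>

    #define DEMO_INCVALUE_MASK  0x7FFFFFFF
    #define DEMO_ISGN           0x80000000

    /* Sketch: pick the per-MAC base period and encode the frequency
     * adjustment the way the hunk above does. */
    static u32 demo_encode_adjfine(bool is_e610, long scaled_ppm)
    {
        u64 base = is_e610 ? 0x333333333ULL : 0xC80000000ULL;
        u64 rate;
        u32 inca = 0;

        if (diff_by_scaled_ppm(base, scaled_ppm, &rate))
            inca = DEMO_ISGN;       /* negative adjustment */

        return inca | (rate & DEMO_INCVALUE_MASK);
    }
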
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h
index 8d06ade3c7cd..617e07878e4f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h
@@ -171,6 +171,9 @@ enum ixgbe_aci_opc {
ixgbe_aci_opc_done_alt_write = 0x0904,
ixgbe_aci_opc_clear_port_alt_write = 0x0906,
+ /* TCA Events */
+ ixgbe_aci_opc_temp_tca_event = 0x0C94,
+
/* debug commands */
ixgbe_aci_opc_debug_dump_internals = 0xFF08,
diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
index f804b35d79c7..8ba037e3d9c2 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
@@ -428,30 +428,9 @@ static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs)
}
}
-/**
- * ixgbevf_ipsec_offload_ok - can this packet use the xfrm hw offload
- * @skb: current data packet
- * @xs: pointer to transformer state struct
- **/
-static bool ixgbevf_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
-{
- if (xs->props.family == AF_INET) {
- /* Offload with IPv4 options is not supported yet */
- if (ip_hdr(skb)->ihl != 5)
- return false;
- } else {
- /* Offload with IPv6 extension headers is not support yet */
- if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
- return false;
- }
-
- return true;
-}
-
static const struct xfrmdev_ops ixgbevf_xfrmdev_ops = {
.xdo_dev_state_add = ixgbevf_ipsec_add_sa,
.xdo_dev_state_delete = ixgbevf_ipsec_del_sa,
- .xdo_dev_offload_ok = ixgbevf_ipsec_offload_ok,
};
/**
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 4fe121b9f94b..147571fdada3 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2342,7 +2342,7 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
prefetch(data);
xdp_buff_clear_frags_flag(xdp);
xdp_prepare_buff(xdp, data, pp->rx_offset_correction + MVNETA_MH_SIZE,
- data_len, false);
+ data_len, true);
}
static void
@@ -2396,6 +2396,7 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
struct xdp_buff *xdp, u32 desc_status)
{
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+ u32 metasize = xdp->data - xdp->data_meta;
struct sk_buff *skb;
u8 num_frags;
@@ -2410,6 +2411,8 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
skb_reserve(skb, xdp->data - xdp->data_hard_start);
skb_put(skb, xdp->data_end - xdp->data);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
skb->ip_summed = mvneta_rx_csum(pp, desc_status);
if (unlikely(xdp_buff_has_frags(xdp)))
@@ -5557,7 +5560,6 @@ static int mvneta_probe(struct platform_device *pdev)
clk_prepare_enable(pp->clk_bus);
pp->phylink_pcs.ops = &mvneta_phylink_pcs_ops;
- pp->phylink_pcs.neg_mode = true;
pp->phylink_config.dev = &dev->dev;
pp->phylink_config.type = PHYLINK_NETDEV;
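
The last xdp_prepare_buff() argument flips from false to true in both Marvell drivers, marking the headroom in front of the frame as valid metadata space so a BPF program may grow xdp->data_meta. The skb path then has to carry that region along, which is what the new skb_metadata_set() calls do. A minimal sketch of the consumer side:

    #include <net/xdp.h>
    #include <linux/skbuff.h>

    /* Sketch: carry XDP metadata written by a BPF program into the skb.
     * Assumes the buffer was prepared with meta_valid = true. */
    static void demo_copy_meta(struct sk_buff *skb, struct xdp_buff *xdp)
    {
        u32 metasize = xdp->data - xdp->data_meta;

        /* data_meta <= data always; a zero gap means no metadata. */
        if (metasize)
            skb_metadata_set(skb, metasize);
    }
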
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index dd76c1b7ed3a..566c12c89520 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -3915,13 +3915,13 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
while (rx_done < rx_todo) {
struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
+ u32 rx_status, timestamp, metasize = 0;
struct mvpp2_bm_pool *bm_pool;
struct page_pool *pp = NULL;
struct sk_buff *skb;
unsigned int frag_size;
dma_addr_t dma_addr;
phys_addr_t phys_addr;
- u32 rx_status, timestamp;
int pool, rx_bytes, err, ret;
struct page *page;
void *data;
@@ -3983,7 +3983,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
xdp_init_buff(&xdp, PAGE_SIZE, xdp_rxq);
xdp_prepare_buff(&xdp, data,
MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM,
- rx_bytes, false);
+ rx_bytes, true);
ret = mvpp2_run_xdp(port, xdp_prog, &xdp, pp, &ps);
@@ -3999,6 +3999,8 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
ps.rx_bytes += rx_bytes;
continue;
}
+
+ metasize = xdp.data - xdp.data_meta;
}
if (frag_size)
@@ -4038,6 +4040,8 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
skb_reserve(skb, MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM);
skb_put(skb, rx_bytes);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
skb->ip_summed = mvpp2_rx_csum(port, rx_status);
skb->protocol = eth_type_trans(skb, dev);
@@ -6985,9 +6989,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
for (thread = 0; thread < priv->nthreads; thread++) {
port_pcpu = per_cpu_ptr(port->pcpu, thread);
- hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED_SOFT);
- port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
+ hrtimer_setup(&port_pcpu->tx_done_timer, mvpp2_hr_timer_cb, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED_SOFT);
port_pcpu->timer_scheduled = false;
port_pcpu->dev = dev;
}
@@ -7024,9 +7027,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
dev->dev_port = port->id;
port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops;
- port->pcs_gmac.neg_mode = true;
port->pcs_xlg.ops = &mvpp2_phylink_xlg_pcs_ops;
- port->pcs_xlg.neg_mode = true;
if (!mvpp2_use_acpi_compat_mode(port_fwnode)) {
port->phylink_config.dev = &dev->dev;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 8216f843a7cd..0b27a695008b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -66,8 +66,18 @@ static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);
/* Supported devices */
static const struct pci_device_id cgx_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
- { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM) },
- { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM,
+ PCI_ANY_ID, PCI_SUBSYS_DEVID_CN10K_A) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM,
+ PCI_ANY_ID, PCI_SUBSYS_DEVID_CNF10K_A) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM,
+ PCI_ANY_ID, PCI_SUBSYS_DEVID_CNF10K_B) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM,
+ PCI_ANY_ID, PCI_SUBSYS_DEVID_CN10K_B) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM,
+ PCI_ANY_ID, PCI_SUBSYS_DEVID_CN20KA) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM,
+ PCI_ANY_ID, PCI_SUBSYS_DEVID_CNF20KA) },
{ 0, } /* end of table */
};
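
PCI_DEVICE() matched on vendor/device alone; the table above narrows each RPM entry with PCI_DEVICE_SUB() so the driver binds only on the listed subsystem device IDs (the silicon variants), while PCI_ANY_ID leaves the subsystem vendor wildcarded. A hedged sketch with hypothetical IDs:

    #include <linux/module.h>
    #include <linux/pci.h>

    #define DEMO_VENDOR     0x177d  /* Cavium */
    #define DEMO_DEVICE     0xa060  /* hypothetical device ID */
    #define DEMO_SUBSYS_DEV 0xb900  /* hypothetical board variant */

    static const struct pci_device_id demo_id_table[] = {
        /* Match device + subsystem device, any subsystem vendor. */
        { PCI_DEVICE_SUB(DEMO_VENDOR, DEMO_DEVICE,
                         PCI_ANY_ID, DEMO_SUBSYS_DEV) },
        { 0, }
    };
    MODULE_DEVICE_TABLE(pci, demo_id_table);
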
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
index d39d86e694cc..655dd4726d36 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
@@ -925,7 +925,6 @@ void rvu_mcs_exit(struct rvu *rvu)
if (!rvu->mcs_intr_wq)
return;
- flush_workqueue(rvu->mcs_intr_wq);
destroy_workqueue(rvu->mcs_intr_wq);
rvu->mcs_intr_wq = NULL;
}
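
The flush is dropped because destroy_workqueue() already drains every queued and in-flight work item before tearing the workqueue down, so a preceding flush_workqueue() is pure overhead. The resulting teardown idiom:

    #include <linux/workqueue.h>

    static void demo_teardown(struct workqueue_struct *wq)
    {
        if (!wq)
            return;
        /* No flush_workqueue() needed: destroy_workqueue() drains
         * all pending and running work before freeing. */
        destroy_workqueue(wq);
    }
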
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
index bcc96eed2481..66749b3649c1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
@@ -545,8 +545,7 @@ static int ptp_probe(struct pci_dev *pdev,
spin_lock_init(&ptp->ptp_lock);
if (cn10k_ptp_errata(ptp)) {
ptp->read_ptp_tstmp = &read_ptp_tstmp_sec_nsec;
- hrtimer_init(&ptp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- ptp->hrtimer.function = ptp_reset_thresh;
+ hrtimer_setup(&ptp->hrtimer, ptp_reset_thresh, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
} else {
ptp->read_ptp_tstmp = &read_ptp_tstmp_nsec;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index a383b5ef5b2d..60f085b00a8c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -30,6 +30,8 @@
#define PCI_SUBSYS_DEVID_CNF10K_A 0xBA00
#define PCI_SUBSYS_DEVID_CNF10K_B 0xBC00
#define PCI_SUBSYS_DEVID_CN10K_B 0xBD00
+#define PCI_SUBSYS_DEVID_CN20KA 0xC220
+#define PCI_SUBSYS_DEVID_CNF20KA 0xC320
/* PCI BAR nos */
#define PCI_AF_REG_BAR_NUM 0
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index cb6513ab35e7..69e0778f9ac1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -9,7 +9,7 @@ obj-$(CONFIG_RVU_ESWITCH) += rvu_rep.o
rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \
- otx2_devlink.o qos_sq.o qos.o
+ otx2_devlink.o qos_sq.o qos.o otx2_xsk.o
rvu_nicvf-y := otx2_vf.o
rvu_rep-y := rep.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
index a15cc86635d6..c3b6e0f60a79 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -112,9 +112,12 @@ int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
struct otx2_nic *pfvf = dev;
int cnt = cq->pool_ptrs;
u64 ptrs[NPA_MAX_BURST];
+ struct otx2_pool *pool;
dma_addr_t bufptr;
int num_ptrs = 1;
+ pool = &pfvf->qset.pool[cq->cq_idx];
+
/* Refill pool with new buffers */
while (cq->pool_ptrs) {
if (otx2_alloc_buffer(pfvf, cq, &bufptr)) {
@@ -124,7 +127,9 @@ int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
break;
}
cq->pool_ptrs--;
- ptrs[num_ptrs] = (u64)bufptr + OTX2_HEAD_ROOM;
+ ptrs[num_ptrs] = pool->xsk_pool ?
+ (u64)bufptr : (u64)bufptr + OTX2_HEAD_ROOM;
+
num_ptrs++;
if (num_ptrs == NPA_MAX_BURST || cq->pool_ptrs == 0) {
__cn10k_aura_freeptr(pfvf, cq->cq_idx, ptrs,
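
The ternary above encodes the refill address rule: AF_XDP zero-copy buffers are posted to the aura at exactly the DMA address the XSK pool returned, while page-pool buffers keep OTX2_HEAD_ROOM reserved in front of the payload. Reduced to a minimal sketch:

    /* Sketch of the refill address rule from the hunk above. */
    static u64 demo_refill_addr(bool is_xsk, u64 bufptr, u64 headroom)
    {
        return is_xsk ? bufptr : bufptr + headroom;
    }
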
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
index 09a5b5268205..fc59e50bafce 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
@@ -744,24 +744,9 @@ static void cn10k_ipsec_del_state(struct xfrm_state *x)
queue_work(pf->ipsec.sa_workq, &pf->ipsec.sa_work);
}
-static bool cn10k_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
-{
- if (x->props.family == AF_INET) {
- /* Offload with IPv4 options is not supported yet */
- if (ip_hdr(skb)->ihl > 5)
- return false;
- } else {
- /* Offload with IPv6 extension headers is not support yet */
- if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
- return false;
- }
- return true;
-}
-
static const struct xfrmdev_ops cn10k_ipsec_xfrmdev_ops = {
.xdo_dev_state_add = cn10k_ipsec_add_state,
.xdo_dev_state_delete = cn10k_ipsec_del_state,
- .xdo_dev_offload_ok = cn10k_ipsec_offload_ok,
};
static void cn10k_ipsec_sa_wq_handler(struct work_struct *work)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 2b49bfec7869..84cd029a85aa 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -17,6 +17,7 @@
#include "otx2_common.h"
#include "otx2_struct.h"
#include "cn10k.h"
+#include "otx2_xsk.h"
static bool otx2_is_pfc_enabled(struct otx2_nic *pfvf)
{
@@ -330,6 +331,10 @@ int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id)
rss_ctx = rss->rss_ctx[ctx_id];
/* Get memory to put this msg */
for (idx = 0; idx < rss->rss_size; idx++) {
+ /* Ignore the queue if AF_XDP zero copy is enabled */
+ if (test_bit(rss_ctx->ind_tbl[idx], pfvf->af_xdp_zc_qidx))
+ continue;
+
aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
if (!aq) {
/* The shared memory buffer can be full.
@@ -549,10 +554,13 @@ static int otx2_alloc_pool_buf(struct otx2_nic *pfvf, struct otx2_pool *pool,
}
static int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
- dma_addr_t *dma)
+ dma_addr_t *dma, int qidx, int idx)
{
u8 *buf;
+ if (pool->xsk_pool)
+ return otx2_xsk_pool_alloc_buf(pfvf, pool, dma, idx);
+
if (pool->page_pool)
return otx2_alloc_pool_buf(pfvf, pool, dma);
@@ -571,12 +579,12 @@ static int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
}
int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
- dma_addr_t *dma)
+ dma_addr_t *dma, int qidx, int idx)
{
int ret;
local_bh_disable();
- ret = __otx2_alloc_rbuf(pfvf, pool, dma);
+ ret = __otx2_alloc_rbuf(pfvf, pool, dma, qidx, idx);
local_bh_enable();
return ret;
}
@@ -584,7 +592,8 @@ int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
dma_addr_t *dma)
{
- if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma)))
+ if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma,
+ cq->cq_idx, cq->pool_ptrs - 1)))
return -ENOMEM;
return 0;
}
@@ -884,7 +893,7 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
#define RQ_PASS_LVL_AURA (255 - ((95 * 256) / 100)) /* RED when 95% is full */
#define RQ_DROP_LVL_AURA (255 - ((99 * 256) / 100)) /* Drop when 99% is full */
-static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
+int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
{
struct otx2_qset *qset = &pfvf->qset;
struct nix_aq_enq_req *aq;
@@ -1028,6 +1037,10 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
sq->stats.bytes = 0;
sq->stats.pkts = 0;
+ /* Attach XSK_BUFF_POOL to XDP queue */
+ if (qidx > pfvf->hw.xdp_queues)
+ otx2_attach_xsk_buff(pfvf, sq, (qidx - pfvf->hw.xdp_queues));
+
chan_offset = qidx % pfvf->hw.tx_chan_cnt;
err = pfvf->hw_ops->sq_aq_init(pfvf, qidx, chan_offset, sqb_aura);
@@ -1041,12 +1054,13 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
}
-static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
+int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
{
struct otx2_qset *qset = &pfvf->qset;
int err, pool_id, non_xdp_queues;
struct nix_aq_enq_req *aq;
struct otx2_cq_queue *cq;
+ struct otx2_pool *pool;
cq = &qset->cq[qidx];
cq->cq_idx = qidx;
@@ -1055,8 +1069,20 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
cq->cq_type = CQ_RX;
cq->cint_idx = qidx;
cq->cqe_cnt = qset->rqe_cnt;
- if (pfvf->xdp_prog)
+ if (pfvf->xdp_prog) {
xdp_rxq_info_reg(&cq->xdp_rxq, pfvf->netdev, qidx, 0);
+ pool = &qset->pool[qidx];
+ if (pool->xsk_pool) {
+ xdp_rxq_info_reg_mem_model(&cq->xdp_rxq,
+ MEM_TYPE_XSK_BUFF_POOL,
+ NULL);
+ xsk_pool_set_rxq_info(pool->xsk_pool, &cq->xdp_rxq);
+ } else if (pool->page_pool) {
+ xdp_rxq_info_reg_mem_model(&cq->xdp_rxq,
+ MEM_TYPE_PAGE_POOL,
+ pool->page_pool);
+ }
+ }
} else if (qidx < non_xdp_queues) {
cq->cq_type = CQ_TX;
cq->cint_idx = qidx - pfvf->hw.rx_queues;
@@ -1275,9 +1301,10 @@ void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool,
pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
page = virt_to_head_page(phys_to_virt(pa));
-
if (pool->page_pool) {
page_pool_put_full_page(pool->page_pool, page, true);
+ } else if (pool->xsk_pool) {
+ /* Note: the xdp_buff can't be recovered from the IOVA here;
+ * XSK buffers are freed via pool->xdp[] instead
+ */
} else {
dma_unmap_page_attrs(pfvf->dev, iova, size,
DMA_FROM_DEVICE,
@@ -1292,6 +1319,7 @@ void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type)
int pool_id, pool_start = 0, pool_end = 0, size = 0;
struct otx2_pool *pool;
u64 iova;
+ int idx;
if (type == AURA_NIX_SQ) {
pool_start = otx2_get_pool_idx(pfvf, type, 0);
@@ -1306,16 +1334,21 @@ void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type)
/* Free SQB and RQB pointers from the aura pool */
for (pool_id = pool_start; pool_id < pool_end; pool_id++) {
- iova = otx2_aura_allocptr(pfvf, pool_id);
pool = &pfvf->qset.pool[pool_id];
+ iova = otx2_aura_allocptr(pfvf, pool_id);
while (iova) {
if (type == AURA_NIX_RQ)
iova -= OTX2_HEAD_ROOM;
-
otx2_free_bufs(pfvf, pool, iova, size);
-
iova = otx2_aura_allocptr(pfvf, pool_id);
}
+
+ for (idx = 0; idx < pool->xdp_cnt; idx++) {
+ if (!pool->xdp[idx])
+ continue;
+
+ xsk_buff_free(pool->xdp[idx]);
+ }
}
}
@@ -1332,7 +1365,8 @@ void otx2_aura_pool_free(struct otx2_nic *pfvf)
qmem_free(pfvf->dev, pool->stack);
qmem_free(pfvf->dev, pool->fc_addr);
page_pool_destroy(pool->page_pool);
- pool->page_pool = NULL;
+ devm_kfree(pfvf->dev, pool->xdp);
+ pool->xsk_pool = NULL;
}
devm_kfree(pfvf->dev, pfvf->qset.pool);
pfvf->qset.pool = NULL;
@@ -1419,6 +1453,7 @@ int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
int stack_pages, int numptrs, int buf_size, int type)
{
struct page_pool_params pp_params = { 0 };
+ struct xsk_buff_pool *xsk_pool;
struct npa_aq_enq_req *aq;
struct otx2_pool *pool;
int err;
@@ -1462,21 +1497,35 @@ int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
aq->ctype = NPA_AQ_CTYPE_POOL;
aq->op = NPA_AQ_INSTOP_INIT;
- if (type != AURA_NIX_RQ) {
- pool->page_pool = NULL;
+ if (type != AURA_NIX_RQ)
+ return 0;
+
+ if (!test_bit(pool_id, pfvf->af_xdp_zc_qidx)) {
+ pp_params.order = get_order(buf_size);
+ pp_params.flags = PP_FLAG_DMA_MAP;
+ pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs);
+ pp_params.nid = NUMA_NO_NODE;
+ pp_params.dev = pfvf->dev;
+ pp_params.dma_dir = DMA_FROM_DEVICE;
+ pool->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(pool->page_pool)) {
+ netdev_err(pfvf->netdev, "Creation of page pool failed\n");
+ return PTR_ERR(pool->page_pool);
+ }
return 0;
}
- pp_params.order = get_order(buf_size);
- pp_params.flags = PP_FLAG_DMA_MAP;
- pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs);
- pp_params.nid = NUMA_NO_NODE;
- pp_params.dev = pfvf->dev;
- pp_params.dma_dir = DMA_FROM_DEVICE;
- pool->page_pool = page_pool_create(&pp_params);
- if (IS_ERR(pool->page_pool)) {
- netdev_err(pfvf->netdev, "Creation of page pool failed\n");
- return PTR_ERR(pool->page_pool);
+ /* Set XSK pool to support AF_XDP zero-copy */
+ xsk_pool = xsk_get_pool_from_qid(pfvf->netdev, pool_id);
+ if (xsk_pool) {
+ pool->xsk_pool = xsk_pool;
+ pool->xdp_cnt = numptrs;
+ pool->xdp = devm_kcalloc(pfvf->dev,
+ numptrs, sizeof(struct xdp_buff *), GFP_KERNEL);
+ if (!pool->xdp) {
+ netdev_err(pfvf->netdev, "Allocation of xdp_buff array failed\n");
+ return -ENOMEM;
+ }
}
return 0;
@@ -1537,9 +1586,18 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
}
for (ptr = 0; ptr < num_sqbs; ptr++) {
- err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
- if (err)
+ err = otx2_alloc_rbuf(pfvf, pool, &bufptr, pool_id, ptr);
+ if (err) {
+ if (pool->xsk_pool) {
+ ptr--;
+ while (ptr >= 0) {
+ xsk_buff_free(pool->xdp[ptr]);
+ ptr--;
+ }
+ }
goto err_mem;
+ }
+
pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr);
sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
}
@@ -1589,11 +1647,19 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
/* Allocate pointers and free them to aura/pool */
for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
pool = &pfvf->qset.pool[pool_id];
+
for (ptr = 0; ptr < num_ptrs; ptr++) {
- err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
- if (err)
+ err = otx2_alloc_rbuf(pfvf, pool, &bufptr, pool_id, ptr);
+ if (err) {
+ if (pool->xsk_pool) {
+ while (ptr)
+ xsk_buff_free(pool->xdp[--ptr]);
+ }
return -ENOMEM;
+ }
+
pfvf->hw_ops->aura_freeptr(pfvf, pool_id,
+ pool->xsk_pool ? bufptr :
bufptr + OTX2_HEAD_ROOM);
}
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 65814e3dc93f..1e88422825be 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -21,6 +21,7 @@
#include <linux/time64.h>
#include <linux/dim.h>
#include <uapi/linux/if_macsec.h>
+#include <net/page_pool/helpers.h>
#include <mbox.h>
#include <npc.h>
@@ -128,6 +129,12 @@ enum otx2_errcodes_re {
ERRCODE_IL4_CSUM = 0x22,
};
+enum otx2_xdp_action {
+ OTX2_XDP_TX = BIT(0),
+ OTX2_XDP_REDIRECT = BIT(1),
+ OTX2_AF_XDP_FRAME = BIT(2),
+};
+
struct otx2_dev_stats {
u64 rx_bytes;
u64 rx_frames;
@@ -531,6 +538,8 @@ struct otx2_nic {
/* Inline ipsec */
struct cn10k_ipsec ipsec;
+ /* af_xdp zero-copy */
+ unsigned long *af_xdp_zc_qidx;
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
@@ -1002,7 +1011,7 @@ void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq);
void otx2_free_pending_sqe(struct otx2_nic *pfvf);
void otx2_sqb_flush(struct otx2_nic *pfvf);
int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
- dma_addr_t *dma);
+ dma_addr_t *dma, int qidx, int idx);
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
@@ -1032,6 +1041,8 @@ void otx2_pfaf_mbox_destroy(struct otx2_nic *pf);
void otx2_disable_mbox_intr(struct otx2_nic *pf);
void otx2_disable_napi(struct otx2_nic *pf);
irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq);
+int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura);
+int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx);
/* RSS configuration APIs*/
int otx2_rss_init(struct otx2_nic *pfvf);
@@ -1094,7 +1105,8 @@ int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
-bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx);
+bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, struct xdp_frame *xdpf,
+ u64 iova, int len, u16 qidx, u16 flags);
u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
int otx2_handle_ntuple_tc_features(struct net_device *netdev,
netdev_features_t features);
@@ -1175,4 +1187,5 @@ static inline int mcam_entry_cmp(const void *a, const void *b)
dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
struct sk_buff *skb, int seg, int *len);
void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg);
+int otx2_read_free_sqe(struct otx2_nic *pfvf, u16 qidx);
#endif /* OTX2_COMMON_H */
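
The new otx2_xdp_action bits are stored in sg_list.flags at submit time (see the otx2_txrx.c hunks below) so that TX completion can tell how a buffer was produced and release it accordingly: zero-copy frames are only counted toward xsk_tx_completed(), redirected frames additionally own a DMA mapping. A self-contained sketch of that dispatch, with DEMO_* stand-ins for the enum above:

    #include <linux/bits.h>
    #include <net/xdp.h>

    #define DEMO_XDP_TX         BIT(0)
    #define DEMO_XDP_REDIRECT   BIT(1)
    #define DEMO_AF_XDP_FRAME   BIT(2)

    /* Sketch: free a completed TX buffer according to its origin flags,
     * mirroring the otx2_xdp_snd_pkt_handler() change further down. */
    static void demo_tx_complete(u16 flags, struct xdp_frame *xdpf,
                                 int *xsk_frames)
    {
        if (flags & DEMO_AF_XDP_FRAME) {
            (*xsk_frames)++;    /* batched into xsk_tx_completed() */
            return;
        }

        /* DEMO_XDP_REDIRECT frames additionally own a DMA mapping that
         * must be unmapped before the frame is returned. */
        xdp_return_frame(xdpf);
    }
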
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 2d53dc77ef1e..010385b29988 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -910,8 +910,12 @@ static int otx2_get_rxfh(struct net_device *dev,
return -ENOENT;
if (indir) {
- for (idx = 0; idx < rss->rss_size; idx++)
+ for (idx = 0; idx < rss->rss_size; idx++) {
+ /* Skip this RX queue if AF_XDP zero-copy is enabled on it */
+ if (test_bit(rss_ctx->ind_tbl[idx], pfvf->af_xdp_zc_qidx))
+ continue;
indir[idx] = rss_ctx->ind_tbl[idx];
+ }
}
if (rxfh->key)
memcpy(rxfh->key, rss->key, sizeof(rss->key));
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index e1dde93e8af8..cfed9ec5b157 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -27,6 +27,7 @@
#include "qos.h"
#include <rvu_trace.h>
#include "cn10k_ipsec.h"
+#include "otx2_xsk.h"
#define DRV_NAME "rvu_nicpf"
#define DRV_STRING "Marvell RVU NIC Physical Function Driver"
@@ -1662,9 +1663,7 @@ void otx2_free_hw_resources(struct otx2_nic *pf)
struct nix_lf_free_req *free_req;
struct mbox *mbox = &pf->mbox;
struct otx2_cq_queue *cq;
- struct otx2_pool *pool;
struct msg_req *req;
- int pool_id;
int qidx;
/* Ensure all SQE are processed */
@@ -1705,13 +1704,6 @@ void otx2_free_hw_resources(struct otx2_nic *pf)
/* Free RQ buffer pointers*/
otx2_free_aura_ptr(pf, AURA_NIX_RQ);
- for (qidx = 0; qidx < pf->hw.rx_queues; qidx++) {
- pool_id = otx2_get_pool_idx(pf, AURA_NIX_RQ, qidx);
- pool = &pf->qset.pool[pool_id];
- page_pool_destroy(pool->page_pool);
- pool->page_pool = NULL;
- }
-
otx2_free_cq_res(pf);
/* Free all ingress bandwidth profiles allocated */
@@ -2691,7 +2683,6 @@ static int otx2_get_vf_config(struct net_device *netdev, int vf,
static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf,
int qidx)
{
- struct page *page;
u64 dma_addr;
int err = 0;
@@ -2701,11 +2692,11 @@ static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf,
if (dma_mapping_error(pf->dev, dma_addr))
return -ENOMEM;
- err = otx2_xdp_sq_append_pkt(pf, dma_addr, xdpf->len, qidx);
+ err = otx2_xdp_sq_append_pkt(pf, xdpf, dma_addr, xdpf->len,
+ qidx, OTX2_XDP_REDIRECT);
if (!err) {
otx2_dma_unmap_page(pf, dma_addr, xdpf->len, DMA_TO_DEVICE);
- page = virt_to_page(xdpf->data);
- put_page(page);
+ xdp_return_frame(xdpf);
return -ENOMEM;
}
return 0;
@@ -2789,6 +2780,8 @@ static int otx2_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
switch (xdp->command) {
case XDP_SETUP_PROG:
return otx2_xdp_setup(pf, xdp->prog);
+ case XDP_SETUP_XSK_POOL:
+ return otx2_xsk_pool_setup(pf, xdp->xsk.pool, xdp->xsk.queue_id);
default:
return -EINVAL;
}
@@ -2866,6 +2859,7 @@ static const struct net_device_ops otx2_netdev_ops = {
.ndo_set_vf_vlan = otx2_set_vf_vlan,
.ndo_get_vf_config = otx2_get_vf_config,
.ndo_bpf = otx2_xdp,
+ .ndo_xsk_wakeup = otx2_xsk_wakeup,
.ndo_xdp_xmit = otx2_xdp_xmit,
.ndo_setup_tc = otx2_setup_tc,
.ndo_set_vf_trust = otx2_ndo_set_vf_trust,
@@ -3204,16 +3198,28 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Enable link notifications */
otx2_cgx_config_linkevents(pf, true);
+ pf->af_xdp_zc_qidx = bitmap_zalloc(qcount, GFP_KERNEL);
+ if (!pf->af_xdp_zc_qidx) {
+ err = -ENOMEM;
+ goto err_sriov_cleanup;
+ }
+
#ifdef CONFIG_DCB
err = otx2_dcbnl_set_ops(netdev);
if (err)
- goto err_pf_sriov_init;
+ goto err_free_zc_bmap;
#endif
otx2_qos_init(pf, qos_txqs);
return 0;
+#ifdef CONFIG_DCB
+err_free_zc_bmap:
+ bitmap_free(pf->af_xdp_zc_qidx);
+#endif
+err_sriov_cleanup:
+ otx2_sriov_vfcfg_cleanup(pf);
err_pf_sriov_init:
otx2_shutdown_tc(pf);
err_mcam_flow_del:
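
The probe error path above gains a new label, and the unwind order must mirror allocation order: the zero-copy bitmap is allocated after SR-IOV configuration, so it is freed before SR-IOV is cleaned up. A sketch of the goto-ladder idiom, all demo_* helpers hypothetical:

    #include <linux/bitmap.h>
    #include <linux/slab.h>

    static int demo_sriov_init(void) { return 0; }
    static void demo_sriov_cleanup(void) { }
    static int demo_dcb_init(void) { return 0; }

    /* Sketch: every failure jumps to the label that undoes exactly
     * what was set up before it. */
    static int demo_probe(void)
    {
        unsigned long *zc_bitmap;
        int err;

        err = demo_sriov_init();
        if (err)
            return err;

        zc_bitmap = bitmap_zalloc(16, GFP_KERNEL);
        if (!zc_bitmap) {
            err = -ENOMEM;
            goto err_sriov_cleanup;     /* undo SR-IOV, not the bitmap */
        }

        err = demo_dcb_init();
        if (err)
            goto err_free_bitmap;

        return 0;

    err_free_bitmap:
        bitmap_free(zc_bitmap);
    err_sriov_cleanup:
        demo_sriov_cleanup();
        return err;
    }
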
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 224cef938927..af8cabe828d0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -12,6 +12,7 @@
#include <linux/bpf_trace.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
+#include <net/xdp.h>
#include "otx2_reg.h"
#include "otx2_common.h"
@@ -19,6 +20,7 @@
#include "otx2_txrx.h"
#include "otx2_ptp.h"
#include "cn10k.h"
+#include "otx2_xsk.h"
#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))
#define PTP_PORT 0x13F
@@ -29,11 +31,17 @@
DEFINE_STATIC_KEY_FALSE(cn10k_ipsec_sa_enabled);
+static int otx2_get_free_sqe(struct otx2_snd_queue *sq)
+{
+ return (sq->cons_head - sq->head - 1 + sq->sqe_cnt)
+ & (sq->sqe_cnt - 1);
+}
+
static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
struct bpf_prog *prog,
struct nix_cqe_rx_s *cqe,
struct otx2_cq_queue *cq,
- bool *need_xdp_flush);
+ u32 *metasize, bool *need_xdp_flush);
static void otx2_sq_set_sqe_base(struct otx2_snd_queue *sq,
struct sk_buff *skb)
@@ -96,20 +104,22 @@ static unsigned int frag_num(unsigned int i)
static void otx2_xdp_snd_pkt_handler(struct otx2_nic *pfvf,
struct otx2_snd_queue *sq,
- struct nix_cqe_tx_s *cqe)
+ struct nix_cqe_tx_s *cqe,
+ int *xsk_frames)
{
struct nix_send_comp_s *snd_comp = &cqe->comp;
struct sg_list *sg;
- struct page *page;
- u64 pa;
sg = &sq->sg[snd_comp->sqe_id];
+ if (sg->flags & OTX2_AF_XDP_FRAME) {
+ (*xsk_frames)++;
+ return;
+ }
- pa = otx2_iova_to_phys(pfvf->iommu_domain, sg->dma_addr[0]);
- otx2_dma_unmap_page(pfvf, sg->dma_addr[0],
- sg->size[0], DMA_TO_DEVICE);
- page = virt_to_page(phys_to_virt(pa));
- put_page(page);
+ if (sg->flags & OTX2_XDP_REDIRECT)
+ otx2_dma_unmap_page(pfvf, sg->dma_addr[0], sg->size[0], DMA_TO_DEVICE);
+ xdp_return_frame((struct xdp_frame *)sg->skb);
+ sg->skb = (u64)NULL;
}
static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
@@ -326,6 +336,7 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
struct nix_rx_sg_s *sg = &cqe->sg;
struct sk_buff *skb = NULL;
void *end, *start;
+ u32 metasize = 0;
u64 *seg_addr;
u16 *seg_size;
int seg;
@@ -336,7 +347,8 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
}
if (pfvf->xdp_prog)
- if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq, need_xdp_flush))
+ if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq,
+ &metasize, need_xdp_flush))
return;
skb = napi_get_frags(napi);
@@ -368,6 +380,8 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
skb->mark = parse->match_id;
skb_mark_for_recycle(skb);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
napi_gro_frags(napi);
}
@@ -431,6 +445,18 @@ int otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
return cnt - cq->pool_ptrs;
}
+static void otx2_zc_submit_pkts(struct otx2_nic *pfvf, struct xsk_buff_pool *xsk_pool,
+ int *xsk_frames, int qidx, int budget)
+{
+ if (*xsk_frames)
+ xsk_tx_completed(xsk_pool, *xsk_frames);
+
+ if (xsk_uses_need_wakeup(xsk_pool))
+ xsk_set_tx_need_wakeup(xsk_pool);
+
+ otx2_zc_napi_handler(pfvf, xsk_pool, qidx, budget);
+}
+
static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
struct otx2_cq_queue *cq, int budget)
{
@@ -439,16 +465,22 @@ static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
struct nix_cqe_tx_s *cqe;
struct net_device *ndev;
int processed_cqe = 0;
+ int xsk_frames = 0;
+
+ qidx = cq->cq_idx - pfvf->hw.rx_queues;
+ sq = &pfvf->qset.sq[qidx];
if (cq->pend_cqe >= budget)
goto process_cqe;
- if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
+ if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe) {
+ if (sq->xsk_pool)
+ otx2_zc_submit_pkts(pfvf, sq->xsk_pool, &xsk_frames,
+ qidx, budget);
return 0;
+ }
process_cqe:
- qidx = cq->cq_idx - pfvf->hw.rx_queues;
- sq = &pfvf->qset.sq[qidx];
while (likely(processed_cqe < budget) && cq->pend_cqe) {
cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
@@ -458,10 +490,8 @@ process_cqe:
break;
}
- qidx = cq->cq_idx - pfvf->hw.rx_queues;
-
if (cq->cq_type == CQ_XDP)
- otx2_xdp_snd_pkt_handler(pfvf, sq, cqe);
+ otx2_xdp_snd_pkt_handler(pfvf, sq, cqe, &xsk_frames);
else
otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[qidx],
cqe, budget, &tx_pkts, &tx_bytes);
@@ -502,6 +532,10 @@ process_cqe:
netif_carrier_ok(ndev))
netif_tx_wake_queue(txq);
}
+
+ if (sq->xsk_pool)
+ otx2_zc_submit_pkts(pfvf, sq->xsk_pool, &xsk_frames, qidx, budget);
+
return 0;
}
@@ -527,9 +561,10 @@ static void otx2_adjust_adaptive_coalese(struct otx2_nic *pfvf, struct otx2_cq_p
int otx2_napi_handler(struct napi_struct *napi, int budget)
{
struct otx2_cq_queue *rx_cq = NULL;
+ struct otx2_cq_queue *cq = NULL;
+ struct otx2_pool *pool = NULL;
struct otx2_cq_poll *cq_poll;
int workdone = 0, cq_idx, i;
- struct otx2_cq_queue *cq;
struct otx2_qset *qset;
struct otx2_nic *pfvf;
int filled_cnt = -1;
@@ -554,6 +589,7 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
if (rx_cq && rx_cq->pool_ptrs)
filled_cnt = pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq);
+
/* Clear the IRQ */
otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0));
@@ -566,20 +602,31 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
if (pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED)
otx2_adjust_adaptive_coalese(pfvf, cq_poll);
+ if (likely(cq))
+ pool = &pfvf->qset.pool[cq->cq_idx];
+
if (unlikely(!filled_cnt)) {
struct refill_work *work;
struct delayed_work *dwork;
- work = &pfvf->refill_wrk[cq->cq_idx];
- dwork = &work->pool_refill_work;
- /* Schedule a task if no other task is running */
- if (!cq->refill_task_sched) {
- work->napi = napi;
- cq->refill_task_sched = true;
- schedule_delayed_work(dwork,
- msecs_to_jiffies(100));
+ if (likely(cq)) {
+ work = &pfvf->refill_wrk[cq->cq_idx];
+ dwork = &work->pool_refill_work;
+ /* Schedule a task if no other task is running */
+ if (!cq->refill_task_sched) {
+ work->napi = napi;
+ cq->refill_task_sched = true;
+ schedule_delayed_work(dwork,
+ msecs_to_jiffies(100));
+ }
+ /* Set the need-wakeup flag as buffers could not be refilled */
+ if (pool->xsk_pool)
+ xsk_set_rx_need_wakeup(pool->xsk_pool);
}
} else {
+ /* Clear the need-wakeup flag since buffers were refilled successfully */
+ if (pool && pool->xsk_pool)
+ xsk_clear_rx_need_wakeup(pool->xsk_pool);
/* Re-enable interrupts */
otx2_write64(pfvf,
NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
@@ -1147,7 +1194,7 @@ bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq,
/* Check if there is enough room between producer
* and consumer index.
*/
- free_desc = (sq->cons_head - sq->head - 1 + sq->sqe_cnt) & (sq->sqe_cnt - 1);
+ free_desc = otx2_get_free_sqe(sq);
if (free_desc < sq->sqe_thresh)
return false;
@@ -1230,15 +1277,19 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int q
u16 pool_id;
u64 iova;
- if (pfvf->xdp_prog)
+ pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx);
+ pool = &pfvf->qset.pool[pool_id];
+
+ if (pfvf->xdp_prog) {
+ if (pool->page_pool)
+ xdp_rxq_info_unreg_mem_model(&cq->xdp_rxq);
+
xdp_rxq_info_unreg(&cq->xdp_rxq);
+ }
if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
return;
- pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx);
- pool = &pfvf->qset.pool[pool_id];
-
while (cq->pend_cqe) {
cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq);
processed_cqe++;
@@ -1359,8 +1410,9 @@ void otx2_free_pending_sqe(struct otx2_nic *pfvf)
}
}
-static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
- int len, int *offset)
+static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq,
+ struct xdp_frame *xdpf,
+ u64 dma_addr, int len, int *offset, u16 flags)
{
struct nix_sqe_sg_s *sg = NULL;
u64 *iova = NULL;
@@ -1377,16 +1429,34 @@ static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
sq->sg[sq->head].dma_addr[0] = dma_addr;
sq->sg[sq->head].size[0] = len;
sq->sg[sq->head].num_segs = 1;
+ sq->sg[sq->head].flags = flags;
+ sq->sg[sq->head].skb = (u64)xdpf;
}
-bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
+int otx2_read_free_sqe(struct otx2_nic *pfvf, u16 qidx)
+{
+ struct otx2_snd_queue *sq;
+ int free_sqe;
+
+ sq = &pfvf->qset.sq[qidx];
+ free_sqe = otx2_get_free_sqe(sq);
+ if (free_sqe < sq->sqe_thresh) {
+ netdev_warn(pfvf->netdev, "No free SQE for send queue %d\n", qidx);
+ return 0;
+ }
+
+ return free_sqe - sq->sqe_thresh;
+}
+
+bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, struct xdp_frame *xdpf,
+ u64 iova, int len, u16 qidx, u16 flags)
{
struct nix_sqe_hdr_s *sqe_hdr;
struct otx2_snd_queue *sq;
int offset, free_sqe;
sq = &pfvf->qset.sq[qidx];
- free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb;
+ free_sqe = otx2_get_free_sqe(sq);
if (free_sqe < sq->sqe_thresh)
return false;
@@ -1405,7 +1475,7 @@ bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
offset = sizeof(*sqe_hdr);
- otx2_xdp_sqe_add_sg(sq, iova, len, &offset);
+ otx2_xdp_sqe_add_sg(sq, xdpf, iova, len, &offset, flags);
sqe_hdr->sizem1 = (offset / 16) - 1;
pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
@@ -1416,16 +1486,30 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
struct bpf_prog *prog,
struct nix_cqe_rx_s *cqe,
struct otx2_cq_queue *cq,
- bool *need_xdp_flush)
+ u32 *metasize, bool *need_xdp_flush)
{
+ struct xdp_buff xdp, *xsk_buff = NULL;
unsigned char *hard_start;
+ struct otx2_pool *pool;
+ struct xdp_frame *xdpf;
int qidx = cq->cq_idx;
- struct xdp_buff xdp;
struct page *page;
u64 iova, pa;
u32 act;
int err;
+ pool = &pfvf->qset.pool[qidx];
+
+ if (pool->xsk_pool) {
+ xsk_buff = pool->xdp[--cq->rbpool->xdp_top];
+ if (!xsk_buff)
+ return false;
+
+ xsk_buff->data_end = xsk_buff->data + cqe->sg.seg_size;
+ act = bpf_prog_run_xdp(prog, xsk_buff);
+ goto handle_xdp_verdict;
+ }
+
iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;
pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
page = virt_to_page(phys_to_virt(pa));
@@ -1434,41 +1518,64 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
hard_start = (unsigned char *)phys_to_virt(pa);
xdp_prepare_buff(&xdp, hard_start, OTX2_HEAD_ROOM,
- cqe->sg.seg_size, false);
+ cqe->sg.seg_size, true);
act = bpf_prog_run_xdp(prog, &xdp);
+handle_xdp_verdict:
switch (act) {
case XDP_PASS:
+ *metasize = xdp.data - xdp.data_meta;
break;
case XDP_TX:
qidx += pfvf->hw.tx_queues;
cq->pool_ptrs++;
- return otx2_xdp_sq_append_pkt(pfvf, iova,
- cqe->sg.seg_size, qidx);
+ xdpf = xdp_convert_buff_to_frame(&xdp);
+ return otx2_xdp_sq_append_pkt(pfvf, xdpf,
+ cqe->sg.seg_addr,
+ cqe->sg.seg_size,
+ qidx, OTX2_XDP_TX);
case XDP_REDIRECT:
cq->pool_ptrs++;
- err = xdp_do_redirect(pfvf->netdev, &xdp, prog);
+ if (xsk_buff) {
+ err = xdp_do_redirect(pfvf->netdev, xsk_buff, prog);
+ if (!err) {
+ *need_xdp_flush = true;
+ return true;
+ }
+ return false;
+ }
- otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
- DMA_FROM_DEVICE);
+ err = xdp_do_redirect(pfvf->netdev, &xdp, prog);
if (!err) {
*need_xdp_flush = true;
return true;
}
- put_page(page);
+
+ otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
+ DMA_FROM_DEVICE);
+ xdpf = xdp_convert_buff_to_frame(&xdp);
+ xdp_return_frame(xdpf);
break;
default:
bpf_warn_invalid_xdp_action(pfvf->netdev, prog, act);
break;
case XDP_ABORTED:
+ if (xsk_buff)
+ xsk_buff_free(xsk_buff);
trace_xdp_exception(pfvf->netdev, prog, act);
break;
case XDP_DROP:
- otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
- DMA_FROM_DEVICE);
- put_page(page);
cq->pool_ptrs++;
+ if (xsk_buff) {
+ xsk_buff_free(xsk_buff);
+ } else if (page->pp) {
+ page_pool_recycle_direct(pool->page_pool, page);
+ } else {
+ otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
+ DMA_FROM_DEVICE);
+ put_page(page);
+ }
return true;
}
return false;
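
otx2_get_free_sqe() above computes the free descriptors of a power-of-two ring as (cons_head - head - 1 + sqe_cnt) & (sqe_cnt - 1); the -1 keeps one slot permanently unused so that head == cons_head unambiguously means "empty". A worked sketch:

    /* Sketch of the ring-occupancy math used by otx2_get_free_sqe().
     * cnt must be a power of two. */
    static int demo_free_slots(int cons_head, int head, int cnt)
    {
        return (cons_head - head - 1 + cnt) & (cnt - 1);
    }

    /* e.g. cnt = 16, head = 10, cons_head = 4:
     * (4 - 10 - 1 + 16) & 15 = 9 free slots. */
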
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index d23810963fdb..acf259d72008 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -12,6 +12,7 @@
#include <linux/iommu.h>
#include <linux/if_vlan.h>
#include <net/xdp.h>
+#include <net/xdp_sock_drv.h>
#define LBK_CHAN_BASE 0x000
#define SDP_CHAN_BASE 0x700
@@ -76,6 +77,7 @@ struct otx2_rcv_queue {
struct sg_list {
u16 num_segs;
+ u16 flags;
u64 skb;
u64 size[OTX2_MAX_FRAGS_IN_SQE];
u64 dma_addr[OTX2_MAX_FRAGS_IN_SQE];
@@ -104,6 +106,8 @@ struct otx2_snd_queue {
/* SQE ring and CPT response queue for Inline IPSEC */
struct qmem *sqe_ring;
struct qmem *cpt_resp;
+ /* Buffer pool for af_xdp zero-copy */
+ struct xsk_buff_pool *xsk_pool;
} ____cacheline_aligned_in_smp;
enum cq_type {
@@ -127,7 +131,11 @@ struct otx2_pool {
struct qmem *stack;
struct qmem *fc_addr;
struct page_pool *page_pool;
+ struct xsk_buff_pool *xsk_pool;
+ struct xdp_buff **xdp;
+ u16 xdp_cnt;
u16 rbsize;
+ u16 xdp_top;
};
struct otx2_cq_queue {
@@ -144,6 +152,7 @@ struct otx2_cq_queue {
void *cqe_base;
struct qmem *cqe;
struct otx2_pool *rbpool;
+ bool xsk_zc_en;
struct xdp_rxq_info xdp_rxq;
} ____cacheline_aligned_in_smp;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index e926c6ce96cf..7ef3ba477d49 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -722,15 +722,27 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_shutdown_tc;
+ vf->af_xdp_zc_qidx = bitmap_zalloc(qcount, GFP_KERNEL);
+ if (!vf->af_xdp_zc_qidx) {
+ err = -ENOMEM;
+ goto err_unreg_devlink;
+ }
+
#ifdef CONFIG_DCB
err = otx2_dcbnl_set_ops(netdev);
if (err)
- goto err_shutdown_tc;
+ goto err_free_zc_bmap;
#endif
otx2_qos_init(vf, qos_txqs);
return 0;
+#ifdef CONFIG_DCB
+err_free_zc_bmap:
+ bitmap_free(vf->af_xdp_zc_qidx);
+#endif
+err_unreg_devlink:
+ otx2_unregister_dl(vf);
err_shutdown_tc:
otx2_shutdown_tc(vf);
err_unreg_netdev:
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c
new file mode 100644
index 000000000000..ce10caea8511
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#include <linux/bpf_trace.h>
+#include <linux/stringify.h>
+#include <net/xdp_sock_drv.h>
+#include <net/xdp.h>
+
+#include "otx2_common.h"
+#include "otx2_xsk.h"
+
+int otx2_xsk_pool_alloc_buf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ dma_addr_t *dma, int idx)
+{
+ struct xdp_buff *xdp;
+ int delta;
+
+ xdp = xsk_buff_alloc(pool->xsk_pool);
+ if (!xdp)
+ return -ENOMEM;
+
+ pool->xdp[pool->xdp_top++] = xdp;
+ *dma = OTX2_DATA_ALIGN(xsk_buff_xdp_get_dma(xdp));
+ /* Adjust xdp->data for unaligned addresses */
+ delta = *dma - xsk_buff_xdp_get_dma(xdp);
+ xdp->data += delta;
+
+ return 0;
+}
+
+static int otx2_xsk_ctx_disable(struct otx2_nic *pfvf, u16 qidx, int aura_id)
+{
+ struct nix_cn10k_aq_enq_req *cn10k_rq_aq;
+ struct npa_aq_enq_req *aura_aq;
+ struct npa_aq_enq_req *pool_aq;
+ struct nix_aq_enq_req *rq_aq;
+
+ if (test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
+ cn10k_rq_aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
+ if (!cn10k_rq_aq)
+ return -ENOMEM;
+ cn10k_rq_aq->qidx = qidx;
+ cn10k_rq_aq->rq.ena = 0;
+ cn10k_rq_aq->rq_mask.ena = 1;
+ cn10k_rq_aq->ctype = NIX_AQ_CTYPE_RQ;
+ cn10k_rq_aq->op = NIX_AQ_INSTOP_WRITE;
+ } else {
+ rq_aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
+ if (!rq_aq)
+ return -ENOMEM;
+ rq_aq->qidx = qidx;
+ rq_aq->rq.ena = 0;
+ rq_aq->rq_mask.ena = 1;
+ rq_aq->ctype = NIX_AQ_CTYPE_RQ;
+ rq_aq->op = NIX_AQ_INSTOP_WRITE;
+ }
+
+ aura_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
+ if (!aura_aq)
+ goto fail;
+
+ aura_aq->aura_id = aura_id;
+ aura_aq->aura.ena = 0;
+ aura_aq->aura_mask.ena = 1;
+ aura_aq->ctype = NPA_AQ_CTYPE_AURA;
+ aura_aq->op = NPA_AQ_INSTOP_WRITE;
+
+ pool_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
+ if (!pool_aq)
+ goto fail;
+
+ pool_aq->aura_id = aura_id;
+ pool_aq->pool.ena = 0;
+ pool_aq->pool_mask.ena = 1;
+
+ pool_aq->ctype = NPA_AQ_CTYPE_POOL;
+ pool_aq->op = NPA_AQ_INSTOP_WRITE;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+
+fail:
+ otx2_mbox_reset(&pfvf->mbox.mbox, 0);
+ return -ENOMEM;
+}
+
+static void otx2_clean_up_rq(struct otx2_nic *pfvf, int qidx)
+{
+ struct otx2_qset *qset = &pfvf->qset;
+ struct otx2_cq_queue *cq;
+ struct otx2_pool *pool;
+ u64 iova;
+
+ /* If the DOWN flag is set, the queues are already freed */
+ if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
+ return;
+
+ cq = &qset->cq[qidx];
+ if (cq)
+ otx2_cleanup_rx_cqes(pfvf, cq, qidx);
+
+ pool = &pfvf->qset.pool[qidx];
+ iova = otx2_aura_allocptr(pfvf, qidx);
+ while (iova) {
+ iova -= OTX2_HEAD_ROOM;
+ otx2_free_bufs(pfvf, pool, iova, pfvf->rbsize);
+ iova = otx2_aura_allocptr(pfvf, qidx);
+ }
+
+ mutex_lock(&pfvf->mbox.lock);
+ otx2_xsk_ctx_disable(pfvf, qidx, qidx);
+ mutex_unlock(&pfvf->mbox.lock);
+}
+
+int otx2_xsk_pool_enable(struct otx2_nic *pf, struct xsk_buff_pool *pool, u16 qidx)
+{
+ u16 rx_queues = pf->hw.rx_queues;
+ u16 tx_queues = pf->hw.tx_queues;
+ int err;
+
+ if (qidx >= rx_queues || qidx >= tx_queues)
+ return -EINVAL;
+
+ err = xsk_pool_dma_map(pool, pf->dev, DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
+ if (err)
+ return err;
+
+ set_bit(qidx, pf->af_xdp_zc_qidx);
+ otx2_clean_up_rq(pf, qidx);
+ /* Reconfigure RSS table as 'qidx' cannot be part of RSS now */
+ otx2_set_rss_table(pf, DEFAULT_RSS_CONTEXT_GROUP);
+ /* Kick start the NAPI context so that receiving will start */
+ return otx2_xsk_wakeup(pf->netdev, qidx, XDP_WAKEUP_RX);
+}
+
+int otx2_xsk_pool_disable(struct otx2_nic *pf, u16 qidx)
+{
+ struct net_device *netdev = pf->netdev;
+ struct xsk_buff_pool *pool;
+ struct otx2_snd_queue *sq;
+
+ pool = xsk_get_pool_from_qid(netdev, qidx);
+ if (!pool)
+ return -EINVAL;
+
+ sq = &pf->qset.sq[qidx + pf->hw.tx_queues];
+ sq->xsk_pool = NULL;
+ otx2_clean_up_rq(pf, qidx);
+ clear_bit(qidx, pf->af_xdp_zc_qidx);
+ xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
+ /* Reconfigure RSS table as 'qidx' needs to be part of RSS again */
+ otx2_set_rss_table(pf, DEFAULT_RSS_CONTEXT_GROUP);
+
+ return 0;
+}
+
+int otx2_xsk_pool_setup(struct otx2_nic *pf, struct xsk_buff_pool *pool, u16 qidx)
+{
+ if (pool)
+ return otx2_xsk_pool_enable(pf, pool, qidx);
+
+ return otx2_xsk_pool_disable(pf, qidx);
+}
+
+int otx2_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
+{
+ struct otx2_nic *pf = netdev_priv(dev);
+ struct otx2_cq_poll *cq_poll = NULL;
+ struct otx2_qset *qset = &pf->qset;
+
+ if (pf->flags & OTX2_FLAG_INTF_DOWN)
+ return -ENETDOWN;
+
+ if (queue_id >= pf->hw.rx_queues || queue_id >= pf->hw.tx_queues)
+ return -EINVAL;
+
+ cq_poll = &qset->napi[queue_id];
+ if (!cq_poll)
+ return -EINVAL;
+
+ /* Trigger interrupt */
+ if (!napi_if_scheduled_mark_missed(&cq_poll->napi)) {
+ otx2_write64(pf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx), BIT_ULL(0));
+ otx2_write64(pf, NIX_LF_CINTX_INT_W1S(cq_poll->cint_idx), BIT_ULL(0));
+ }
+
+ return 0;
+}
+
+void otx2_attach_xsk_buff(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, int qidx)
+{
+ if (test_bit(qidx, pfvf->af_xdp_zc_qidx))
+ sq->xsk_pool = xsk_get_pool_from_qid(pfvf->netdev, qidx);
+}
+
+void otx2_zc_napi_handler(struct otx2_nic *pfvf, struct xsk_buff_pool *pool,
+ int queue, int budget)
+{
+ struct xdp_desc *xdp_desc = pool->tx_descs;
+ int err, i, work_done = 0, batch;
+
+ budget = min(budget, otx2_read_free_sqe(pfvf, queue));
+ batch = xsk_tx_peek_release_desc_batch(pool, budget);
+ if (!batch)
+ return;
+
+ for (i = 0; i < batch; i++) {
+ dma_addr_t dma_addr;
+
+ dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc[i].addr);
+ err = otx2_xdp_sq_append_pkt(pfvf, NULL, dma_addr, xdp_desc[i].len,
+ queue, OTX2_AF_XDP_FRAME);
+ if (!err) {
+ netdev_err(pfvf->netdev, "AF_XDP: Unable to transmit packet, err %d\n", err);
+ break;
+ }
+ work_done++;
+ }
+
+ if (work_done)
+ xsk_tx_release(pool);
+}
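
The zero-copy TX path above caps the batch at the free SQE count, then xsk_tx_peek_release_desc_batch() pulls up to that many descriptors from the pool's user TX ring into pool->tx_descs; each one becomes a single SQE and xsk_tx_release() publishes the batch. A hedged sketch of the loop shape, with a hypothetical demo_hw_xmit() standing in for the SQE submit:

    #include <net/xdp_sock_drv.h>

    /* Hypothetical hardware submit hook; returns false when the SQ is full. */
    static bool demo_hw_xmit(dma_addr_t dma, u32 len) { return true; }

    /* Sketch of the AF_XDP zero-copy TX batch loop. */
    static void demo_zc_tx(struct xsk_buff_pool *pool, int budget)
    {
        struct xdp_desc *descs = pool->tx_descs;
        int i, batch;

        batch = xsk_tx_peek_release_desc_batch(pool, budget);
        for (i = 0; i < batch; i++) {
            dma_addr_t dma = xsk_buff_raw_get_dma(pool, descs[i].addr);

            if (!demo_hw_xmit(dma, descs[i].len))
                break;      /* SQ exhausted; stop submitting */
        }
        if (batch)
            xsk_tx_release(pool);
    }
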
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.h
new file mode 100644
index 000000000000..8047fafee8fe
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#ifndef OTX2_XSK_H
+#define OTX2_XSK_H
+
+struct otx2_nic;
+struct xsk_buff_pool;
+
+int otx2_xsk_pool_setup(struct otx2_nic *pf, struct xsk_buff_pool *pool, u16 qid);
+int otx2_xsk_pool_enable(struct otx2_nic *pf, struct xsk_buff_pool *pool, u16 qid);
+int otx2_xsk_pool_disable(struct otx2_nic *pf, u16 qid);
+int otx2_xsk_pool_alloc_buf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ dma_addr_t *dma, int idx);
+int otx2_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
+void otx2_zc_napi_handler(struct otx2_nic *pfvf, struct xsk_buff_pool *pool,
+ int queue, int budget);
+void otx2_attach_xsk_buff(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, int qidx);
+
+#endif /* OTX2_XSK_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
index 9d887bfc3108..c5dbae0e513b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
@@ -82,7 +82,7 @@ static int otx2_qos_sq_aura_pool_init(struct otx2_nic *pfvf, int qidx)
}
for (ptr = 0; ptr < num_sqbs; ptr++) {
- err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
+ err = otx2_alloc_rbuf(pfvf, pool, &bufptr, pool_id, ptr);
if (err)
goto sqb_free;
pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index 440a4c42b405..71ffb55d1fc4 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -396,7 +396,6 @@ static int prestera_port_sfp_bind(struct prestera_port *port)
continue;
port->phylink_pcs.ops = &prestera_pcs_ops;
- port->phylink_pcs.neg_mode = true;
port->phy_config.dev = &port->dev->dev;
port->phy_config.type = PHYLINK_NETDEV;
@@ -635,7 +634,7 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
goto err_dl_port_register;
dev->features |= NETIF_F_HW_TC;
- dev->netns_local = true;
+ dev->netns_immutable = true;
dev->netdev_ops = &prestera_netdev_ops;
dev->ethtool_ops = &prestera_ethtool_ops;
SET_NETDEV_DEV(dev, sw->dev->dev);
diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
index 95c4405b7d7b..7bfd3f230ff5 100644
--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -7,14 +7,6 @@ config NET_VENDOR_MEDIATEK
if NET_VENDOR_MEDIATEK
-config NET_AIROHA
- tristate "Airoha SoC Gigabit Ethernet support"
- depends on NET_DSA || !NET_DSA
- select PAGE_POOL
- help
- This driver supports the gigabit ethernet MACs in the
- Airoha SoC family.
-
config NET_MEDIATEK_SOC_WED
depends on ARCH_MEDIATEK || COMPILE_TEST
def_bool NET_MEDIATEK_SOC != n
diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
index ddbb7f4a516c..03e008fbc859 100644
--- a/drivers/net/ethernet/mediatek/Makefile
+++ b/drivers/net/ethernet/mediatek/Makefile
@@ -11,4 +11,3 @@ mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
endif
obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o
obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o
-obj-$(CONFIG_NET_AIROHA) += airoha_eth.o
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 53485142938c..43197b28b3e7 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -815,12 +815,60 @@ static void mtk_mac_link_up(struct phylink_config *config,
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}
+static void mtk_mac_disable_tx_lpi(struct phylink_config *config)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ struct mtk_eth *eth = mac->hw;
+
+ mtk_m32(eth, MAC_MCR_EEE100M | MAC_MCR_EEE1G, 0, MTK_MAC_MCR(mac->id));
+}
+
+static int mtk_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
+ bool tx_clk_stop)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ struct mtk_eth *eth = mac->hw;
+ u32 val;
+
+ /* Tx idle timer in ms */
+ timer = DIV_ROUND_UP(timer, 1000);
+
+ /* If the timer is zero, then set LPI_MODE, which allows the
+ * system to enter LPI mode immediately rather than waiting for
+ * the LPI threshold.
+ */
+ if (!timer)
+ val = MAC_EEE_LPI_MODE;
+ else if (FIELD_FIT(MAC_EEE_LPI_TXIDLE_THD, timer))
+ val = FIELD_PREP(MAC_EEE_LPI_TXIDLE_THD, timer);
+ else
+ val = MAC_EEE_LPI_TXIDLE_THD;
+
+ if (tx_clk_stop)
+ val |= MAC_EEE_CKG_TXIDLE;
+
+ /* PHY wake-up time: this field has no reset value of its own, so use
+ * the reset values from MT7531 (36us for 100M and 17us for 1000M).
+ */
+ val |= FIELD_PREP(MAC_EEE_WAKEUP_TIME_1000, 17) |
+ FIELD_PREP(MAC_EEE_WAKEUP_TIME_100, 36);
+
+ mtk_w32(eth, val, MTK_MAC_EEECR(mac->id));
+ mtk_m32(eth, 0, MAC_MCR_EEE100M | MAC_MCR_EEE1G, MTK_MAC_MCR(mac->id));
+
+ return 0;
+}
+
static const struct phylink_mac_ops mtk_phylink_ops = {
.mac_select_pcs = mtk_mac_select_pcs,
.mac_config = mtk_mac_config,
.mac_finish = mtk_mac_finish,
.mac_link_down = mtk_mac_link_down,
.mac_link_up = mtk_mac_link_up,
+ .mac_disable_tx_lpi = mtk_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = mtk_mac_enable_tx_lpi,
};
static int mtk_mdio_init(struct mtk_eth *eth)
@@ -830,17 +878,12 @@ static int mtk_mdio_init(struct mtk_eth *eth)
int ret;
u32 val;
- mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
+ mii_np = of_get_available_child_by_name(eth->dev->of_node, "mdio-bus");
if (!mii_np) {
dev_err(eth->dev, "no %s child node found", "mdio-bus");
return -ENODEV;
}
- if (!of_device_is_available(mii_np)) {
- ret = -ENODEV;
- goto err_put_node;
- }
-
eth->mii_bus = devm_mdiobus_alloc(eth->dev);
if (!eth->mii_bus) {
ret = -ENOMEM;
@@ -2079,7 +2122,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
if (ring->page_pool) {
struct page *page = virt_to_head_page(data);
struct xdp_buff xdp;
- u32 ret;
+ u32 ret, metasize;
new_data = mtk_page_pool_get_buff(ring->page_pool,
&dma_addr,
@@ -2095,7 +2138,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen,
- false);
+ true);
xdp_buff_clear_frags_flag(&xdp);
ret = mtk_xdp_run(eth, ring, &xdp, netdev);
@@ -2115,6 +2158,9 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
skb_reserve(skb, xdp.data - xdp.data_hard_start);
skb_put(skb, xdp.data_end - xdp.data);
+ metasize = xdp.data - xdp.data_meta;
+ if (metasize)
+ skb_metadata_set(skb, metasize);
skb_mark_for_recycle(skb);
} else {
if (ring->frag_size <= PAGE_SIZE)
@@ -4474,6 +4520,20 @@ static int mtk_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
return phylink_ethtool_set_pauseparam(mac->phylink, pause);
}
+static int mtk_get_eee(struct net_device *dev, struct ethtool_keee *eee)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+
+ return phylink_ethtool_get_eee(mac->phylink, eee);
+}
+
+static int mtk_set_eee(struct net_device *dev, struct ethtool_keee *eee)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+
+ return phylink_ethtool_set_eee(mac->phylink, eee);
+}
+
static u16 mtk_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{
@@ -4506,6 +4566,8 @@ static const struct ethtool_ops mtk_ethtool_ops = {
.set_pauseparam = mtk_set_pauseparam,
.get_rxnfc = mtk_get_rxnfc,
.set_rxnfc = mtk_set_rxnfc,
+ .get_eee = mtk_get_eee,
+ .set_eee = mtk_set_eee,
};
static const struct net_device_ops mtk_netdev_ops = {
@@ -4615,6 +4677,9 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
mac->phylink_config.type = PHYLINK_NETDEV;
mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
+ mac->phylink_config.lpi_capabilities = MAC_100FD | MAC_1000FD |
+ MAC_2500FD;
+ mac->phylink_config.lpi_timer_default = 1000;
/* MT7623 gmac0 is now missing its speed-specific PLL configuration
* in its .mac_config method (since state->speed is not valid there.
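The mtk_poll_rx() hunk above flips the meta_valid argument of xdp_prepare_buff() to true and propagates any XDP metadata into the skb via skb_metadata_set(). For context, a minimal producer of such metadata; the struct layout and value are illustrative, while bpf_xdp_adjust_meta() and the bounds check are the standard XDP API:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct rx_meta { __u32 hint; };         /* illustrative layout */

SEC("xdp")
int set_rx_meta(struct xdp_md *ctx)
{
	struct rx_meta *m;

	/* Grow the metadata area that sits in front of the packet */
	if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*m)))
		return XDP_PASS;

	m = (void *)(long)ctx->data_meta;
	/* Verifier-mandated check: metadata must end before packet data */
	if ((void *)(m + 1) > (void *)(long)ctx->data)
		return XDP_PASS;

	m->hint = 1;    /* on XDP_PASS the driver now carries this into the skb */
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";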
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 0d5225f1d3ee..90a377ab4359 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -453,6 +453,8 @@
#define MAC_MCR_RX_FIFO_CLR_DIS BIT(12)
#define MAC_MCR_BACKOFF_EN BIT(9)
#define MAC_MCR_BACKPR_EN BIT(8)
+#define MAC_MCR_EEE1G BIT(7)
+#define MAC_MCR_EEE100M BIT(6)
#define MAC_MCR_FORCE_RX_FC BIT(5)
#define MAC_MCR_FORCE_TX_FC BIT(4)
#define MAC_MCR_SPEED_1000 BIT(3)
@@ -461,6 +463,15 @@
#define MAC_MCR_FORCE_LINK BIT(0)
#define MAC_MCR_FORCE_LINK_DOWN (MAC_MCR_FORCE_MODE)
+/* Mac EEE control registers */
+#define MTK_MAC_EEECR(x) (0x10104 + (x * 0x100))
+#define MAC_EEE_WAKEUP_TIME_1000 GENMASK(31, 24)
+#define MAC_EEE_WAKEUP_TIME_100 GENMASK(23, 16)
+#define MAC_EEE_LPI_TXIDLE_THD GENMASK(15, 8)
+#define MAC_EEE_CKG_TXIDLE BIT(3)
+#define MAC_EEE_CKG_RXLPI BIT(2)
+#define MAC_EEE_LPI_MODE BIT(0)
+
/* Mac status registers */
#define MTK_MAC_MSR(x) (0x10108 + (x * 0x100))
#define MAC_MSR_EEE1G BIT(7)
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index f20bb390df3a..c855fb799ce1 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -34,8 +34,10 @@ struct mtk_flow_data {
u16 vlan_in;
struct {
- u16 id;
- __be16 proto;
+ struct {
+ u16 id;
+ __be16 proto;
+ } vlans[2];
u8 num;
} vlan;
struct {
@@ -349,18 +351,19 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f,
case FLOW_ACTION_CSUM:
break;
case FLOW_ACTION_VLAN_PUSH:
- if (data.vlan.num == 1 ||
+ if (data.vlan.num + data.pppoe.num == 2 ||
act->vlan.proto != htons(ETH_P_8021Q))
return -EOPNOTSUPP;
- data.vlan.id = act->vlan.vid;
- data.vlan.proto = act->vlan.proto;
+ data.vlan.vlans[data.vlan.num].id = act->vlan.vid;
+ data.vlan.vlans[data.vlan.num].proto = act->vlan.proto;
data.vlan.num++;
break;
case FLOW_ACTION_VLAN_POP:
break;
case FLOW_ACTION_PPPOE_PUSH:
- if (data.pppoe.num == 1)
+ if (data.pppoe.num == 1 ||
+ data.vlan.num == 2)
return -EOPNOTSUPP;
data.pppoe.sid = act->pppoe.sid;
@@ -450,12 +453,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f,
if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
foe.bridge.vlan = data.vlan_in;
- if (data.vlan.num == 1) {
- if (data.vlan.proto != htons(ETH_P_8021Q))
- return -EOPNOTSUPP;
+ for (i = 0; i < data.vlan.num; i++)
+ mtk_foe_entry_set_vlan(eth, &foe, data.vlan.vlans[i].id);
- mtk_foe_entry_set_vlan(eth, &foe, data.vlan.id);
- }
if (data.pppoe.num == 1)
mtk_foe_entry_set_pppoe(eth, &foe, data.pppoe.sid);
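Taken together, the two checks above enforce a single insertion budget: the PPE can push at most two tags in total, and a PPPoE header consumes one slot just like a VLAN. Distilled into a standalone predicate (local names, not driver symbols):

#include <stdbool.h>
#include <stdio.h>

struct tag_state { int vlan_num; int pppoe_num; };

static bool can_push_vlan(const struct tag_state *s, bool is_8021q)
{
	/* Only 802.1Q tags, and only while the two-slot budget has room */
	return is_8021q && s->vlan_num + s->pppoe_num < 2;
}

static bool can_push_pppoe(const struct tag_state *s)
{
	/* One PPPoE header at most, and not on double-tagged traffic */
	return s->pppoe_num == 0 && s->vlan_num < 2;
}

int main(void)
{
	struct tag_state s = { .vlan_num = 1, .pppoe_num = 1 };

	printf("second VLAN after PPPoE allowed: %d\n", can_push_vlan(&s, true));
	return 0;
}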
diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
index 25989c79c92e..76f202d7f055 100644
--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
+++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
@@ -1427,15 +1427,10 @@ static int mtk_star_mdio_init(struct net_device *ndev)
of_node = dev->of_node;
- mdio_node = of_get_child_by_name(of_node, "mdio");
+ mdio_node = of_get_available_child_by_name(of_node, "mdio");
if (!mdio_node)
return -ENODEV;
- if (!of_device_is_available(mdio_node)) {
- ret = -ENODEV;
- goto out_put_node;
- }
-
priv->mii = devm_mdiobus_alloc(dev);
if (!priv->mii) {
ret = -ENOMEM;
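Both conversions, here and in mtk_eth_soc.c above, drop the same boilerplate. Roughly, the consolidated helper folds the lookup and the availability ("status" property) check together, as in this open-coded equivalent (kernel context assumed; real OF calls, local function name):

static struct device_node *
get_available_child(struct device_node *parent, const char *name)
{
	struct device_node *np = of_get_child_by_name(parent, name);

	/* Treat a disabled node the same as a missing one, dropping the ref */
	if (np && !of_device_is_available(np)) {
		of_node_put(np);
		np = NULL;
	}
	return np;
}

This also removes the error path that previously had to remember of_node_put() before returning -ENODEV.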
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index b330020dc0d6..07b061a97a6e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -526,28 +526,6 @@ out:
return res;
}
-u32 mlx4_zone_free_entries(struct mlx4_zone_allocator *zones, u32 uid, u32 obj, u32 count)
-{
- struct mlx4_zone_entry *zone;
- int res = 0;
-
- spin_lock(&zones->lock);
-
- zone = __mlx4_find_zone_by_uid(zones, uid);
-
- if (NULL == zone) {
- res = -1;
- goto out;
- }
-
- __mlx4_free_from_zone(zone, obj, count);
-
-out:
- spin_unlock(&zones->lock);
-
- return res;
-}
-
u32 mlx4_zone_free_entries_unique(struct mlx4_zone_allocator *zones, u32 obj, u32 count)
{
struct mlx4_zone_entry *zone;
@@ -682,9 +660,9 @@ static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
}
static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir,
- struct mlx4_db *db, int order)
+ struct mlx4_db *db, unsigned int order)
{
- int o;
+ unsigned int o;
int i;
for (o = order; o <= 1; ++o) {
@@ -712,7 +690,7 @@ found:
return 0;
}
-int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
+int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, unsigned int order)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_db_pgdir *pgdir;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 15c57e9517e9..b33285d755b9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -48,60 +48,43 @@
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_checksum.h>
#endif
+#include <net/page_pool/helpers.h>
#include "mlx4_en.h"
-static int mlx4_alloc_page(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_alloc *frag,
- gfp_t gfp)
-{
- struct page *page;
- dma_addr_t dma;
-
- page = alloc_page(gfp);
- if (unlikely(!page))
- return -ENOMEM;
- dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE, priv->dma_dir);
- if (unlikely(dma_mapping_error(priv->ddev, dma))) {
- __free_page(page);
- return -ENOMEM;
- }
- frag->page = page;
- frag->dma = dma;
- frag->page_offset = priv->rx_headroom;
- return 0;
-}
-
static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring,
struct mlx4_en_rx_desc *rx_desc,
struct mlx4_en_rx_alloc *frags,
gfp_t gfp)
{
+ dma_addr_t dma;
int i;
for (i = 0; i < priv->num_frags; i++, frags++) {
if (!frags->page) {
- if (mlx4_alloc_page(priv, frags, gfp)) {
+ frags->page = page_pool_alloc_pages(ring->pp, gfp);
+ if (!frags->page) {
ring->alloc_fail++;
return -ENOMEM;
}
+ page_pool_fragment_page(frags->page, 1);
+ frags->page_offset = priv->rx_headroom;
+
ring->rx_alloc_pages++;
}
- rx_desc->data[i].addr = cpu_to_be64(frags->dma +
- frags->page_offset);
+ dma = page_pool_get_dma_addr(frags->page);
+ rx_desc->data[i].addr = cpu_to_be64(dma + frags->page_offset);
}
return 0;
}
static void mlx4_en_free_frag(const struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring,
struct mlx4_en_rx_alloc *frag)
{
- if (frag->page) {
- dma_unmap_page(priv->ddev, frag->dma,
- PAGE_SIZE, priv->dma_dir);
- __free_page(frag->page);
- }
+ if (frag->page)
+ page_pool_put_full_page(ring->pp, frag->page, false);
/* We need to clear all fields, otherwise a change of priv->log_rx_info
* could lead to seeing garbage later in frag->page.
*/
@@ -141,18 +124,6 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
(index << ring->log_stride);
struct mlx4_en_rx_alloc *frags = ring->rx_info +
(index << priv->log_rx_info);
- if (likely(ring->page_cache.index > 0)) {
- /* XDP uses a single page per frame */
- if (!frags->page) {
- ring->page_cache.index--;
- frags->page = ring->page_cache.buf[ring->page_cache.index].page;
- frags->dma = ring->page_cache.buf[ring->page_cache.index].dma;
- }
- frags->page_offset = XDP_PACKET_HEADROOM;
- rx_desc->data[0].addr = cpu_to_be64(frags->dma +
- XDP_PACKET_HEADROOM);
- return 0;
- }
return mlx4_en_alloc_frags(priv, ring, rx_desc, frags, gfp);
}
@@ -178,7 +149,7 @@ static void mlx4_en_free_rx_desc(const struct mlx4_en_priv *priv,
frags = ring->rx_info + (index << priv->log_rx_info);
for (nr = 0; nr < priv->num_frags; nr++) {
en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
- mlx4_en_free_frag(priv, frags + nr);
+ mlx4_en_free_frag(priv, ring, frags + nr);
}
}
@@ -268,6 +239,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
u32 size, u16 stride, int node, int queue_index)
{
struct mlx4_en_dev *mdev = priv->mdev;
+ struct page_pool_params pp = {};
struct mlx4_en_rx_ring *ring;
int err = -ENOMEM;
int tmp;
@@ -286,9 +258,26 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
ring->log_stride = ffs(ring->stride) - 1;
ring->buf_size = ring->size * ring->stride + TXBB_SIZE;
- if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index, 0) < 0)
+ pp.flags = PP_FLAG_DMA_MAP;
+ pp.pool_size = size * DIV_ROUND_UP(priv->rx_skb_size, PAGE_SIZE);
+ pp.nid = node;
+ pp.napi = &priv->rx_cq[queue_index]->napi;
+ pp.netdev = priv->dev;
+ pp.dev = &mdev->dev->persist->pdev->dev;
+ pp.dma_dir = priv->dma_dir;
+
+ ring->pp = page_pool_create(&pp);
+ if (!ring->pp)
goto err_ring;
+ if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index, 0) < 0)
+ goto err_pp;
+
+ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, MEM_TYPE_PAGE_POOL,
+ ring->pp);
+ if (err)
+ goto err_xdp_info;
+
tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
sizeof(struct mlx4_en_rx_alloc));
ring->rx_info = kvzalloc_node(tmp, GFP_KERNEL, node);
@@ -319,6 +308,8 @@ err_info:
ring->rx_info = NULL;
err_xdp_info:
xdp_rxq_info_unreg(&ring->xdp_rxq);
+err_pp:
+ page_pool_destroy(ring->pp);
err_ring:
kfree(ring);
*pring = NULL;
@@ -409,26 +400,6 @@ void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
}
}
-/* When the rx ring is running in page-per-packet mode, a released frame can go
- * directly into a small cache, to avoid unmapping or touching the page
- * allocator. In bpf prog performance scenarios, buffers are either forwarded
- * or dropped, never converted to skbs, so every page can come directly from
- * this cache when it is sized to be a multiple of the napi budget.
- */
-bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
- struct mlx4_en_rx_alloc *frame)
-{
- struct mlx4_en_page_cache *cache = &ring->page_cache;
-
- if (cache->index >= MLX4_EN_CACHE_SIZE)
- return false;
-
- cache->buf[cache->index].page = frame->page;
- cache->buf[cache->index].dma = frame->dma;
- cache->index++;
- return true;
-}
-
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring **pring,
u32 size, u16 stride)
@@ -445,6 +416,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
xdp_rxq_info_unreg(&ring->xdp_rxq);
mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
kvfree(ring->rx_info);
+ page_pool_destroy(ring->pp);
ring->rx_info = NULL;
kfree(ring);
*pring = NULL;
@@ -453,14 +425,6 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring)
{
- int i;
-
- for (i = 0; i < ring->page_cache.index; i++) {
- dma_unmap_page(priv->ddev, ring->page_cache.buf[i].dma,
- PAGE_SIZE, priv->dma_dir);
- put_page(ring->page_cache.buf[i].page);
- }
- ring->page_cache.index = 0;
mlx4_en_free_rx_buf(priv, ring);
if (ring->stride <= TXBB_SIZE)
ring->buf -= TXBB_SIZE;
@@ -487,7 +451,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
if (unlikely(!page))
goto fail;
- dma = frags->dma;
+ dma = page_pool_get_dma_addr(page);
dma_sync_single_range_for_cpu(priv->ddev, dma, frags->page_offset,
frag_size, priv->dma_dir);
@@ -498,6 +462,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
if (frag_info->frag_stride == PAGE_SIZE / 2) {
frags->page_offset ^= PAGE_SIZE / 2;
release = page_count(page) != 1 ||
+ atomic_long_read(&page->pp_ref_count) != 1 ||
page_is_pfmemalloc(page) ||
page_to_nid(page) != numa_mem_id();
} else if (!priv->rx_headroom) {
@@ -511,10 +476,9 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
release = frags->page_offset + frag_info->frag_size > PAGE_SIZE;
}
if (release) {
- dma_unmap_page(priv->ddev, dma, PAGE_SIZE, priv->dma_dir);
frags->page = NULL;
} else {
- page_ref_inc(page);
+ page_pool_ref_page(page);
}
nr++;
@@ -784,7 +748,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* Get pointer to first fragment since we haven't
* skb yet and cast it to ethhdr struct
*/
- dma = frags[0].dma + frags[0].page_offset;
+ dma = page_pool_get_dma_addr(frags[0].page);
+ dma += frags[0].page_offset;
dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
DMA_FROM_DEVICE);
@@ -823,7 +788,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
void *orig_data;
u32 act;
- dma = frags[0].dma + frags[0].page_offset;
+ dma = page_pool_get_dma_addr(frags[0].page);
+ dma += frags[0].page_offset;
dma_sync_single_for_cpu(priv->ddev, dma,
priv->frag_info[0].frag_size,
DMA_FROM_DEVICE);
@@ -886,6 +852,7 @@ xdp_drop_no_cnt:
skb = napi_get_frags(&cq->napi);
if (unlikely(!skb))
goto next;
+ skb_mark_for_recycle(skb);
if (unlikely(ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL)) {
u64 timestamp = mlx4_en_get_cqe_ts(cqe);
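Condensed, the lifecycle the RX path adopts above (kernel context assumed, error handling trimmed; dma_dev stands in for the device's DMA struct device, everything else is calls used in the hunks):

struct page_pool_params pp = {
	.flags     = PP_FLAG_DMA_MAP,   /* pool maps and unmaps pages itself */
	.pool_size = 1024,              /* sized from the ring, as above */
	.dev       = dma_dev,
	.dma_dir   = DMA_FROM_DEVICE,
};
struct page_pool *pool = page_pool_create(&pp);

struct page *page = page_pool_alloc_pages(pool, GFP_ATOMIC);
dma_addr_t dma = page_pool_get_dma_addr(page);  /* replaces dma_map_page() */
/* ... post dma to an RX descriptor; on completion, skbs marked with
 * skb_mark_for_recycle() hand their pages back to the pool automatically ...
 */
page_pool_put_full_page(pool, page, false);     /* explicit release path */
page_pool_destroy(pool);

Dropping the driver's private page_cache falls out of this: recycling is now the pool's job, including for XDP_TX completions.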
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 1ddb11cb25f9..87f35bcbeff8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -44,6 +44,7 @@
#include <linux/ipv6.h>
#include <linux/indirect_call_wrapper.h>
#include <net/ipv6.h>
+#include <net/page_pool/helpers.h>
#include "mlx4_en.h"
@@ -350,16 +351,10 @@ u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
int napi_mode)
{
struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
- struct mlx4_en_rx_alloc frame = {
- .page = tx_info->page,
- .dma = tx_info->map0_dma,
- };
-
- if (!napi_mode || !mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
- dma_unmap_page(priv->ddev, tx_info->map0_dma,
- PAGE_SIZE, priv->dma_dir);
- put_page(tx_info->page);
- }
+ struct page_pool *pool = ring->recycle_ring->pp;
+
+ /* Note that napi_mode = 0 means ndo_close() path, not budget = 0 */
+ page_pool_put_full_page(pool, tx_info->page, !!napi_mode);
return tx_info->nr_txbb;
}
@@ -450,6 +445,8 @@ int mlx4_en_process_tx_cq(struct net_device *dev,
if (unlikely(!priv->port_up))
return 0;
+ if (unlikely(!napi_budget) && cq->type == TX_XDP)
+ return 0;
netdev_txq_bql_complete_prefetchw(ring->tx_queue);
@@ -1194,7 +1191,7 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
tx_desc = ring->buf + (index << LOG_TXBB_SIZE);
data = &tx_desc->data;
- dma = frame->dma;
+ dma = page_pool_get_dma_addr(frame->page);
tx_info->page = frame->page;
frame->page = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index d7d856d1758a..b213094ea30f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -1478,12 +1478,6 @@ void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc);
u32 mlx4_zone_alloc_entries(struct mlx4_zone_allocator *zones, u32 uid, int count,
int align, u32 skip_mask, u32 *puid);
-/* Free <count> objects, start from <obj> of the uid <uid> from zone_allocator
- * <zones>.
- */
-u32 mlx4_zone_free_entries(struct mlx4_zone_allocator *zones,
- u32 uid, u32 obj, u32 count);
-
/* If <zones> was allocated with MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP, instead of
* specifying the uid when freeing an object, zone allocator could figure it by
* itself. Other parameters are similar to mlx4_zone_free.
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 28b70dcc652e..ad0d91a75184 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -247,20 +247,11 @@ struct mlx4_en_tx_desc {
struct mlx4_en_rx_alloc {
struct page *page;
- dma_addr_t dma;
u32 page_offset;
};
#define MLX4_EN_CACHE_SIZE (2 * NAPI_POLL_WEIGHT)
-struct mlx4_en_page_cache {
- u32 index;
- struct {
- struct page *page;
- dma_addr_t dma;
- } buf[MLX4_EN_CACHE_SIZE];
-};
-
enum {
MLX4_EN_TX_RING_STATE_RECOVERING,
};
@@ -335,14 +326,14 @@ struct mlx4_en_rx_ring {
u16 stride;
u16 log_stride;
u16 cqn; /* index of port CQ associated with this ring */
+ u8 fcs_del;
u32 prod;
u32 cons;
u32 buf_size;
- u8 fcs_del;
+ struct page_pool *pp;
void *buf;
void *rx_info;
struct bpf_prog __rcu *xdp_prog;
- struct mlx4_en_page_cache page_cache;
unsigned long bytes;
unsigned long packets;
unsigned long csum_ok;
@@ -707,8 +698,6 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
struct mlx4_en_priv *priv, unsigned int length,
int tx_ind, bool *doorbell_pending);
void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring);
-bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
- struct mlx4_en_rx_alloc *frame);
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring **pring,
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 4e43f4a7d246..e3d0b13c1610 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -147,26 +147,6 @@ static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
return err;
}
-int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx)
-{
- struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
- struct mlx4_mac_table *table = &info->mac_table;
- int i;
-
- for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
- if (!table->refs[i])
- continue;
-
- if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
- *idx = i;
- return 0;
- }
- }
-
- return -ENOENT;
-}
-EXPORT_SYMBOL_GPL(mlx4_find_cached_mac);
-
static bool mlx4_need_mf_bond(struct mlx4_dev *dev)
{
int i, num_eth_ports = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index ea6070180c96..6ec7d6e0181d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -31,6 +31,7 @@ config MLX5_CORE_EN
bool "Mellanox 5th generation network adapters (ConnectX series) Ethernet support"
depends on NETDEVICES && ETHERNET && INET && PCI && MLX5_CORE
select PAGE_POOL
+ select PAGE_POOL_STATS
select DIMLIB
help
Ethernet support in Mellanox Technologies ConnectX-4 NIC.
@@ -80,8 +81,8 @@ config MLX5_BRIDGE
default y
help
mlx5 ConnectX offloads support for Ethernet Bridging (BRIDGE).
- Enable adding representors of mlx5 uplink and VF ports to Bridge and
- offloading rules for traffic between such ports. Supports VLANs (trunk and
+ Enable offloading FDB rules from a bridge device containing
+ representors of mlx5 uplink and VF ports. Supports VLANs (trunk and
access modes).
config MLX5_CLS_ACT
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index e733b81e18a2..e53dbdc0a7a1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -94,6 +94,11 @@ static u16 in_to_opcode(void *in)
return MLX5_GET(mbox_in, in, opcode);
}
+static u16 in_to_uid(void *in)
+{
+ return MLX5_GET(mbox_in, in, uid);
+}
+
/* Returns true for opcodes that might be triggered very frequently and throttle
* the command interface. Limit their command slot usage.
*/
@@ -823,7 +828,7 @@ static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out)
opcode = in_to_opcode(in);
op_mod = MLX5_GET(mbox_in, in, op_mod);
- uid = MLX5_GET(mbox_in, in, uid);
+ uid = in_to_uid(in);
status = MLX5_GET(mbox_out, out, status);
if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY &&
@@ -1871,6 +1876,17 @@ static int is_manage_pages(void *in)
return in_to_opcode(in) == MLX5_CMD_OP_MANAGE_PAGES;
}
+static bool mlx5_has_privileged_uid(struct mlx5_core_dev *dev)
+{
+ return !xa_empty(&dev->cmd.vars.privileged_uids);
+}
+
+static bool mlx5_cmd_is_privileged_uid(struct mlx5_core_dev *dev,
+ u16 uid)
+{
+ return !!xa_load(&dev->cmd.vars.privileged_uids, uid);
+}
+
/* Notes:
* 1. Callback functions may not sleep
* 2. Page queue commands do not support asynchronous completion
@@ -1881,7 +1897,9 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
{
struct mlx5_cmd_msg *inb, *outb;
u16 opcode = in_to_opcode(in);
- bool throttle_op;
+ bool throttle_locked = false;
+ bool unpriv_locked = false;
+ u16 uid = in_to_uid(in);
int pages_queue;
gfp_t gfp;
u8 token;
@@ -1890,12 +1908,17 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode))
return -ENXIO;
- throttle_op = mlx5_cmd_is_throttle_opcode(opcode);
- if (throttle_op) {
- if (callback) {
- if (down_trylock(&dev->cmd.vars.throttle_sem))
- return -EBUSY;
- } else {
+ if (!callback) {
+ /* The semaphore is already held for callback commands. It was
+ * acquired in mlx5_cmd_exec_cb()
+ */
+ if (uid && mlx5_has_privileged_uid(dev)) {
+ if (!mlx5_cmd_is_privileged_uid(dev, uid)) {
+ unpriv_locked = true;
+ down(&dev->cmd.vars.unprivileged_sem);
+ }
+ } else if (mlx5_cmd_is_throttle_opcode(opcode)) {
+ throttle_locked = true;
down(&dev->cmd.vars.throttle_sem);
}
}
@@ -1941,8 +1964,11 @@ out_out:
out_in:
free_msg(dev, inb);
out_up:
- if (throttle_op)
+ if (throttle_locked)
up(&dev->cmd.vars.throttle_sem);
+ if (unpriv_locked)
+ up(&dev->cmd.vars.unprivileged_sem);
+
return err;
}
@@ -2104,18 +2130,22 @@ static void mlx5_cmd_exec_cb_handler(int status, void *_work)
struct mlx5_async_work *work = _work;
struct mlx5_async_ctx *ctx;
struct mlx5_core_dev *dev;
- u16 opcode;
+ bool throttle_locked;
+ bool unpriv_locked;
ctx = work->ctx;
dev = ctx->dev;
- opcode = work->opcode;
+ throttle_locked = work->throttle_locked;
+ unpriv_locked = work->unpriv_locked;
status = cmd_status_err(dev, status, work->opcode, work->op_mod, work->out);
work->user_callback(status, work);
/* Can't access "work" from this point on. It could have been freed in
* the callback.
*/
- if (mlx5_cmd_is_throttle_opcode(opcode))
+ if (throttle_locked)
up(&dev->cmd.vars.throttle_sem);
+ if (unpriv_locked)
+ up(&dev->cmd.vars.unprivileged_sem);
if (atomic_dec_and_test(&ctx->num_inflight))
complete(&ctx->inflight_done);
}
@@ -2124,6 +2154,8 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
void *out, int out_size, mlx5_async_cbk_t callback,
struct mlx5_async_work *work)
{
+ struct mlx5_core_dev *dev = ctx->dev;
+ u16 uid;
int ret;
work->ctx = ctx;
@@ -2131,11 +2163,43 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
work->opcode = in_to_opcode(in);
work->op_mod = MLX5_GET(mbox_in, in, op_mod);
work->out = out;
+ work->throttle_locked = false;
+ work->unpriv_locked = false;
+ uid = in_to_uid(in);
+
if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight)))
return -EIO;
- ret = cmd_exec(ctx->dev, in, in_size, out, out_size,
+
+ if (uid && mlx5_has_privileged_uid(dev)) {
+ if (!mlx5_cmd_is_privileged_uid(dev, uid)) {
+ if (down_trylock(&dev->cmd.vars.unprivileged_sem)) {
+ ret = -EBUSY;
+ goto dec_num_inflight;
+ }
+ work->unpriv_locked = true;
+ }
+ } else if (mlx5_cmd_is_throttle_opcode(in_to_opcode(in))) {
+ if (down_trylock(&dev->cmd.vars.throttle_sem)) {
+ ret = -EBUSY;
+ goto dec_num_inflight;
+ }
+ work->throttle_locked = true;
+ }
+
+ ret = cmd_exec(dev, in, in_size, out, out_size,
mlx5_cmd_exec_cb_handler, work, false);
- if (ret && atomic_dec_and_test(&ctx->num_inflight))
+ if (ret)
+ goto sem_up;
+
+ return 0;
+
+sem_up:
+ if (work->throttle_locked)
+ up(&dev->cmd.vars.throttle_sem);
+ if (work->unpriv_locked)
+ up(&dev->cmd.vars.unprivileged_sem);
+dec_num_inflight:
+ if (atomic_dec_and_test(&ctx->num_inflight))
complete(&ctx->inflight_done);
return ret;
@@ -2371,10 +2435,16 @@ int mlx5_cmd_enable(struct mlx5_core_dev *dev)
sema_init(&cmd->vars.sem, cmd->vars.max_reg_cmds);
sema_init(&cmd->vars.pages_sem, 1);
sema_init(&cmd->vars.throttle_sem, DIV_ROUND_UP(cmd->vars.max_reg_cmds, 2));
+ sema_init(&cmd->vars.unprivileged_sem,
+ DIV_ROUND_UP(cmd->vars.max_reg_cmds, 2));
+
+ xa_init(&cmd->vars.privileged_uids);
cmd->pool = dma_pool_create("mlx5_cmd", mlx5_core_dma_dev(dev), size, align, 0);
- if (!cmd->pool)
- return -ENOMEM;
+ if (!cmd->pool) {
+ err = -ENOMEM;
+ goto err_destroy_xa;
+ }
err = alloc_cmd_page(dev, cmd);
if (err)
@@ -2408,6 +2478,8 @@ err_cmd_page:
free_cmd_page(dev, cmd);
err_free_pool:
dma_pool_destroy(cmd->pool);
+err_destroy_xa:
+ xa_destroy(&dev->cmd.vars.privileged_uids);
return err;
}
@@ -2420,6 +2492,7 @@ void mlx5_cmd_disable(struct mlx5_core_dev *dev)
destroy_msg_cache(dev);
free_cmd_page(dev, cmd);
dma_pool_destroy(cmd->pool);
+ xa_destroy(&dev->cmd.vars.privileged_uids);
}
void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
@@ -2427,3 +2500,18 @@ void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
{
dev->cmd.state = cmdif_state;
}
+
+int mlx5_cmd_add_privileged_uid(struct mlx5_core_dev *dev, u16 uid)
+{
+ return xa_insert(&dev->cmd.vars.privileged_uids, uid,
+ xa_mk_value(uid), GFP_KERNEL);
+}
+EXPORT_SYMBOL(mlx5_cmd_add_privileged_uid);
+
+void mlx5_cmd_remove_privileged_uid(struct mlx5_core_dev *dev, u16 uid)
+{
+ void *data = xa_erase(&dev->cmd.vars.privileged_uids, uid);
+
+ WARN(!data, "Privileged UID %u does not exist\n", uid);
+}
+EXPORT_SYMBOL(mlx5_cmd_remove_privileged_uid);
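The net effect of the cmd_exec() and mlx5_cmd_exec_cb() changes is a three-way choice of rate limiter per command. A standalone model of that decision (local names; the booleans stand in for the xarray lookup and the opcode check):

#include <stdbool.h>
#include <stdio.h>

enum cmd_sem { SEM_NONE, SEM_UNPRIVILEGED, SEM_THROTTLE };

static enum cmd_sem pick_sem(bool uid_set, bool have_privileged_uids,
			     bool uid_is_privileged, bool throttle_opcode)
{
	/* User (uid != 0) commands are classified by UID first: privileged
	 * UIDs bypass both semaphores, all other UIDs share one limiter.
	 */
	if (uid_set && have_privileged_uids)
		return uid_is_privileged ? SEM_NONE : SEM_UNPRIVILEGED;
	/* Otherwise keep the old per-opcode throttling */
	return throttle_opcode ? SEM_THROTTLE : SEM_NONE;
}

int main(void)
{
	printf("%d\n", pick_sem(true, true, false, false)); /* SEM_UNPRIVILEGED */
	return 0;
}

Synchronous callers block on down(); the async path uses down_trylock() and fails with -EBUSY instead, which is why the taken semaphore is recorded in the work struct for the completion handler to release.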
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index 9a79674d27f1..891bbbbfbbf1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -228,8 +228,15 @@ enum {
MLX5_INTERFACE_PROTOCOL_VNET,
MLX5_INTERFACE_PROTOCOL_DPLL,
+ MLX5_INTERFACE_PROTOCOL_FWCTL,
};
+static bool is_fwctl_supported(struct mlx5_core_dev *dev)
+{
+ /* fwctl is most useful on PFs, prevent fwctl on SFs for now */
+ return MLX5_CAP_GEN(dev, uctx_cap) && !mlx5_core_is_sf(dev);
+}
+
static const struct mlx5_adev_device {
const char *suffix;
bool (*is_supported)(struct mlx5_core_dev *dev);
@@ -252,6 +259,8 @@ static const struct mlx5_adev_device {
.is_supported = &is_mp_supported },
[MLX5_INTERFACE_PROTOCOL_DPLL] = { .suffix = "dpll",
.is_supported = &is_dpll_supported },
+ [MLX5_INTERFACE_PROTOCOL_FWCTL] = { .suffix = "fwctl",
+ .is_supported = &is_fwctl_supported },
};
int mlx5_adev_idx_alloc(void)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index a2cf3e79693d..73cd74644378 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -327,7 +327,8 @@ static const struct devlink_ops mlx5_devlink_ops = {
.rate_node_tx_max_set = mlx5_esw_devlink_rate_node_tx_max_set,
.rate_node_new = mlx5_esw_devlink_rate_node_new,
.rate_node_del = mlx5_esw_devlink_rate_node_del,
- .rate_leaf_parent_set = mlx5_esw_devlink_rate_parent_set,
+ .rate_leaf_parent_set = mlx5_esw_devlink_rate_leaf_parent_set,
+ .rate_node_parent_set = mlx5_esw_devlink_rate_node_parent_set,
#endif
#ifdef CONFIG_MLX5_SF_MANAGER
.port_new = mlx5_devlink_sf_port_new,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c
index c7216e84ef8c..86253a89c24c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c
@@ -13,6 +13,50 @@ struct mlx5_vnic_diag_stats {
__be64 query_vnic_env_out[MLX5_ST_SZ_QW(query_vnic_env_out)];
};
+static void mlx5_reporter_vnic_diagnose_counter_icm(struct mlx5_core_dev *dev,
+ struct devlink_fmsg *fmsg,
+ u16 vport_num, bool other_vport)
+{
+ u32 out_icm_reg[MLX5_ST_SZ_DW(vhca_icm_ctrl_reg)] = {};
+ u32 in_icm_reg[MLX5_ST_SZ_DW(vhca_icm_ctrl_reg)] = {};
+ u32 out_reg[MLX5_ST_SZ_DW(nic_cap_reg)] = {};
+ u32 in_reg[MLX5_ST_SZ_DW(nic_cap_reg)] = {};
+ u32 cur_alloc_icm;
+ int vhca_icm_ctrl;
+ u16 vhca_id;
+ int err;
+
+ err = mlx5_core_access_reg(dev, in_reg, sizeof(in_reg), out_reg,
+ sizeof(out_reg), MLX5_REG_NIC_CAP, 0, 0);
+ if (err) {
+ mlx5_core_warn(dev, "Reading nic_cap_reg failed. err = %d\n", err);
+ return;
+ }
+ vhca_icm_ctrl = MLX5_GET(nic_cap_reg, out_reg, vhca_icm_ctrl);
+ if (!vhca_icm_ctrl)
+ return;
+
+ MLX5_SET(vhca_icm_ctrl_reg, in_icm_reg, vhca_id_valid, other_vport);
+ if (other_vport) {
+ err = mlx5_vport_get_vhca_id(dev, vport_num, &vhca_id);
+ if (err) {
+ mlx5_core_warn(dev, "vport to vhca_id failed. vport_num = %d, err = %d\n",
+ vport_num, err);
+ return;
+ }
+ MLX5_SET(vhca_icm_ctrl_reg, in_icm_reg, vhca_id, vhca_id);
+ }
+ err = mlx5_core_access_reg(dev, in_icm_reg, sizeof(in_icm_reg),
+ out_icm_reg, sizeof(out_icm_reg),
+ MLX5_REG_VHCA_ICM_CTRL, 0, 0);
+ if (err) {
+ mlx5_core_warn(dev, "Reading vhca_icm_ctrl failed. err = %d\n", err);
+ return;
+ }
+ cur_alloc_icm = MLX5_GET(vhca_icm_ctrl_reg, out_icm_reg, cur_alloc_icm);
+ devlink_fmsg_u32_pair_put(fmsg, "icm_consumption", cur_alloc_icm);
+}
+
void mlx5_reporter_vnic_diagnose_counters(struct mlx5_core_dev *dev,
struct devlink_fmsg *fmsg,
u16 vport_num, bool other_vport)
@@ -59,6 +103,8 @@ void mlx5_reporter_vnic_diagnose_counters(struct mlx5_core_dev *dev,
devlink_fmsg_u64_pair_put(fmsg, "handled_pkt_steering_fail",
VNIC_ENV_GET64(&vnic, handled_pkt_steering_fail));
}
+ if (MLX5_CAP_GEN(dev, nic_cap_reg))
+ mlx5_reporter_vnic_diagnose_counter_icm(dev, fmsg, vport_num, other_vport);
devlink_fmsg_obj_nest_end(fmsg);
devlink_fmsg_pair_nest_end(fmsg);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
index 31142f6cc372..1e5522a19483 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
@@ -242,7 +242,7 @@ static int mlx5_dpll_clock_quality_level_get(const struct dpll_device *dpll,
return 0;
}
errout:
- NL_SET_ERR_MSG_MOD(extack, "Invalid clock quality level obtained from firmware\n");
+ NL_SET_ERR_MSG_MOD(extack, "Invalid clock quality level obtained from firmware");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 979fc56205e1..32ed4963b8ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -95,8 +95,6 @@ struct page_pool;
#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) \
MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, order_base_2(MLX5E_RX_MAX_HEAD))
-#define MLX5_MPWRQ_MAX_LOG_WQE_SZ 18
-
/* Keep in sync with mlx5e_mpwrq_log_wqe_sz.
* These are theoretical maximums, which can be further restricted by
* capabilities. These values are used for static resource allocations and
@@ -232,16 +230,22 @@ struct mlx5e_rx_wqe_cyc {
DECLARE_FLEX_ARRAY(struct mlx5_wqe_data_seg, data);
};
-struct mlx5e_umr_wqe {
+struct mlx5e_umr_wqe_hdr {
struct mlx5_wqe_ctrl_seg ctrl;
struct mlx5_wqe_umr_ctrl_seg uctrl;
struct mlx5_mkey_seg mkc;
+};
+
+struct mlx5e_umr_wqe {
+ struct mlx5e_umr_wqe_hdr hdr;
union {
DECLARE_FLEX_ARRAY(struct mlx5_mtt, inline_mtts);
DECLARE_FLEX_ARRAY(struct mlx5_klm, inline_klms);
DECLARE_FLEX_ARRAY(struct mlx5_ksm, inline_ksms);
};
};
+static_assert(offsetof(struct mlx5e_umr_wqe, inline_mtts) == sizeof(struct mlx5e_umr_wqe_hdr),
+ "struct members should be included in struct mlx5e_umr_wqe_hdr, not in struct mlx5e_umr_wqe");
enum mlx5e_priv_flag {
MLX5E_PFLAG_RX_CQE_BASED_MODER,
@@ -386,7 +390,6 @@ enum {
MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
MLX5E_SQ_STATE_PENDING_XSK_TX,
MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC,
- MLX5E_SQ_STATE_XDP_MULTIBUF,
MLX5E_NUM_SQ_STATES, /* Must be kept last */
};
@@ -395,6 +398,7 @@ struct mlx5e_tx_mpwqe {
struct mlx5e_tx_wqe *wqe;
u32 bytes_count;
u8 ds_count;
+ u8 ds_count_max;
u8 pkt_count;
u8 inline_on;
};
@@ -660,7 +664,7 @@ struct mlx5e_rq {
} wqe;
struct {
struct mlx5_wq_ll wq;
- struct mlx5e_umr_wqe umr_wqe;
+ struct mlx5e_umr_wqe_hdr umr_wqe;
struct mlx5e_mpw_info *info;
mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq;
__be32 umr_mkey_be;
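The mlx5e_umr_wqe split above is a general C pattern: keep the fixed fields in a standalone header struct so users that only need those (like mlx5e_rq above) embed just the header, while the flexible entries must begin exactly where the header ends. A standalone sketch of the invariant the new static_assert pins down:

#include <assert.h>
#include <stddef.h>

struct wqe_hdr { int ctrl; int uctrl; int mkc; };

struct wqe {
	struct wqe_hdr hdr;
	int entries[];  /* flexible part, starts right after the header */
};

/* Any new fixed field must go into wqe_hdr, or the overlay breaks */
static_assert(offsetof(struct wqe, entries) == sizeof(struct wqe_hdr),
	      "fixed fields belong in struct wqe_hdr");

int main(void) { return 0; }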
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index 1e8b7d330701..b5c3a2a9d2a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -84,9 +84,9 @@ enum {
MLX5E_ARFS_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
#endif
#ifdef CONFIG_MLX5_EN_IPSEC
- MLX5E_ACCEL_FS_POL_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
- MLX5E_ACCEL_FS_ESP_FT_LEVEL,
+ MLX5E_ACCEL_FS_ESP_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL,
+ MLX5E_ACCEL_FS_POL_FT_LEVEL,
MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL,
#endif
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 64b62ed17b07..aa36670d9a36 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -10,6 +10,9 @@
#include <net/page_pool/types.h>
#include <net/xdp_sock_drv.h>
+#define MLX5_MPWRQ_MAX_LOG_WQE_SZ 18
+#define MLX5_REP_MPWRQ_MAX_LOG_WQE_SZ 17
+
static u8 mlx5e_mpwrq_min_page_shift(struct mlx5_core_dev *mdev)
{
u8 min_page_shift = MLX5_CAP_GEN_2(mdev, log_min_mkey_entity_size);
@@ -103,18 +106,22 @@ u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
enum mlx5e_mpwrq_umr_mode umr_mode)
{
u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode);
- u8 max_pages_per_wqe, max_log_mpwqe_size;
+ u8 max_pages_per_wqe, max_log_wqe_size_calc;
+ u8 max_log_wqe_size_cap;
u16 max_wqe_size;
/* Keep in sync with MLX5_MPWRQ_MAX_PAGES_PER_WQE. */
max_wqe_size = mlx5e_get_max_sq_aligned_wqebbs(mdev) * MLX5_SEND_WQE_BB;
max_pages_per_wqe = ALIGN_DOWN(max_wqe_size - sizeof(struct mlx5e_umr_wqe),
MLX5_UMR_FLEX_ALIGNMENT) / umr_entry_size;
- max_log_mpwqe_size = ilog2(max_pages_per_wqe) + page_shift;
+ max_log_wqe_size_calc = ilog2(max_pages_per_wqe) + page_shift;
+
+ WARN_ON_ONCE(max_log_wqe_size_calc < MLX5E_ORDER2_MAX_PACKET_MTU);
- WARN_ON_ONCE(max_log_mpwqe_size < MLX5E_ORDER2_MAX_PACKET_MTU);
+ max_log_wqe_size_cap = mlx5_core_is_ecpf(mdev) ?
+ MLX5_REP_MPWRQ_MAX_LOG_WQE_SZ : MLX5_MPWRQ_MAX_LOG_WQE_SZ;
- return min_t(u8, max_log_mpwqe_size, MLX5_MPWRQ_MAX_LOG_WQE_SZ);
+ return min_t(u8, max_log_wqe_size_calc, max_log_wqe_size_cap);
}
u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
@@ -1240,7 +1247,6 @@ void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
mlx5e_build_sq_param_common(mdev, param);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
- param->is_xdp_mb = !mlx5e_rx_is_linear_skb(mdev, params, xsk);
mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index 3f8986f9d862..bd5877acc5b1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -33,7 +33,6 @@ struct mlx5e_sq_param {
struct mlx5_wq_param wq;
bool is_mpw;
bool is_tls;
- bool is_xdp_mb;
u16 stop_room;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
index 5f6a0605e4ae..6049ccf475bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
@@ -80,6 +80,7 @@ int mlx5_port_set_eth_ptys(struct mlx5_core_dev *dev, bool an_disable,
int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
{
struct mlx5_port_eth_proto eproto;
+ const struct mlx5_link_info *info;
bool force_legacy = false;
bool ext;
int err;
@@ -94,9 +95,13 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
if (err)
goto out;
}
- *speed = mlx5_port_ptys2speed(mdev, eproto.oper, force_legacy);
- if (!(*speed))
+ info = mlx5_port_ptys2info(mdev, eproto.oper, force_legacy);
+ if (!info) {
+ *speed = SPEED_UNKNOWN;
err = -EINVAL;
+ goto out;
+ }
+ *speed = info->speed;
out:
return err;
@@ -296,11 +301,16 @@ enum mlx5e_fec_supported_link_mode {
MLX5E_FEC_SUPPORTED_LINK_MODE_200G_2X,
MLX5E_FEC_SUPPORTED_LINK_MODE_400G_4X,
MLX5E_FEC_SUPPORTED_LINK_MODE_800G_8X,
+ MLX5E_FEC_SUPPORTED_LINK_MODE_200G_1X,
+ MLX5E_FEC_SUPPORTED_LINK_MODE_400G_2X,
+ MLX5E_FEC_SUPPORTED_LINK_MODE_800G_4X,
+ MLX5E_FEC_SUPPORTED_LINK_MODE_1600G_8X,
MLX5E_MAX_FEC_SUPPORTED_LINK_MODE,
};
#define MLX5E_FEC_FIRST_50G_PER_LANE_MODE MLX5E_FEC_SUPPORTED_LINK_MODE_50G_1X
#define MLX5E_FEC_FIRST_100G_PER_LANE_MODE MLX5E_FEC_SUPPORTED_LINK_MODE_100G_1X
+#define MLX5E_FEC_FIRST_200G_PER_LANE_MODE MLX5E_FEC_SUPPORTED_LINK_MODE_200G_1X
#define MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, policy, write, link) \
do { \
@@ -320,8 +330,10 @@ static bool mlx5e_is_fec_supported_link_mode(struct mlx5_core_dev *dev,
return link_mode < MLX5E_FEC_FIRST_50G_PER_LANE_MODE ||
(link_mode < MLX5E_FEC_FIRST_100G_PER_LANE_MODE &&
MLX5_CAP_PCAM_FEATURE(dev, fec_50G_per_lane_in_pplm)) ||
- (link_mode >= MLX5E_FEC_FIRST_100G_PER_LANE_MODE &&
- MLX5_CAP_PCAM_FEATURE(dev, fec_100G_per_lane_in_pplm));
+ (link_mode < MLX5E_FEC_FIRST_200G_PER_LANE_MODE &&
+ MLX5_CAP_PCAM_FEATURE(dev, fec_100G_per_lane_in_pplm)) ||
+ (link_mode >= MLX5E_FEC_FIRST_200G_PER_LANE_MODE &&
+ MLX5_CAP_PCAM_FEATURE(dev, fec_200G_per_lane_in_pplm));
}
/* get/set FEC admin field for a given speed */
@@ -368,6 +380,18 @@ static int mlx5e_fec_admin_field(u32 *pplm, u16 *fec_policy, bool write,
case MLX5E_FEC_SUPPORTED_LINK_MODE_800G_8X:
MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 800g_8x);
break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_200G_1X:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 200g_1x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_2X:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 400g_2x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_800G_4X:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 800g_4x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_1600G_8X:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 1600g_8x);
+ break;
default:
return -EINVAL;
}
@@ -421,6 +445,18 @@ static int mlx5e_get_fec_cap_field(u32 *pplm, u16 *fec_cap,
case MLX5E_FEC_SUPPORTED_LINK_MODE_800G_8X:
*fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 800g_8x);
break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_200G_1X:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 200g_1x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_2X:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 400g_2x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_800G_4X:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 800g_4x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_1600G_8X:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 1600g_8x);
+ break;
default:
return -EINVAL;
}
@@ -494,6 +530,26 @@ out:
return 0;
}
+static u16 mlx5e_remap_fec_conf_mode(enum mlx5e_fec_supported_link_mode link_mode,
+ u16 conf_fec)
+{
+ /* RS fec in ethtool is originally mapped to MLX5E_FEC_RS_528_514.
+ * For link modes up to 25G per lane, the value is kept.
+ * For 50G or 100G per lane, it's remapped to MLX5E_FEC_RS_544_514.
+ * For 200G per lane, remapped to MLX5E_FEC_RS_544_514_INTERLEAVED_QUAD.
+ */
+ if (conf_fec != BIT(MLX5E_FEC_RS_528_514))
+ return conf_fec;
+
+ if (link_mode >= MLX5E_FEC_FIRST_200G_PER_LANE_MODE)
+ return BIT(MLX5E_FEC_RS_544_514_INTERLEAVED_QUAD);
+
+ if (link_mode >= MLX5E_FEC_FIRST_50G_PER_LANE_MODE)
+ return BIT(MLX5E_FEC_RS_544_514);
+
+ return conf_fec;
+}
+
int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u16 fec_policy)
{
bool fec_50g_per_lane = MLX5_CAP_PCAM_FEATURE(dev, fec_50G_per_lane_in_pplm);
@@ -530,14 +586,7 @@ int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u16 fec_policy)
if (!mlx5e_is_fec_supported_link_mode(dev, i))
break;
- /* RS fec in ethtool is mapped to MLX5E_FEC_RS_528_514
- * to link modes up to 25G per lane and to
- * MLX5E_FEC_RS_544_514 in the new link modes based on
- * 50G or 100G per lane
- */
- if (conf_fec == (1 << MLX5E_FEC_RS_528_514) &&
- i >= MLX5E_FEC_FIRST_50G_PER_LANE_MODE)
- conf_fec = (1 << MLX5E_FEC_RS_544_514);
+ conf_fec = mlx5e_remap_fec_conf_mode(i, conf_fec);
mlx5e_get_fec_cap_field(out, &fec_caps, i);
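A worked check of mlx5e_remap_fec_conf_mode() above, as a standalone model: a single ethtool "rs" request fans out per link mode. The bit values follow the port.h enum below (RS_528_514 = bit 2, interleaved quad = bit 4, RS_544_514 = bit 7); the link-mode ordering here is illustrative:

#include <stdio.h>

enum { RS_528_514 = 1 << 2, RS_544_514_IQ = 1 << 4, RS_544_514 = 1 << 7 };
enum { MODE_25G_1X = 0, MODE_50G_1X = 4, MODE_200G_1X = 9 };

static int remap_rs(int mode, int fec)
{
	if (fec != RS_528_514)
		return fec;                 /* only the RS request is remapped */
	if (mode >= MODE_200G_1X)
		return RS_544_514_IQ;       /* 200G-per-lane: interleaved quad */
	if (mode >= MODE_50G_1X)
		return RS_544_514;          /* 50G/100G-per-lane RS variant */
	return fec;                         /* legacy modes keep RS(528,514) */
}

int main(void)
{
	printf("100G_2X: %#x, 800G_4X: %#x\n",
	       remap_rs(MODE_50G_1X + 1, RS_528_514),
	       remap_rs(MODE_200G_1X + 1, RS_528_514));
	return 0;
}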
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
index d1da225f35da..fa2283dd383b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
@@ -61,6 +61,7 @@ enum {
MLX5E_FEC_NOFEC,
MLX5E_FEC_FIRECODE,
MLX5E_FEC_RS_528_514,
+ MLX5E_FEC_RS_544_514_INTERLEAVED_QUAD = 4,
MLX5E_FEC_RS_544_514 = 7,
MLX5E_FEC_LLRS_272_257_1 = 9,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index afd654583b6b..131ed97ca997 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -326,7 +326,7 @@ static int mlx5e_ptp_alloc_txqsq(struct mlx5e_ptp *c, int txq_ix,
int node;
sq->pdev = c->pdev;
- sq->clock = &mdev->clock;
+ sq->clock = mdev->clock;
sq->mkey_be = c->mkey_be;
sq->netdev = c->netdev;
sq->priv = c->priv;
@@ -696,7 +696,7 @@ static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
rq->pdev = c->pdev;
rq->netdev = priv->netdev;
rq->priv = priv;
- rq->clock = &mdev->clock;
+ rq->clock = mdev->clock;
rq->tstamp = &priv->tstamp;
rq->mdev = mdev;
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index 25d751eba99b..e75759533ae0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -317,10 +317,8 @@ mlx5e_rx_reporter_diagnose_common_ptp_config(struct mlx5e_priv *priv, struct mlx
}
static void
-mlx5e_rx_reporter_diagnose_common_config(struct devlink_health_reporter *reporter,
- struct devlink_fmsg *fmsg)
+mlx5e_rx_reporter_diagnose_common_config(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg)
{
- struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
struct mlx5e_rq *generic_rq = &priv->channels.c[0]->rq;
struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
@@ -340,20 +338,100 @@ static void mlx5e_rx_reporter_build_diagnose_output_ptp_rq(struct mlx5e_rq *rq,
devlink_fmsg_obj_nest_end(fmsg);
}
-static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
- struct devlink_fmsg *fmsg,
- struct netlink_ext_ack *extack)
+static void mlx5e_rx_reporter_diagnose_rx_res_dir_tirns(struct mlx5e_rx_res *rx_res,
+ struct devlink_fmsg *fmsg)
{
- struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
- struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
+ unsigned int max_nch = mlx5e_rx_res_get_max_nch(rx_res);
int i;
- mutex_lock(&priv->state_lock);
+ devlink_fmsg_arr_pair_nest_start(fmsg, "Direct TIRs");
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
- goto unlock;
+ for (i = 0; i < max_nch; i++) {
+ devlink_fmsg_obj_nest_start(fmsg);
+
+ devlink_fmsg_u32_pair_put(fmsg, "ix", i);
+ devlink_fmsg_u32_pair_put(fmsg, "tirn", mlx5e_rx_res_get_tirn_direct(rx_res, i));
+ devlink_fmsg_u32_pair_put(fmsg, "rqtn", mlx5e_rx_res_get_rqtn_direct(rx_res, i));
+
+ devlink_fmsg_obj_nest_end(fmsg);
+ }
+
+ devlink_fmsg_arr_pair_nest_end(fmsg);
+}
+
+static void mlx5e_rx_reporter_diagnose_rx_res_rss_tirn(struct mlx5e_rss *rss, bool inner,
+ struct devlink_fmsg *fmsg)
+{
+ bool found_valid_tir = false;
+ int tt;
+
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+ if (!mlx5e_rss_valid_tir(rss, tt, inner))
+ continue;
+
+ if (!found_valid_tir) {
+ char *tir_msg = inner ? "Inner TIRs Numbers" : "TIRs Numbers";
+
+ found_valid_tir = true;
+ devlink_fmsg_arr_pair_nest_start(fmsg, tir_msg);
+ }
+
+ devlink_fmsg_obj_nest_start(fmsg);
+ devlink_fmsg_string_pair_put(fmsg, "tt", mlx5_ttc_get_name(tt));
+ devlink_fmsg_u32_pair_put(fmsg, "tirn", mlx5e_rss_get_tirn(rss, tt, inner));
+ devlink_fmsg_obj_nest_end(fmsg);
+ }
+
+ if (found_valid_tir)
+ devlink_fmsg_arr_pair_nest_end(fmsg);
+}
+
+static void mlx5e_rx_reporter_diagnose_rx_res_rss_ix(struct mlx5e_rx_res *rx_res, u32 rss_idx,
+ struct devlink_fmsg *fmsg)
+{
+ struct mlx5e_rss *rss = mlx5e_rx_res_rss_get(rx_res, rss_idx);
+
+ if (!rss)
+ return;
+
+ devlink_fmsg_obj_nest_start(fmsg);
+
+ devlink_fmsg_u32_pair_put(fmsg, "Index", rss_idx);
+ devlink_fmsg_u32_pair_put(fmsg, "rqtn", mlx5e_rss_get_rqtn(rss));
+ mlx5e_rx_reporter_diagnose_rx_res_rss_tirn(rss, false, fmsg);
+ if (mlx5e_rss_get_inner_ft_support(rss))
+ mlx5e_rx_reporter_diagnose_rx_res_rss_tirn(rss, true, fmsg);
+
+ devlink_fmsg_obj_nest_end(fmsg);
+}
+
+static void mlx5e_rx_reporter_diagnose_rx_res_rss(struct mlx5e_rx_res *rx_res,
+ struct devlink_fmsg *fmsg)
+{
+ int rss_ix;
+
+ devlink_fmsg_arr_pair_nest_start(fmsg, "RSS");
+ for (rss_ix = 0; rss_ix < MLX5E_MAX_NUM_RSS; rss_ix++)
+ mlx5e_rx_reporter_diagnose_rx_res_rss_ix(rx_res, rss_ix, fmsg);
+ devlink_fmsg_arr_pair_nest_end(fmsg);
+}
+
+static void mlx5e_rx_reporter_diagnose_rx_res(struct mlx5e_priv *priv,
+ struct devlink_fmsg *fmsg)
+{
+ struct mlx5e_rx_res *rx_res = priv->rx_res;
+
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RX resources");
+ mlx5e_rx_reporter_diagnose_rx_res_dir_tirns(rx_res, fmsg);
+ mlx5e_rx_reporter_diagnose_rx_res_rss(rx_res, fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+}
+
+static void mlx5e_rx_reporter_diagnose_rqs(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg)
+{
+ struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
+ int i;
- mlx5e_rx_reporter_diagnose_common_config(reporter, fmsg);
devlink_fmsg_arr_pair_nest_start(fmsg, "RQs");
for (i = 0; i < priv->channels.num; i++) {
@@ -367,7 +445,24 @@ static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
}
if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state))
mlx5e_rx_reporter_build_diagnose_output_ptp_rq(&ptp_ch->rq, fmsg);
+
devlink_fmsg_arr_pair_nest_end(fmsg);
+}
+
+static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
+
+ mutex_lock(&priv->state_lock);
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto unlock;
+
+ mlx5e_rx_reporter_diagnose_common_config(priv, fmsg);
+ mlx5e_rx_reporter_diagnose_rqs(priv, fmsg);
+ mlx5e_rx_reporter_diagnose_rx_res(priv, fmsg);
unlock:
mutex_unlock(&priv->state_lock);
return 0;
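The new helpers lean on the devlink fmsg nesting discipline: every *_nest_start() must be paired with the matching *_nest_end(), and each array element is wrapped in its own object nest. A minimal hedged sketch of that shape (fmsg supplied by the health-reporter core; the values are illustrative):

static void diag_example(struct devlink_fmsg *fmsg)
{
	devlink_fmsg_arr_pair_nest_start(fmsg, "Direct TIRs");

	devlink_fmsg_obj_nest_start(fmsg);
	devlink_fmsg_u32_pair_put(fmsg, "ix", 0);
	devlink_fmsg_u32_pair_put(fmsg, "tirn", 42);
	devlink_fmsg_obj_nest_end(fmsg);

	devlink_fmsg_arr_pair_nest_end(fmsg);
}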
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index 09433b91be17..532c7fa94d17 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -16,7 +16,6 @@ static const char * const sq_sw_state_type_name[] = {
[MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE] = "vlan_need_l2_inline",
[MLX5E_SQ_STATE_PENDING_XSK_TX] = "pending_xsk_tx",
[MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC] = "pending_tls_rx_resync",
- [MLX5E_SQ_STATE_XDP_MULTIBUF] = "xdp_multibuf",
};
static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
index 5f742f896600..74cd111ee320 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
@@ -81,6 +81,11 @@ struct mlx5e_rss {
refcount_t refcnt;
};
+bool mlx5e_rss_get_inner_ft_support(struct mlx5e_rss *rss)
+{
+ return rss->inner_ft_support;
+}
+
void mlx5e_rss_params_indir_modify_actual_size(struct mlx5e_rss *rss, u32 num_channels)
{
rss->indir.actual_table_size = mlx5e_rqt_size(rss->mdev, num_channels);
@@ -156,6 +161,7 @@ static void mlx5e_rss_params_init(struct mlx5e_rss *rss)
{
enum mlx5_traffic_types tt;
+ rss->hash.symmetric = true;
rss->hash.hfunc = ETH_RSS_HASH_TOP;
netdev_rss_key_fill(rss->hash.toeplitz_hash_key,
sizeof(rss->hash.toeplitz_hash_key));
@@ -449,6 +455,16 @@ u32 mlx5e_rss_get_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
return mlx5e_tir_get_tirn(tir);
}
+u32 mlx5e_rss_get_rqtn(struct mlx5e_rss *rss)
+{
+ return mlx5e_rqt_get_rqtn(&rss->rqt);
+}
+
+bool mlx5e_rss_valid_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, bool inner)
+{
+ return !!rss_get_tir(rss, tt, inner);
+}
+
/* Fill the "tirn" output parameter.
* Create the requested TIR if it's its first usage.
*/
@@ -551,7 +567,7 @@ inner_tir:
return final_err;
}
-int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc)
+int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc, bool *symmetric)
{
if (indir)
memcpy(indir, rss->indir.table,
@@ -564,11 +580,14 @@ int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc)
if (hfunc)
*hfunc = rss->hash.hfunc;
+ if (symmetric)
+ *symmetric = rss->hash.symmetric;
+
return 0;
}
int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
- const u8 *key, const u8 *hfunc,
+ const u8 *key, const u8 *hfunc, const bool *symmetric,
u32 *rqns, u32 *vhca_ids, unsigned int num_rqns)
{
bool changed_indir = false;
@@ -608,6 +627,11 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
rss->indir.actual_table_size * sizeof(*rss->indir.table));
}
+ if (symmetric) {
+ rss->hash.symmetric = *symmetric;
+ changed_hash = true;
+ }
+
if (changed_indir && rss->enabled) {
err = mlx5e_rss_apply(rss, rqns, vhca_ids, num_rqns);
if (err) {
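The symmetric flag plumbed through here asks for a hash under which both directions of a connection select the same queue. The diff only carries the bool down to the TIR configuration; conceptually (this is the generic symmetric-RSS idea, not the device's internal implementation), symmetry comes from making the hash input order-insensitive before the Toeplitz function runs:

#include <stdint.h>
#include <stdio.h>

static uint64_t fold_tuple(uint32_t saddr, uint32_t daddr,
			   uint16_t sport, uint16_t dport)
{
	/* XOR collapses (src, dst) and (dst, src) to one value, so the
	 * subsequent hash steers both flow directions identically.
	 */
	return ((uint64_t)(saddr ^ daddr) << 16) | (uint16_t)(sport ^ dport);
}

int main(void)
{
	printf("%d\n", fold_tuple(1, 2, 80, 5000) == fold_tuple(2, 1, 5000, 80));
	return 0;
}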
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
index d0df98963c8d..8ac902190010 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
@@ -32,8 +32,11 @@ void mlx5e_rss_refcnt_inc(struct mlx5e_rss *rss);
void mlx5e_rss_refcnt_dec(struct mlx5e_rss *rss);
unsigned int mlx5e_rss_refcnt_read(struct mlx5e_rss *rss);
+bool mlx5e_rss_get_inner_ft_support(struct mlx5e_rss *rss);
u32 mlx5e_rss_get_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
bool inner);
+bool mlx5e_rss_valid_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, bool inner);
+u32 mlx5e_rss_get_rqtn(struct mlx5e_rss *rss);
int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
enum mlx5_traffic_types tt,
const struct mlx5e_packet_merge_param *init_pkt_merge_param,
@@ -44,9 +47,9 @@ void mlx5e_rss_disable(struct mlx5e_rss *rss);
int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss,
struct mlx5e_packet_merge_param *pkt_merge_param);
-int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc);
+int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc, bool *symmetric);
int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
- const u8 *key, const u8 *hfunc,
+ const u8 *key, const u8 *hfunc, const bool *symmetric,
u32 *rqns, u32 *vhca_ids, unsigned int num_rqns);
struct mlx5e_rss_params_hash mlx5e_rss_get_hash(struct mlx5e_rss *rss);
u8 mlx5e_rss_get_hash_fields(struct mlx5e_rss *rss, enum mlx5_traffic_types tt);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
index a86eade9a9e0..5fcbe47337b0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
@@ -5,8 +5,6 @@
#include "channels.h"
#include "params.h"
-#define MLX5E_MAX_NUM_RSS 16
-
struct mlx5e_rx_res {
struct mlx5_core_dev *mdev; /* primary */
enum mlx5e_rx_res_features features;
@@ -196,7 +194,7 @@ void mlx5e_rx_res_rss_set_indir_uniform(struct mlx5e_rx_res *res, unsigned int n
}
int mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
- u32 *indir, u8 *key, u8 *hfunc)
+ u32 *indir, u8 *key, u8 *hfunc, bool *symmetric)
{
struct mlx5e_rss *rss;
@@ -207,11 +205,12 @@ int mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
if (!rss)
return -ENOENT;
- return mlx5e_rss_get_rxfh(rss, indir, key, hfunc);
+ return mlx5e_rss_get_rxfh(rss, indir, key, hfunc, symmetric);
}
int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
- const u32 *indir, const u8 *key, const u8 *hfunc)
+ const u32 *indir, const u8 *key, const u8 *hfunc,
+ const bool *symmetric)
{
u32 *vhca_ids = get_vhca_ids(res, 0);
struct mlx5e_rss *rss;
@@ -223,8 +222,8 @@ int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
if (!rss)
return -ENOENT;
- return mlx5e_rss_set_rxfh(rss, indir, key, hfunc, res->rss_rqns, vhca_ids,
- res->rss_nch);
+ return mlx5e_rss_set_rxfh(rss, indir, key, hfunc, symmetric,
+ res->rss_rqns, vhca_ids, res->rss_nch);
}
int mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, u32 rss_idx,
@@ -497,6 +496,11 @@ void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res)
mlx5e_rx_res_free(res);
}
+unsigned int mlx5e_rx_res_get_max_nch(struct mlx5e_rx_res *res)
+{
+ return res->max_nch;
+}
+
u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix)
{
return mlx5e_tir_get_tirn(&res->channels[ix].direct_tir);
@@ -522,7 +526,7 @@ u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res)
return mlx5e_tir_get_tirn(&res->ptp.tir);
}
-static u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix)
+u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix)
{
return mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
index 7b1a9f0f1874..3e09d91281af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
@@ -10,6 +10,8 @@
#include "fs.h"
#include "rss.h"
+#define MLX5E_MAX_NUM_RSS 16
+
struct mlx5e_rx_res;
struct mlx5e_channels;
@@ -34,6 +36,9 @@ u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix);
u32 mlx5e_rx_res_get_tirn_rss(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt);
u32 mlx5e_rx_res_get_tirn_rss_inner(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt);
u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res);
+u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix);
+unsigned int mlx5e_rx_res_get_max_nch(struct mlx5e_rx_res *res);
+bool mlx5_rx_res_rss_inner_ft_support(struct mlx5e_rx_res *res);
/* Activate/deactivate API */
void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs);
@@ -44,9 +49,10 @@ void mlx5e_rx_res_xsk_update(struct mlx5e_rx_res *res, struct mlx5e_channels *ch
/* Configuration API */
void mlx5e_rx_res_rss_set_indir_uniform(struct mlx5e_rx_res *res, unsigned int nch);
int mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
- u32 *indir, u8 *key, u8 *hfunc);
+ u32 *indir, u8 *key, u8 *hfunc, bool *symmetric);
int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
- const u32 *indir, const u8 *key, const u8 *hfunc);
+ const u32 *indir, const u8 *key, const u8 *hfunc,
+ const bool *symmetric);
int mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, u32 rss_idx,
enum mlx5_traffic_types tt);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
index d6c12d0ea55b..2e528b2c34d6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
@@ -73,11 +73,6 @@ struct mlx5e_tc_act {
bool is_terminating_action;
};
-struct mlx5e_tc_flow_action {
- unsigned int num_entries;
- struct flow_action_entry **entries;
-};
-
extern struct mlx5e_tc_act mlx5e_tc_act_drop;
extern struct mlx5e_tc_act mlx5e_tc_act_trap;
extern struct mlx5e_tc_act mlx5e_tc_act_accept;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
index feeb41693c17..b6cabe829f19 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
@@ -5,6 +5,16 @@
#include "en/tc_priv.h"
#include "en/tc_ct.h"
+static bool
+tc_act_can_offload_ct(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index,
+ struct mlx5_flow_attr *attr)
+{
+ return !((act->ct.action & TCA_CT_ACT_COMMIT) &&
+ flow_action_is_last_entry(parse_state->flow_action, act));
+}
+
static int
tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
@@ -56,6 +66,7 @@ tc_act_is_missable_ct(const struct flow_action_entry *act)
}
struct mlx5e_tc_act mlx5e_tc_act_ct = {
+ .can_offload = tc_act_can_offload_ct,
.parse_action = tc_act_parse_ct,
.post_parse = tc_act_post_parse_ct,
.is_multi_table_act = tc_act_is_multi_table_act_ct,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
index 8218c892b161..7819fb297280 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
@@ -593,3 +593,8 @@ mlx5e_tc_meter_get_stats(struct mlx5e_flow_meter_handle *meter,
*drops = packets2;
*lastuse = max_t(u64, lastuse1, lastuse2);
}
+
+int mlx5e_flow_meter_get_base_id(struct mlx5e_flow_meter_handle *meter)
+{
+ return meter->meters_obj->base_id;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h
index 9b795cd106bb..d6afb6556875 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h
@@ -72,4 +72,17 @@ void
mlx5e_tc_meter_get_stats(struct mlx5e_flow_meter_handle *meter,
u64 *bytes, u64 *packets, u64 *drops, u64 *lastuse);
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+
+int mlx5e_flow_meter_get_base_id(struct mlx5e_flow_meter_handle *meter);
+
+#else /* CONFIG_MLX5_CLS_ACT */
+
+static inline int
+mlx5e_flow_meter_get_base_id(struct mlx5e_flow_meter_handle *meter)
+{
+ return 0;
+}
+#endif /* CONFIG_MLX5_CLS_ACT */
+
#endif /* __MLX5_EN_FLOW_METER_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index a065e8fafb1d..81332cd4a582 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -1349,6 +1349,32 @@ mlx5_tc_ct_block_flow_offload_stats(struct mlx5_ct_ft *ft,
return 0;
}
+static bool
+mlx5_tc_ct_filter_legacy_non_nic_flows(struct mlx5_ct_ft *ft,
+ struct flow_cls_offload *flow)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
+ struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv;
+ struct flow_match_meta match;
+ struct net_device *netdev;
+ bool same_dev = false;
+
+ if (!is_mdev_legacy_mode(ct_priv->dev) ||
+ !flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
+ return true;
+
+ flow_rule_match_meta(rule, &match);
+
+ if (!(match.key->ingress_ifindex & match.mask->ingress_ifindex))
+ return true;
+
+ netdev = dev_get_by_index(&init_net, match.key->ingress_ifindex);
+ same_dev = ct_priv->netdev == netdev;
+ dev_put(netdev);
+
+ return same_dev;
+}
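/*
 * In legacy (non-switchdev) mode each device registers its own CT
 * block callback, so a FLOW_CLS_REPLACE whose ingress_ifindex meta
 * key resolves to a different netdev is rejected below with
 * -EOPNOTSUPP and left to another instance (or to software).
 */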
+
static int
mlx5_tc_ct_block_flow_offload(enum tc_setup_type type, void *type_data,
void *cb_priv)
@@ -1361,6 +1387,9 @@ mlx5_tc_ct_block_flow_offload(enum tc_setup_type type, void *type_data,
switch (f->command) {
case FLOW_CLS_REPLACE:
+ if (!mlx5_tc_ct_filter_legacy_non_nic_flows(ft, f))
+ return -EOPNOTSUPP;
+
return mlx5_tc_ct_block_flow_offload_add(ft, f);
case FLOW_CLS_DESTROY:
return mlx5_tc_ct_block_flow_offload_del(ft, f);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 721f35e59757..2162d776fe35 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -31,8 +31,7 @@ static void mlx5e_tc_tun_route_attr_cleanup(struct mlx5e_tc_tun_route_attr *attr
{
if (attr->n)
neigh_release(attr->n);
- if (attr->route_dev)
- dev_put(attr->route_dev);
+ dev_put(attr->route_dev);
}
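/*
 * dev_hold() and dev_put() are NULL-safe no-ops, which is what makes
 * dropping the "if (attr->route_dev)" style guards in this file and
 * in tc_tun_encap.c below safe; a sketch of the resulting pattern:
 *
 *	struct net_device *dev = maybe_lookup();	// may be NULL
 *	...
 *	dev_put(dev);	// no NULL check needed
 */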
struct mlx5e_tc_tunnel *mlx5e_get_tc_tun(struct net_device *tunnel_dev)
@@ -68,16 +67,14 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
* while holding rcu read lock. Take the net_device for correctness
* sake.
*/
- if (uplink_upper)
- dev_hold(uplink_upper);
+ dev_hold(uplink_upper);
rcu_read_unlock();
dst_is_lag_dev = (uplink_upper &&
netif_is_lag_master(uplink_upper) &&
real_dev == uplink_upper &&
mlx5_lag_is_sriov(priv->mdev));
- if (uplink_upper)
- dev_put(uplink_upper);
+ dev_put(uplink_upper);
/* if the egress device isn't on the same HW e-switch or
* it's a LAG device, use the uplink
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index e7e01f3298ef..a0fc76a1bc08 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -42,8 +42,7 @@ static int mlx5e_set_int_port_tunnel(struct mlx5e_priv *priv,
&attr->action, out_index);
out:
- if (route_dev)
- dev_put(route_dev);
+ dev_put(route_dev);
return err;
}
@@ -753,8 +752,7 @@ static int mlx5e_set_vf_tunnel(struct mlx5_eswitch *esw,
}
out:
- if (route_dev)
- dev_put(route_dev);
+ dev_put(route_dev);
return err;
}
@@ -788,8 +786,7 @@ static int mlx5e_update_vf_tunnel(struct mlx5_eswitch *esw,
mlx5e_tc_match_to_reg_mod_hdr_change(esw->dev, mod_hdr_acts, VPORT_TO_REG, act_id, data);
out:
- if (route_dev)
- dev_put(route_dev);
+ dev_put(route_dev);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
index e4e487c8431b..5c762a71818d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
@@ -140,7 +140,7 @@ static int mlx5e_tc_tun_parse_vxlan_gbp_option(struct mlx5e_priv *priv,
gbp_mask = (u32 *)&enc_opts.mask->data[0];
if (*gbp_mask & ~VXLAN_GBP_MASK) {
- NL_SET_ERR_MSG_FMT_MOD(extack, "Wrong VxLAN GBP mask(0x%08X)\n", *gbp_mask);
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Wrong VxLAN GBP mask(0x%08X)", *gbp_mask);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
index 11f724ad90db..19499072f67f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
@@ -124,7 +124,7 @@ void mlx5e_tir_builder_build_rss(struct mlx5e_tir_builder *builder,
const size_t len = MLX5_FLD_SZ_BYTES(tirc, rx_hash_toeplitz_key);
void *rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
- MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
+ MLX5_SET(tirc, tirc, rx_hash_symmetric, rss_hash->symmetric);
memcpy(rss_key, rss_hash->toeplitz_hash_key, len);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h
index 857a84bcd53a..e8df3aaf6562 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h
@@ -9,6 +9,7 @@
struct mlx5e_rss_params_hash {
u8 hfunc;
u8 toeplitz_hash_key[40];
+ bool symmetric;
};
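/*
 * When symmetric is set, the device is asked to produce the same hash
 * for both directions of a flow (source/destination swapped), so both
 * directions of a connection hash to the same RX queue; the TIR
 * builder above forwards the flag straight into rx_hash_symmetric.
 */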
struct mlx5e_rss_params_traffic_type {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
index 53ca16cb9c41..140606fcd23b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
@@ -46,7 +46,7 @@ static void mlx5e_init_trap_rq(struct mlx5e_trap *t, struct mlx5e_params *params
rq->pdev = t->pdev;
rq->netdev = priv->netdev;
rq->priv = priv;
- rq->clock = &mdev->clock;
+ rq->clock = mdev->clock;
rq->tstamp = &priv->tstamp;
rq->mdev = mdev;
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
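/*
 * mdev->clock is now a pointer to the clock object (so it can be
 * shared across functions driven by one hardware clock) rather than
 * an embedded struct; consumers therefore store it directly instead
 * of taking its address, as in the XSK and XDP hunks below.
 */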
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 5ec468268d1a..e837c21d3d21 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -214,6 +214,19 @@ static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
return pi;
}
+static inline u16 mlx5e_txqsq_get_next_pi_anysize(struct mlx5e_txqsq *sq,
+ u16 *size)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ u16 pi, contig_wqebbs;
+
+ pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+ *size = min_t(u16, contig_wqebbs, sq->max_sq_mpw_wqebbs);
+
+ return pi;
+}
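/*
 * Example (values assumed): with a 128-WQEBB ring and sq->pc = 253,
 * pi = 253 & 127 = 125 and only 3 contiguous WQEBBs remain before the
 * ring wraps, so with max_sq_mpw_wqebbs = 8 the caller gets *size = 3
 * and can open an MPWQE session in the remaining room instead of
 * padding out to the ring edge.
 */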
+
void mlx5e_txqsq_wake(struct mlx5e_txqsq *sq);
static inline u16 mlx5e_shampo_get_cqe_header_index(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
@@ -358,9 +371,9 @@ mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq);
-static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_tx_mpwqe *session, u8 max_sq_mpw_wqebbs)
+static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_tx_mpwqe *session)
{
- return session->ds_count == max_sq_mpw_wqebbs * MLX5_SEND_WQEBB_NUM_DS;
+ return session->ds_count == session->ds_count_max;
}
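/*
 * ds_count_max is now precomputed at session start (see the XDP MPWQE
 * session setup below); e.g. assuming max_sq_mpw_wqebbs = 4 and
 * MLX5_SEND_WQEBB_NUM_DS = 4 (a 64-byte WQEBB holds four 16-byte data
 * segments), the session is full at ds_count == 16. Storing the
 * product in the session drops the extra parameter from the hot path.
 */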
static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 94b291662087..f803e1c93590 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -289,9 +289,9 @@ static u64 mlx5e_xsk_fill_timestamp(void *_priv)
ts = get_cqe_ts(priv->cqe);
if (mlx5_is_real_time_rq(priv->cq->mdev) || mlx5_is_real_time_sq(priv->cq->mdev))
- return mlx5_real_time_cyc2time(&priv->cq->mdev->clock, ts);
+ return mlx5_real_time_cyc2time(priv->cq->mdev->clock, ts);
- return mlx5_timecounter_cyc2time(&priv->cq->mdev->clock, ts);
+ return mlx5_timecounter_cyc2time(priv->cq->mdev->clock, ts);
}
static void mlx5e_xsk_request_checksum(u16 csum_start, u16 csum_offset, void *priv)
@@ -390,6 +390,7 @@ static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
.wqe = wqe,
.bytes_count = 0,
.ds_count = MLX5E_TX_WQE_EMPTY_DS_COUNT,
+ .ds_count_max = sq->max_sq_mpw_wqebbs * MLX5_SEND_WQEBB_NUM_DS,
.pkt_count = 0,
.inline_on = mlx5e_xdp_get_inline_state(sq, session->inline_on),
};
@@ -501,7 +502,7 @@ mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptx
mlx5e_xdp_mpwqe_add_dseg(sq, p, stats);
- if (unlikely(mlx5e_xdp_mpwqe_is_full(session, sq->max_sq_mpw_wqebbs)))
+ if (unlikely(mlx5e_xdp_mpwqe_is_full(session)))
mlx5e_xdp_mpwqe_complete(sq);
stats->xmit++;
@@ -546,6 +547,7 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
bool inline_ok;
bool linear;
u16 pi;
+ int i;
struct mlx5e_xdpsq_stats *stats = sq->stats;
@@ -612,41 +614,33 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
- if (test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state)) {
- int i;
-
- memset(&cseg->trailer, 0, sizeof(cseg->trailer));
- memset(eseg, 0, sizeof(*eseg) - sizeof(eseg->trailer));
-
- eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
+ memset(&cseg->trailer, 0, sizeof(cseg->trailer));
+ memset(eseg, 0, sizeof(*eseg) - sizeof(eseg->trailer));
- for (i = 0; i < num_frags; i++) {
- skb_frag_t *frag = &xdptxdf->sinfo->frags[i];
- dma_addr_t addr;
+ eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
- addr = xdptxdf->dma_arr ? xdptxdf->dma_arr[i] :
- page_pool_get_dma_addr(skb_frag_page(frag)) +
- skb_frag_off(frag);
+ for (i = 0; i < num_frags; i++) {
+ skb_frag_t *frag = &xdptxdf->sinfo->frags[i];
+ dma_addr_t addr;
- dseg->addr = cpu_to_be64(addr);
- dseg->byte_count = cpu_to_be32(skb_frag_size(frag));
- dseg->lkey = sq->mkey_be;
- dseg++;
- }
+ addr = xdptxdf->dma_arr ? xdptxdf->dma_arr[i] :
+ page_pool_get_dma_addr(skb_frag_page(frag)) +
+ skb_frag_off(frag);
- cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
+ dseg->addr = cpu_to_be64(addr);
+ dseg->byte_count = cpu_to_be32(skb_frag_size(frag));
+ dseg->lkey = sq->mkey_be;
+ dseg++;
+ }
- sq->db.wqe_info[pi] = (struct mlx5e_xdp_wqe_info) {
- .num_wqebbs = num_wqebbs,
- .num_pkts = 1,
- };
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
- sq->pc += num_wqebbs;
- } else {
- cseg->fm_ce_se = 0;
+ sq->db.wqe_info[pi] = (struct mlx5e_xdp_wqe_info) {
+ .num_wqebbs = num_wqebbs,
+ .num_pkts = 1,
+ };
- sq->pc++;
- }
+ sq->pc += num_wqebbs;
xsk_tx_metadata_request(meta, &mlx5e_xsk_tx_metadata_ops, eseg);
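/*
 * With the MLX5E_SQ_STATE_XDP_MULTIBUF branch removed, the fragment
 * loop runs unconditionally (num_frags is simply 0 for a linear
 * frame), so single-buffer and multi-buffer XDP transmit now share
 * one code path and one wqe_info accounting scheme.
 */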
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index e054db1e10f8..446e492c6bb8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -182,13 +182,13 @@ static inline bool mlx5e_xdp_get_inline_state(struct mlx5e_xdpsq *sq, bool cur)
return cur;
}
-static inline bool mlx5e_xdp_mpwqe_is_full(struct mlx5e_tx_mpwqe *session, u8 max_sq_mpw_wqebbs)
+static inline bool mlx5e_xdp_mpwqe_is_full(struct mlx5e_tx_mpwqe *session)
{
if (session->inline_on)
return session->ds_count + MLX5E_XDP_INLINE_WQE_MAX_DS_CNT >
- max_sq_mpw_wqebbs * MLX5_SEND_WQEBB_NUM_DS;
+ session->ds_count_max;
- return mlx5e_tx_mpwqe_is_full(session, max_sq_mpw_wqebbs);
+ return mlx5e_tx_mpwqe_is_full(session);
}
struct mlx5e_xdp_wqe_info {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
index 1b7132fa70de..2b05536d564a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -123,7 +123,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
bitmap_zero(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
wi->consumed_strides = 0;
- umr_wqe->ctrl.opmod_idx_opcode =
+ umr_wqe->hdr.ctrl.opmod_idx_opcode =
cpu_to_be32((icosq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | MLX5_OPCODE_UMR);
/* Optimized for speed: keep in sync with mlx5e_mpwrq_umr_entry_size. */
@@ -134,7 +134,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
offset = offset * sizeof(struct mlx5_klm) * 2 / MLX5_OCTWORD;
else if (unlikely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_TRIPLE))
offset = offset * sizeof(struct mlx5_ksm) * 4 / MLX5_OCTWORD;
- umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset);
+ umr_wqe->hdr.uctrl.xlt_offset = cpu_to_be16(offset);
icosq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
.wqe_type = MLX5E_ICOSQ_WQE_UMR_RX,
@@ -144,7 +144,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
icosq->pc += rq->mpwqe.umr_wqebbs;
- icosq->doorbell_cseg = &umr_wqe->ctrl;
+ icosq->doorbell_cseg = &umr_wqe->hdr.ctrl;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index 9240cfe25d10..d743e823362a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -72,7 +72,7 @@ static int mlx5e_init_xsk_rq(struct mlx5e_channel *c,
rq->netdev = c->netdev;
rq->priv = c->priv;
rq->tstamp = c->tstamp;
- rq->clock = &mdev->clock;
+ rq->clock = mdev->clock;
rq->icosq = &c->icosq;
rq->ix = c->ix;
rq->channel = c;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 501709ac310f..2dd842aac6fc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -277,12 +277,12 @@ static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry,
case XFRM_DEV_OFFLOAD_IN:
src = attrs->dmac;
dst = attrs->smac;
- pkey = &attrs->saddr.a4;
+ pkey = &attrs->addrs.saddr.a4;
break;
case XFRM_DEV_OFFLOAD_OUT:
src = attrs->smac;
dst = attrs->dmac;
- pkey = &attrs->daddr.a4;
+ pkey = &attrs->addrs.daddr.a4;
break;
default:
return;
@@ -303,6 +303,16 @@ static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry,
neigh_release(n);
}
+static void mlx5e_ipsec_state_mask(struct mlx5e_ipsec_addr *addrs)
+{
+ /*
+ * An SA (state) has no subnet prefixes in its outer headers;
+ * the match is performed on exact source/destination addresses.
+ */
+ memset(addrs->smask.m6, 0xFF, sizeof(__be32) * 4);
+ memset(addrs->dmask.m6, 0xFF, sizeof(__be32) * 4);
+}
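/*
 * smask and dmask are unions of m4 and m6[4] (see ipsec.h), so
 * filling all 16 bytes of m6 also sets the IPv4 m4 word: one memset
 * gives a full /32 or /128 match for either address family.
 */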
+
void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5_accel_esp_xfrm_attrs *attrs)
{
@@ -374,9 +384,11 @@ skip_replay_window:
attrs->spi = be32_to_cpu(x->id.spi);
/* source , destination ips */
- memcpy(&attrs->saddr, x->props.saddr.a6, sizeof(attrs->saddr));
- memcpy(&attrs->daddr, x->id.daddr.a6, sizeof(attrs->daddr));
- attrs->family = x->props.family;
+ memcpy(&attrs->addrs.saddr, x->props.saddr.a6,
+ sizeof(attrs->addrs.saddr));
+ memcpy(&attrs->addrs.daddr, x->id.daddr.a6, sizeof(attrs->addrs.daddr));
+ attrs->addrs.family = x->props.family;
+ mlx5e_ipsec_state_mask(&attrs->addrs);
attrs->type = x->xso.type;
attrs->reqid = x->props.reqid;
attrs->upspec.dport = ntohs(x->sel.dport);
@@ -428,7 +440,8 @@ static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
}
if (x->encap) {
if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ESPINUDP)) {
- NL_SET_ERR_MSG_MOD(extack, "Encapsulation is not supported");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Encapsulation is not supported");
return -EINVAL;
}
@@ -853,13 +866,13 @@ static int mlx5e_ipsec_netevent_event(struct notifier_block *nb,
xa_for_each_marked(&ipsec->sadb, idx, sa_entry, MLX5E_IPSEC_TUNNEL_SA) {
attrs = &sa_entry->attrs;
- if (attrs->family == AF_INET) {
- if (!neigh_key_eq32(n, &attrs->saddr.a4) &&
- !neigh_key_eq32(n, &attrs->daddr.a4))
+ if (attrs->addrs.family == AF_INET) {
+ if (!neigh_key_eq32(n, &attrs->addrs.saddr.a4) &&
+ !neigh_key_eq32(n, &attrs->addrs.daddr.a4))
continue;
} else {
- if (!neigh_key_eq128(n, &attrs->saddr.a4) &&
- !neigh_key_eq128(n, &attrs->daddr.a4))
+ if (!neigh_key_eq128(n, &attrs->addrs.saddr.a4) &&
+ !neigh_key_eq128(n, &attrs->addrs.daddr.a4))
continue;
}
@@ -953,21 +966,6 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
priv->ipsec = NULL;
}
-static bool mlx5e_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
-{
- if (x->props.family == AF_INET) {
- /* Offload with IPv4 options is not supported yet */
- if (ip_hdr(skb)->ihl > 5)
- return false;
- } else {
- /* Offload with IPv6 extension headers is not support yet */
- if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
- return false;
- }
-
- return true;
-}
-
static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
@@ -1035,7 +1033,7 @@ static void mlx5e_xfrm_update_stats(struct xfrm_state *x)
* by removing always available headers.
*/
headers = sizeof(struct ethhdr);
- if (sa_entry->attrs.family == AF_INET)
+ if (sa_entry->attrs.addrs.family == AF_INET)
headers += sizeof(struct iphdr);
else
headers += sizeof(struct ipv6hdr);
@@ -1044,6 +1042,43 @@ static void mlx5e_xfrm_update_stats(struct xfrm_state *x)
x->curlft.bytes += success_bytes - headers * success_packets;
}
+static __be32 word_to_mask(int prefix)
+{
+ if (prefix < 0)
+ return 0;
+
+ if (!prefix || prefix > 31)
+ return cpu_to_be32(0xFFFFFFFF);
+
+ return cpu_to_be32(((1U << prefix) - 1) << (32 - prefix));
+}
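/*
 * Worked example for word_to_mask(), a prefix-length to big-endian
 * netmask conversion: for prefix = 24,
 *
 *	((1U << 24) - 1) << (32 - 24) = 0x00FFFFFF << 8 = 0xFFFFFF00
 *
 * A negative prefix (a word fully past the selector prefix) yields 0,
 * while prefix == 0 or prefix > 31 yields an all-ones mask. A minimal
 * stand-alone sketch of the same computation (host byte order, no
 * kernel headers), useful for sanity-checking prefix handling:
 */

#include <stdint.h>
#include <stdio.h>

static uint32_t prefix_to_mask(int prefix)	/* mirrors word_to_mask() */
{
	if (prefix < 0)
		return 0;
	if (!prefix || prefix > 31)
		return 0xFFFFFFFFu;
	return ((1u << prefix) - 1) << (32 - prefix);
}

int main(void)
{
	/* expected output: ffffff00 ff000000 ffffffff */
	printf("%08x %08x %08x\n",
	       prefix_to_mask(24), prefix_to_mask(8), prefix_to_mask(32));
	return 0;
}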
+
+static void mlx5e_ipsec_policy_mask(struct mlx5e_ipsec_addr *addrs,
+ struct xfrm_selector *sel)
+{
+ int i;
+
+ if (addrs->family == AF_INET) {
+ addrs->smask.m4 = word_to_mask(sel->prefixlen_s);
+ addrs->saddr.a4 &= addrs->smask.m4;
+ addrs->dmask.m4 = word_to_mask(sel->prefixlen_d);
+ addrs->daddr.a4 &= addrs->dmask.m4;
+ return;
+ }
+
+ for (i = 0; i < 4; i++) {
+ if (sel->prefixlen_s != 32 * i)
+ addrs->smask.m6[i] =
+ word_to_mask(sel->prefixlen_s - 32 * i);
+ addrs->saddr.a6[i] &= addrs->smask.m6[i];
+
+ if (sel->prefixlen_d != 32 * i)
+ addrs->dmask.m6[i] =
+ word_to_mask(sel->prefixlen_d - 32 * i);
+ addrs->daddr.a6[i] &= addrs->dmask.m6[i];
+ }
+}
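/*
 * Per-word example: an IPv6 selector with prefixlen_s = 72 produces
 * (the argument to word_to_mask() is prefixlen - 32 * i):
 *
 *	i = 0: word_to_mask(72)  -> 0xFFFFFFFF  (prefix > 31)
 *	i = 1: word_to_mask(40)  -> 0xFFFFFFFF
 *	i = 2: word_to_mask(8)   -> 0xFF000000
 *	i = 3: word_to_mask(-24) -> 0x00000000
 *
 * The "!= 32 * i" guard matters because word_to_mask(0) returns an
 * all-ones mask; at an exact 32-bit boundary (e.g. /64 at i = 2) the
 * word must instead stay zero, which it does since the caller
 * memset()s the whole attrs structure beforehand.
 */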
+
static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev,
struct xfrm_policy *x,
struct netlink_ext_ack *extack)
@@ -1116,9 +1151,10 @@ mlx5e_ipsec_build_accel_pol_attrs(struct mlx5e_ipsec_pol_entry *pol_entry,
sel = &x->selector;
memset(attrs, 0, sizeof(*attrs));
- memcpy(&attrs->saddr, sel->saddr.a6, sizeof(attrs->saddr));
- memcpy(&attrs->daddr, sel->daddr.a6, sizeof(attrs->daddr));
- attrs->family = sel->family;
+ memcpy(&attrs->addrs.saddr, sel->saddr.a6, sizeof(attrs->addrs.saddr));
+ memcpy(&attrs->addrs.daddr, sel->daddr.a6, sizeof(attrs->addrs.daddr));
+ attrs->addrs.family = sel->family;
+ mlx5e_ipsec_policy_mask(&attrs->addrs, sel);
attrs->dir = x->xdo.dir;
attrs->action = x->action;
attrs->type = XFRM_DEV_OFFLOAD_PACKET;
@@ -1196,7 +1232,6 @@ static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
.xdo_dev_state_add = mlx5e_xfrm_add_state,
.xdo_dev_state_delete = mlx5e_xfrm_del_state,
.xdo_dev_state_free = mlx5e_xfrm_free_state,
- .xdo_dev_offload_ok = mlx5e_ipsec_offload_ok,
.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
.xdo_dev_state_update_stats = mlx5e_xfrm_update_stats,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 7d943e93cf6d..a63c2289f8af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -76,27 +76,36 @@ struct mlx5_replay_esn {
u8 trigger : 1;
};
-struct mlx5_accel_esp_xfrm_attrs {
- u32 spi;
- u32 mode;
- struct aes_gcm_keymat aes_gcm;
-
+struct mlx5e_ipsec_addr {
union {
__be32 a4;
__be32 a6[4];
} saddr;
-
+ union {
+ __be32 m4;
+ __be32 m6[4];
+ } smask;
union {
__be32 a4;
__be32 a6[4];
} daddr;
+ union {
+ __be32 m4;
+ __be32 m6[4];
+ } dmask;
+ u8 family;
+};
+struct mlx5_accel_esp_xfrm_attrs {
+ u32 spi;
+ u32 mode;
+ struct aes_gcm_keymat aes_gcm;
+ struct mlx5e_ipsec_addr addrs;
struct upspec upspec;
u8 dir : 2;
u8 type : 2;
u8 drop : 1;
u8 encap : 1;
- u8 family;
struct mlx5_replay_esn replay_esn;
u32 authsize;
u32 reqid;
@@ -128,6 +137,7 @@ struct mlx5e_ipsec_hw_stats {
u64 ipsec_rx_bytes;
u64 ipsec_rx_drop_pkts;
u64 ipsec_rx_drop_bytes;
+ u64 ipsec_rx_drop_mismatch_sa_sel;
u64 ipsec_tx_pkts;
u64 ipsec_tx_bytes;
u64 ipsec_tx_drop_pkts;
@@ -184,6 +194,7 @@ struct mlx5e_ipsec_ft {
struct mutex mutex; /* Protect changes to this struct */
struct mlx5_flow_table *pol;
struct mlx5_flow_table *sa;
+ struct mlx5_flow_table *sa_sel;
struct mlx5_flow_table *status;
u32 refcnt;
};
@@ -195,6 +206,8 @@ struct mlx5e_ipsec_drop {
struct mlx5e_ipsec_rule {
struct mlx5_flow_handle *rule;
+ struct mlx5_flow_handle *status_pass;
+ struct mlx5_flow_handle *sa_sel;
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_pkt_reformat *pkt_reformat;
struct mlx5_fc *fc;
@@ -206,6 +219,7 @@ struct mlx5e_ipsec_rule {
struct mlx5e_ipsec_miss {
struct mlx5_flow_group *group;
struct mlx5_flow_handle *rule;
+ struct mlx5_fc *fc;
};
struct mlx5e_ipsec_tx_create_attr {
@@ -274,18 +288,8 @@ struct mlx5e_ipsec_sa_entry {
};
struct mlx5_accel_pol_xfrm_attrs {
- union {
- __be32 a4;
- __be32 a6[4];
- } saddr;
-
- union {
- __be32 a4;
- __be32 a6[4];
- } daddr;
-
+ struct mlx5e_ipsec_addr addrs;
struct upspec upspec;
- u8 family;
u8 action;
u8 type : 2;
u8 dir : 2;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index e7b64679f121..98b6a3a623f9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -16,6 +16,16 @@
#define MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_SIZE 16
#define IPSEC_TUNNEL_DEFAULT_TTL 0x40
+#define MLX5_IPSEC_FS_SA_SELECTOR_MAX_NUM_GROUPS 16
+
+enum {
+ MLX5_IPSEC_ASO_OK,
+ MLX5_IPSEC_ASO_BAD_REPLY,
+
+ /* For crypto offload, set by driver */
+ MLX5_IPSEC_ASO_SW_CRYPTO_OFFLOAD = 0xAA,
+};
+
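/*
 * reg_c_4 carries the ASO reply status for packet offload
 * (MLX5_IPSEC_ASO_OK on success), while for crypto offload the
 * driver's modify-header (see setup_modify_header() below) stamps the
 * out-of-band value MLX5_IPSEC_ASO_SW_CRYPTO_OFFLOAD; the RX status
 * table uses the two values to steer each offload mode to its own
 * pass rule.
 */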
struct mlx5e_ipsec_fc {
struct mlx5_fc *cnt;
struct mlx5_fc *drop;
@@ -33,6 +43,9 @@ struct mlx5e_ipsec_tx {
};
struct mlx5e_ipsec_status_checks {
+ struct mlx5_flow_group *pass_group;
+ struct mlx5_flow_handle *packet_offload_pass_rule;
+ struct mlx5_flow_handle *crypto_offload_pass_rule;
struct mlx5_flow_group *drop_all_group;
struct mlx5e_ipsec_drop all;
};
@@ -41,10 +54,12 @@ struct mlx5e_ipsec_rx {
struct mlx5e_ipsec_ft ft;
struct mlx5e_ipsec_miss pol;
struct mlx5e_ipsec_miss sa;
- struct mlx5e_ipsec_rule status;
- struct mlx5e_ipsec_status_checks status_drops;
+ struct mlx5e_ipsec_miss sa_sel;
+ struct mlx5e_ipsec_status_checks status_checks;
struct mlx5e_ipsec_fc *fc;
struct mlx5_fs_chains *chains;
+ struct mlx5_flow_table *pol_miss_ft;
+ struct mlx5_flow_handle *pol_miss_rule;
u8 allow_tunnel_mode : 1;
};
@@ -130,11 +145,12 @@ static void ipsec_chains_put_table(struct mlx5_fs_chains *chains, u32 prio)
static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns,
int level, int prio,
+ int num_reserved_entries,
int max_num_groups, u32 flags)
{
struct mlx5_flow_table_attr ft_attr = {};
- ft_attr.autogroup.num_reserved_entries = 1;
+ ft_attr.autogroup.num_reserved_entries = num_reserved_entries;
ft_attr.autogroup.max_num_groups = max_num_groups;
ft_attr.max_fte = NUM_IPSEC_FTE;
ft_attr.level = level;
@@ -147,22 +163,35 @@ static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns,
static void ipsec_rx_status_drop_destroy(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx)
{
- mlx5_del_flow_rules(rx->status_drops.all.rule);
- mlx5_fc_destroy(ipsec->mdev, rx->status_drops.all.fc);
- mlx5_destroy_flow_group(rx->status_drops.drop_all_group);
+ mlx5_del_flow_rules(rx->status_checks.all.rule);
+ mlx5_fc_destroy(ipsec->mdev, rx->status_checks.all.fc);
+ mlx5_destroy_flow_group(rx->status_checks.drop_all_group);
}
static void ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx)
{
- mlx5_del_flow_rules(rx->status.rule);
+ mlx5_del_flow_rules(rx->status_checks.packet_offload_pass_rule);
+ mlx5_del_flow_rules(rx->status_checks.crypto_offload_pass_rule);
+}
- if (rx != ipsec->rx_esw)
- return;
+static void ipsec_rx_rule_add_match_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_spec *spec)
+{
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
-#ifdef CONFIG_MLX5_ESWITCH
- mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0);
-#endif
+ if (rx == ipsec->rx_esw) {
+ mlx5_esw_ipsec_rx_rule_add_match_obj(sa_entry, spec);
+ } else {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_2);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.metadata_reg_c_2,
+ sa_entry->ipsec_obj_id | BIT(31));
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+ }
}
static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry,
@@ -200,11 +229,8 @@ static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry,
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.ipsec_syndrome);
MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 1);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2);
- MLX5_SET(fte_match_param, spec->match_value,
- misc_parameters_2.metadata_reg_c_2,
- sa_entry->ipsec_obj_id | BIT(31));
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ ipsec_rx_rule_add_match_obj(sa_entry, rx, spec);
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -281,10 +307,8 @@ static int rx_add_rule_drop_replay(struct mlx5e_ipsec_sa_entry *sa_entry, struct
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4);
MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 1);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2);
- MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_2,
- sa_entry->ipsec_obj_id | BIT(31));
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ ipsec_rx_rule_add_match_obj(sa_entry, rx, spec);
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -359,9 +383,9 @@ static int ipsec_rx_status_drop_all_create(struct mlx5e_ipsec *ipsec,
goto err_rule;
}
- rx->status_drops.drop_all_group = g;
- rx->status_drops.all.rule = rule;
- rx->status_drops.all.fc = flow_counter;
+ rx->status_checks.drop_all_group = g;
+ rx->status_checks.all.rule = rule;
+ rx->status_checks.all.fc = flow_counter;
kvfree(flow_group_in);
kvfree(spec);
@@ -377,9 +401,52 @@ err_out:
return err;
}
-static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx,
- struct mlx5_flow_destination *dest)
+static int ipsec_rx_status_pass_group_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_table *ft = rx->ft.status;
+ struct mlx5_flow_group *fg;
+ void *match_criteria;
+ u32 *flow_group_in;
+ int err = 0;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS_2);
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
+ match_criteria);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ misc_parameters_2.ipsec_syndrome);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ misc_parameters_2.metadata_reg_c_4);
+
+ MLX5_SET(create_flow_group_in, flow_group_in,
+ start_flow_index, ft->max_fte - 3);
+ MLX5_SET(create_flow_group_in, flow_group_in,
+ end_flow_index, ft->max_fte - 2);
+
+ fg = mlx5_create_flow_group(ft, flow_group_in);
+ if (IS_ERR(fg)) {
+ err = PTR_ERR(fg);
+ mlx5_core_warn(ipsec->mdev,
+ "Failed to create rx status pass flow group, err=%d\n",
+ err);
+ }
+ rx->status_checks.pass_group = fg;
+
+ kvfree(flow_group_in);
+ return err;
+}
+
+static struct mlx5_flow_handle *
+ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_destination *dest,
+ u8 aso_ok)
{
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_handle *rule;
@@ -388,7 +455,7 @@ static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
misc_parameters_2.ipsec_syndrome);
@@ -397,11 +464,11 @@ static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
MLX5_SET(fte_match_param, spec->match_value,
misc_parameters_2.ipsec_syndrome, 0);
MLX5_SET(fte_match_param, spec->match_value,
- misc_parameters_2.metadata_reg_c_4, 0);
+ misc_parameters_2.metadata_reg_c_4, aso_ok);
if (rx == ipsec->rx_esw)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
- flow_act.flags = FLOW_ACT_NO_APPEND;
+ flow_act.flags = FLOW_ACT_NO_APPEND | FLOW_ACT_IGNORE_FLOW_LEVEL;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
@@ -412,19 +479,19 @@ static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
goto err_rule;
}
- rx->status.rule = rule;
kvfree(spec);
- return 0;
+ return rule;
err_rule:
kvfree(spec);
- return err;
+ return ERR_PTR(err);
}
static void mlx5_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx)
{
ipsec_rx_status_pass_destroy(ipsec, rx);
+ mlx5_destroy_flow_group(rx->status_checks.pass_group);
ipsec_rx_status_drop_destroy(ipsec, rx);
}
@@ -432,19 +499,44 @@ static int mlx5_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx,
struct mlx5_flow_destination *dest)
{
+ struct mlx5_flow_destination pol_dest[2];
+ struct mlx5_flow_handle *rule;
int err;
err = ipsec_rx_status_drop_all_create(ipsec, rx);
if (err)
return err;
- err = ipsec_rx_status_pass_create(ipsec, rx, dest);
+ err = ipsec_rx_status_pass_group_create(ipsec, rx);
if (err)
- goto err_pass_create;
+ goto err_pass_group_create;
+
+ rule = ipsec_rx_status_pass_create(ipsec, rx, dest,
+ MLX5_IPSEC_ASO_SW_CRYPTO_OFFLOAD);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ goto err_crypto_offload_pass_create;
+ }
+ rx->status_checks.crypto_offload_pass_rule = rule;
+
+ pol_dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ pol_dest[0].ft = rx->ft.pol;
+ pol_dest[1] = dest[1];
+ rule = ipsec_rx_status_pass_create(ipsec, rx, pol_dest,
+ MLX5_IPSEC_ASO_OK);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ goto err_packet_offload_pass_create;
+ }
+ rx->status_checks.packet_offload_pass_rule = rule;
return 0;
-err_pass_create:
+err_packet_offload_pass_create:
+ mlx5_del_flow_rules(rx->status_checks.crypto_offload_pass_rule);
+err_crypto_offload_pass_create:
+ mlx5_destroy_flow_group(rx->status_checks.pass_group);
+err_pass_group_create:
ipsec_rx_status_drop_destroy(ipsec, rx);
return err;
}
@@ -493,6 +585,15 @@ out:
return err;
}
+static void ipsec_rx_update_default_dest(struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_destination *old_dest,
+ struct mlx5_flow_destination *new_dest)
+{
+ mlx5_modify_rule_destination(rx->pol_miss_rule, new_dest, old_dest);
+ mlx5_modify_rule_destination(rx->status_checks.crypto_offload_pass_rule,
+ new_dest, old_dest);
+}
+
static void handle_ipsec_rx_bringup(struct mlx5e_ipsec *ipsec, u32 family)
{
struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, XFRM_DEV_OFFLOAD_PACKET);
@@ -507,8 +608,7 @@ static void handle_ipsec_rx_bringup(struct mlx5e_ipsec *ipsec, u32 family)
new_dest.ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, family);
new_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- mlx5_modify_rule_destination(rx->status.rule, &new_dest, &old_dest);
- mlx5_modify_rule_destination(rx->sa.rule, &new_dest, &old_dest);
+ ipsec_rx_update_default_dest(rx, &old_dest, &new_dest);
}
static void handle_ipsec_rx_cleanup(struct mlx5e_ipsec *ipsec, u32 family)
@@ -520,8 +620,7 @@ static void handle_ipsec_rx_cleanup(struct mlx5e_ipsec *ipsec, u32 family)
old_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
new_dest = mlx5_ttc_get_default_dest(mlx5e_fs_get_ttc(ipsec->fs, false),
family2tt(family));
- mlx5_modify_rule_destination(rx->sa.rule, &new_dest, &old_dest);
- mlx5_modify_rule_destination(rx->status.rule, &new_dest, &old_dest);
+ ipsec_rx_update_default_dest(rx, &old_dest, &new_dest);
mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, ipsec->mdev);
}
@@ -577,13 +676,8 @@ static void ipsec_rx_ft_disconnect(struct mlx5e_ipsec *ipsec, u32 family)
mlx5_ttc_fwd_default_dest(ttc, family2tt(family));
}
-static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx, u32 family)
+static void ipsec_rx_policy_destroy(struct mlx5e_ipsec_rx *rx)
{
- /* disconnect */
- if (rx != ipsec->rx_esw)
- ipsec_rx_ft_disconnect(ipsec, family);
-
if (rx->chains) {
ipsec_chains_destroy(rx->chains);
} else {
@@ -592,6 +686,29 @@ static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
mlx5_destroy_flow_table(rx->ft.pol);
}
+ if (rx->pol_miss_rule) {
+ mlx5_del_flow_rules(rx->pol_miss_rule);
+ mlx5_destroy_flow_table(rx->pol_miss_ft);
+ }
+}
+
+static void ipsec_rx_sa_selector_destroy(struct mlx5_core_dev *mdev,
+ struct mlx5e_ipsec_rx *rx)
+{
+ mlx5_del_flow_rules(rx->sa_sel.rule);
+ mlx5_fc_destroy(mdev, rx->sa_sel.fc);
+ rx->sa_sel.fc = NULL;
+ mlx5_destroy_flow_group(rx->sa_sel.group);
+ mlx5_destroy_flow_table(rx->ft.sa_sel);
+}
+
+static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx, u32 family)
+{
+ /* disconnect */
+ if (rx != ipsec->rx_esw)
+ ipsec_rx_ft_disconnect(ipsec, family);
+
mlx5_del_flow_rules(rx->sa.rule);
mlx5_destroy_flow_group(rx->sa.group);
mlx5_destroy_flow_table(rx->ft.sa);
@@ -600,7 +717,17 @@ static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
mlx5_ipsec_rx_status_destroy(ipsec, rx);
mlx5_destroy_flow_table(rx->ft.status);
+ ipsec_rx_sa_selector_destroy(mdev, rx);
+
+ ipsec_rx_policy_destroy(rx);
+
mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev);
+
+#ifdef CONFIG_MLX5_ESWITCH
+ if (rx == ipsec->rx_esw)
+ mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch),
+ 0, 1, 0);
+#endif
}
static void ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
@@ -652,6 +779,28 @@ static int ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
return 0;
}
+static void ipsec_rx_sa_miss_dest_get(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5e_ipsec_rx_create_attr *attr,
+ struct mlx5_flow_destination *dest,
+ struct mlx5_flow_destination *miss_dest)
+{
+ if (rx == ipsec->rx_esw)
+ *miss_dest = *dest;
+ else
+ *miss_dest =
+ mlx5_ttc_get_default_dest(attr->ttc,
+ family2tt(attr->family));
+}
+
+static void ipsec_rx_default_dest_get(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_destination *dest)
+{
+ dest->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest->ft = rx->pol_miss_ft;
+}
+
static void ipsec_rx_ft_connect(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx,
struct mlx5e_ipsec_rx_create_attr *attr)
@@ -659,15 +808,219 @@ static void ipsec_rx_ft_connect(struct mlx5e_ipsec *ipsec,
struct mlx5_flow_destination dest = {};
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = rx->ft.pol;
+ dest.ft = rx->ft.sa;
mlx5_ttc_fwd_dest(attr->ttc, family2tt(attr->family), &dest);
}
+static int ipsec_rx_chains_create_miss(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5e_ipsec_rx_create_attr *attr,
+ struct mlx5_flow_destination *dest)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_table *ft;
+ int err;
+
+ if (rx == ipsec->rx_esw) {
+ /* No need to create miss table for switchdev mode,
+ * just set it to the root chain table.
+ */
+ rx->pol_miss_ft = dest->ft;
+ return 0;
+ }
+
+ ft_attr.max_fte = 1;
+ ft_attr.autogroup.max_num_groups = 1;
+ ft_attr.level = attr->pol_level;
+ ft_attr.prio = attr->prio;
+
+ ft = mlx5_create_auto_grouped_flow_table(attr->ns, &ft_attr);
+ if (IS_ERR(ft))
+ return PTR_ERR(ft);
+
+ rule = mlx5_add_flow_rules(ft, NULL, &flow_act, dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ goto err_rule;
+ }
+
+ rx->pol_miss_ft = ft;
+ rx->pol_miss_rule = rule;
+
+ return 0;
+
+err_rule:
+ mlx5_destroy_flow_table(ft);
+ return err;
+}
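/*
 * For the non-switchdev case this builds a one-entry table whose
 * single catch-all rule forwards to the default destination; later
 * rules point at pol_miss_ft, and ipsec_rx_update_default_dest() can
 * retarget the miss rule (e.g. for RoCE bring-up) without touching
 * them.
 */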
+
+static int ipsec_rx_policy_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5e_ipsec_rx_create_attr *attr,
+ struct mlx5_flow_destination *dest)
+{
+ struct mlx5_flow_destination default_dest;
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5_flow_table *ft;
+ int err;
+
+ err = ipsec_rx_chains_create_miss(ipsec, rx, attr, dest);
+ if (err)
+ return err;
+
+ ipsec_rx_default_dest_get(ipsec, rx, &default_dest);
+
+ if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
+ rx->chains = ipsec_chains_create(mdev,
+ default_dest.ft,
+ attr->chains_ns,
+ attr->prio,
+ attr->sa_level,
+ &rx->ft.pol);
+ if (IS_ERR(rx->chains))
+ err = PTR_ERR(rx->chains);
+ } else {
+ ft = ipsec_ft_create(attr->ns, attr->pol_level,
+ attr->prio, 1, 2, 0);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto err_out;
+ }
+ rx->ft.pol = ft;
+
+ err = ipsec_miss_create(mdev, rx->ft.pol, &rx->pol,
+ &default_dest);
+ if (err)
+ mlx5_destroy_flow_table(rx->ft.pol);
+ }
+
+ if (!err)
+ return 0;
+
+err_out:
+ if (rx->pol_miss_rule) {
+ mlx5_del_flow_rules(rx->pol_miss_rule);
+ mlx5_destroy_flow_table(rx->pol_miss_ft);
+ }
+ return err;
+}
+
+static int ipsec_rx_sa_selector_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5e_ipsec_rx_create_attr *attr)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_destination dest;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *fg;
+ u32 *flow_group_in;
+ struct mlx5_fc *fc;
+ int err;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ ft = ipsec_ft_create(attr->ns, attr->status_level, attr->prio, 1,
+ MLX5_IPSEC_FS_SA_SELECTOR_MAX_NUM_GROUPS, 0);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(mdev, "Failed to create RX SA selector flow table, err=%d\n",
+ err);
+ goto err_ft;
+ }
+
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index,
+ ft->max_fte - 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
+ ft->max_fte - 1);
+ fg = mlx5_create_flow_group(ft, flow_group_in);
+ if (IS_ERR(fg)) {
+ err = PTR_ERR(fg);
+ mlx5_core_err(mdev, "Failed to create RX SA selector miss group, err=%d\n",
+ err);
+ goto err_fg;
+ }
+
+ fc = mlx5_fc_create(mdev, false);
+ if (IS_ERR(fc)) {
+ err = PTR_ERR(fc);
+ mlx5_core_err(mdev,
+ "Failed to create ipsec RX SA selector miss rule counter, err=%d\n",
+ err);
+ goto err_cnt;
+ }
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter = fc;
+ flow_act.action =
+ MLX5_FLOW_CONTEXT_ACTION_COUNT | MLX5_FLOW_CONTEXT_ACTION_DROP;
+
+ rule = mlx5_add_flow_rules(ft, NULL, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Failed to create RX SA selector miss drop rule, err=%d\n",
+ err);
+ goto err_rule;
+ }
+
+ rx->ft.sa_sel = ft;
+ rx->sa_sel.group = fg;
+ rx->sa_sel.fc = fc;
+ rx->sa_sel.rule = rule;
+
+ kvfree(flow_group_in);
+
+ return 0;
+
+err_rule:
+ mlx5_fc_destroy(mdev, fc);
+err_cnt:
+ mlx5_destroy_flow_group(fg);
+err_fg:
+ mlx5_destroy_flow_table(ft);
+err_ft:
+ kvfree(flow_group_in);
+ return err;
+}
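/*
 * The selector table reserves its last flow entry for a catch-all
 * miss group whose drop rule counts packets that matched an SA but
 * failed the L4 selector; the counter is reported as
 * ipsec_rx_drop_mismatch_sa_sel in the hardware stats.
 */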
+
+/* The decryption processing is as follows:
+ *
+ * +----------+ +-------------+
+ * | | | |
+ * | Kernel <--------------+----------+ policy miss <------------+
+ * | | ^ | | ^
+ * +----^-----+ | +-------------+ |
+ * | crypto |
+ * miss offload ok allow/default
+ * ^ ^ ^
+ * | | packet |
+ * +----+---------+ +----+-------------+ offload ok +------+---+
+ * | | | | (no UPSPEC) | |
+ * | SA (decrypt) +-----> status +--->------->----+ policy |
+ * | | | | | |
+ * +--------------+ ++---------+-------+ +-^----+---+
+ * | | | |
+ * v packet +-->->---+ v
+ * | offload ok match |
+ * fails (with UPSPEC) | block
+ * | | +-------------+-+ |
+ * v v | | miss v
+ * drop +---> SA sel +--------->drop
+ * | |
+ * +---------------+
+ */
+
static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx, u32 family)
{
+ struct mlx5_flow_destination dest[2], miss_dest;
struct mlx5e_ipsec_rx_create_attr attr;
- struct mlx5_flow_destination dest[2];
struct mlx5_flow_table *ft;
u32 flags = 0;
int err;
@@ -678,80 +1031,61 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
if (err)
return err;
- ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 3, 0);
+ ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 3, 4, 0);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_fs_ft_status;
}
rx->ft.status = ft;
- dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[1].counter = rx->fc->cnt;
- err = mlx5_ipsec_rx_status_create(ipsec, rx, dest);
+ err = ipsec_rx_sa_selector_create(ipsec, rx, &attr);
if (err)
- goto err_add;
+ goto err_fs_ft_sa_sel;
/* Create FT */
if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
rx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
if (rx->allow_tunnel_mode)
flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
- ft = ipsec_ft_create(attr.ns, attr.sa_level, attr.prio, 2, flags);
+ ft = ipsec_ft_create(attr.ns, attr.sa_level, attr.prio, 1, 2, flags);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_fs_ft;
}
rx->ft.sa = ft;
- err = ipsec_miss_create(mdev, rx->ft.sa, &rx->sa, dest);
+ ipsec_rx_sa_miss_dest_get(ipsec, rx, &attr, &dest[0], &miss_dest);
+ err = ipsec_miss_create(mdev, rx->ft.sa, &rx->sa, &miss_dest);
if (err)
goto err_fs;
- if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
- rx->chains = ipsec_chains_create(mdev, rx->ft.sa,
- attr.chains_ns,
- attr.prio,
- attr.pol_level,
- &rx->ft.pol);
- if (IS_ERR(rx->chains)) {
- err = PTR_ERR(rx->chains);
- goto err_pol_ft;
- }
-
- goto connect;
- }
+ err = ipsec_rx_policy_create(ipsec, rx, &attr, &dest[0]);
+ if (err)
+ goto err_policy;
- ft = ipsec_ft_create(attr.ns, attr.pol_level, attr.prio, 2, 0);
- if (IS_ERR(ft)) {
- err = PTR_ERR(ft);
- goto err_pol_ft;
- }
- rx->ft.pol = ft;
- memset(dest, 0x00, 2 * sizeof(*dest));
- dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest[0].ft = rx->ft.sa;
- err = ipsec_miss_create(mdev, rx->ft.pol, &rx->pol, dest);
+ dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[1].counter = rx->fc->cnt;
+ err = mlx5_ipsec_rx_status_create(ipsec, rx, dest);
if (err)
- goto err_pol_miss;
+ goto err_add;
-connect:
/* connect */
if (rx != ipsec->rx_esw)
ipsec_rx_ft_connect(ipsec, rx, &attr);
return 0;
-err_pol_miss:
- mlx5_destroy_flow_table(rx->ft.pol);
-err_pol_ft:
+err_add:
+ ipsec_rx_policy_destroy(rx);
+err_policy:
mlx5_del_flow_rules(rx->sa.rule);
mlx5_destroy_flow_group(rx->sa.group);
err_fs:
mlx5_destroy_flow_table(rx->ft.sa);
-err_fs_ft:
if (rx->allow_tunnel_mode)
mlx5_eswitch_unblock_encap(mdev);
- mlx5_ipsec_rx_status_destroy(ipsec, rx);
-err_add:
+err_fs_ft:
+ ipsec_rx_sa_selector_destroy(mdev, rx);
+err_fs_ft_sa_sel:
mlx5_destroy_flow_table(rx->ft.status);
err_fs_ft_status:
mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev);
@@ -941,7 +1275,7 @@ static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
int err;
ipsec_tx_create_attr_set(ipsec, tx, &attr);
- ft = ipsec_ft_create(tx->ns, attr.cnt_level, attr.prio, 1, 0);
+ ft = ipsec_ft_create(tx->ns, attr.cnt_level, attr.prio, 1, 1, 0);
if (IS_ERR(ft))
return PTR_ERR(ft);
tx->ft.status = ft;
@@ -954,7 +1288,7 @@ static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
tx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
if (tx->allow_tunnel_mode)
flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
- ft = ipsec_ft_create(tx->ns, attr.sa_level, attr.prio, 4, flags);
+ ft = ipsec_ft_create(tx->ns, attr.sa_level, attr.prio, 1, 4, flags);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_sa_ft;
@@ -982,7 +1316,7 @@ static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
goto connect_roce;
}
- ft = ipsec_ft_create(tx->ns, attr.pol_level, attr.prio, 2, 0);
+ ft = ipsec_ft_create(tx->ns, attr.pol_level, attr.prio, 1, 2, 0);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_pol_ft;
@@ -1150,9 +1484,14 @@ static void tx_ft_put_policy(struct mlx5e_ipsec *ipsec, u32 prio, int type)
mutex_unlock(&tx->ft.mutex);
}
-static void setup_fte_addr4(struct mlx5_flow_spec *spec, __be32 *saddr,
- __be32 *daddr)
+static void setup_fte_addr4(struct mlx5_flow_spec *spec,
+ struct mlx5e_ipsec_addr *addrs)
{
+ __be32 *saddr = &addrs->saddr.a4;
+ __be32 *smask = &addrs->smask.m4;
+ __be32 *daddr = &addrs->daddr.a4;
+ __be32 *dmask = &addrs->dmask.m4;
+
if (!*saddr && !*daddr)
return;
@@ -1164,21 +1503,26 @@ static void setup_fte_addr4(struct mlx5_flow_spec *spec, __be32 *saddr,
if (*saddr) {
memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), saddr, 4);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
- outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), smask, 4);
}
if (*daddr) {
memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), daddr, 4);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
- outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), dmask, 4);
}
}
-static void setup_fte_addr6(struct mlx5_flow_spec *spec, __be32 *saddr,
- __be32 *daddr)
+static void setup_fte_addr6(struct mlx5_flow_spec *spec,
+ struct mlx5e_ipsec_addr *addrs)
{
+ __be32 *saddr = addrs->saddr.a6;
+ __be32 *smask = addrs->smask.m6;
+ __be32 *daddr = addrs->daddr.a6;
+ __be32 *dmask = addrs->dmask.m6;
+
if (addr6_all_zero(saddr) && addr6_all_zero(daddr))
return;
@@ -1190,15 +1534,15 @@ static void setup_fte_addr6(struct mlx5_flow_spec *spec, __be32 *saddr,
if (!addr6_all_zero(saddr)) {
memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), saddr, 16);
- memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
- outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), 0xff, 16);
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), smask, 16);
}
if (!addr6_all_zero(daddr)) {
memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), daddr, 16);
- memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
- outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 0xff, 16);
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), dmask, 16);
}
}
@@ -1340,7 +1684,8 @@ static int setup_modify_header(struct mlx5e_ipsec *ipsec, int type, u32 val, u8
MLX5_ACTION_TYPE_SET);
MLX5_SET(set_action_in, action[2], field,
MLX5_ACTION_IN_FIELD_METADATA_REG_C_4);
- MLX5_SET(set_action_in, action[2], data, 0);
+ MLX5_SET(set_action_in, action[2], data,
+ MLX5_IPSEC_ASO_SW_CRYPTO_OFFLOAD);
MLX5_SET(set_action_in, action[2], offset, 0);
MLX5_SET(set_action_in, action[2], length, 32);
}
@@ -1387,7 +1732,7 @@ setup_pkt_tunnel_reformat(struct mlx5_core_dev *mdev,
if (attrs->dir == XFRM_DEV_OFFLOAD_OUT) {
bfflen += sizeof(*esp_hdr) + 8;
- switch (attrs->family) {
+ switch (attrs->addrs.family) {
case AF_INET:
bfflen += sizeof(*iphdr);
break;
@@ -1404,7 +1749,7 @@ setup_pkt_tunnel_reformat(struct mlx5_core_dev *mdev,
return -ENOMEM;
eth_hdr = (struct ethhdr *)reformatbf;
- switch (attrs->family) {
+ switch (attrs->addrs.family) {
case AF_INET:
eth_hdr->h_proto = htons(ETH_P_IP);
break;
@@ -1427,11 +1772,11 @@ setup_pkt_tunnel_reformat(struct mlx5_core_dev *mdev,
reformat_params->param_0 = attrs->authsize;
hdr = reformatbf + sizeof(*eth_hdr);
- switch (attrs->family) {
+ switch (attrs->addrs.family) {
case AF_INET:
iphdr = (struct iphdr *)hdr;
- memcpy(&iphdr->saddr, &attrs->saddr.a4, 4);
- memcpy(&iphdr->daddr, &attrs->daddr.a4, 4);
+ memcpy(&iphdr->saddr, &attrs->addrs.saddr.a4, 4);
+ memcpy(&iphdr->daddr, &attrs->addrs.daddr.a4, 4);
iphdr->version = 4;
iphdr->ihl = 5;
iphdr->ttl = IPSEC_TUNNEL_DEFAULT_TTL;
@@ -1440,8 +1785,8 @@ setup_pkt_tunnel_reformat(struct mlx5_core_dev *mdev,
break;
case AF_INET6:
ipv6hdr = (struct ipv6hdr *)hdr;
- memcpy(&ipv6hdr->saddr, &attrs->saddr.a6, 16);
- memcpy(&ipv6hdr->daddr, &attrs->daddr.a6, 16);
+ memcpy(&ipv6hdr->saddr, &attrs->addrs.saddr.a6, 16);
+ memcpy(&ipv6hdr->daddr, &attrs->addrs.daddr.a6, 16);
ipv6hdr->nexthdr = IPPROTO_ESP;
ipv6hdr->version = 6;
ipv6hdr->hop_limit = IPSEC_TUNNEL_DEFAULT_TTL;
@@ -1475,7 +1820,7 @@ static int get_reformat_type(struct mlx5_accel_esp_xfrm_attrs *attrs)
return MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT_OVER_UDP;
return MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT;
case XFRM_DEV_OFFLOAD_OUT:
- if (attrs->family == AF_INET) {
+ if (attrs->addrs.family == AF_INET) {
if (attrs->encap)
return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV4;
return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4;
@@ -1576,6 +1921,85 @@ static int setup_pkt_reformat(struct mlx5e_ipsec *ipsec,
return 0;
}
+static int rx_add_rule_sa_selector(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5e_ipsec_rx *rx,
+ struct upspec *upspec)
+{
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5_flow_destination dest[2];
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_2.ipsec_syndrome);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_4);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.ipsec_syndrome, 0);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.metadata_reg_c_4, 0);
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+
+ ipsec_rx_rule_add_match_obj(sa_entry, rx, spec);
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ flow_act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL;
+ dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[0].ft = rx->ft.sa_sel;
+ dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[1].counter = rx->fc->cnt;
+
+ rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx pass rule, err=%d\n",
+ err);
+ goto err_add_status_pass_rule;
+ }
+
+ sa_entry->ipsec_rule.status_pass = rule;
+
+ MLX5_SET(fte_match_param, spec->match_criteria,
+ misc_parameters_2.ipsec_syndrome, 0);
+ MLX5_SET(fte_match_param, spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_4, 0);
+
+ setup_fte_upper_proto_match(spec, upspec);
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[0].ft = rx->ft.pol;
+
+ rule = mlx5_add_flow_rules(rx->ft.sa_sel, spec, &flow_act, &dest[0], 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx sa selector rule, err=%d\n",
+ err);
+ goto err_add_sa_sel_rule;
+ }
+
+ sa_entry->ipsec_rule.sa_sel = rule;
+
+ kvfree(spec);
+ return 0;
+
+err_add_sa_sel_rule:
+ mlx5_del_flow_rules(sa_entry->ipsec_rule.status_pass);
+err_add_status_pass_rule:
+ kvfree(spec);
+ return err;
+}
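/*
 * Note the flow spec is reused for the second rule: the syndrome and
 * reg_c_4 match criteria are cleared again before the upper-protocol
 * (upspec) match is added, so the SA-selector rule keys only on the
 * SA match object plus the L4 selector, forwarding hits back to the
 * policy table.
 */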
+
static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
@@ -1589,7 +2013,7 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
struct mlx5_fc *counter;
int err = 0;
- rx = rx_ft_get(mdev, ipsec, attrs->family, attrs->type);
+ rx = rx_ft_get(mdev, ipsec, attrs->addrs.family, attrs->type);
if (IS_ERR(rx))
return PTR_ERR(rx);
@@ -1599,16 +2023,15 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
goto err_alloc;
}
- if (attrs->family == AF_INET)
- setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+ if (attrs->addrs.family == AF_INET)
+ setup_fte_addr4(spec, &attrs->addrs);
else
- setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
+ setup_fte_addr6(spec, &attrs->addrs);
setup_fte_spi(spec, attrs->spi, attrs->encap);
if (!attrs->encap)
setup_fte_esp(spec);
setup_fte_no_frags(spec);
- setup_fte_upper_proto_match(spec, &attrs->upspec);
if (!attrs->drop) {
if (rx != ipsec->rx_esw)
@@ -1656,6 +2079,13 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
mlx5_core_err(mdev, "fail to add RX ipsec rule err=%d\n", err);
goto err_add_flow;
}
+
+ if (attrs->upspec.proto && attrs->type == XFRM_DEV_OFFLOAD_PACKET) {
+ err = rx_add_rule_sa_selector(sa_entry, rx, &attrs->upspec);
+ if (err)
+ goto err_add_sa_sel;
+ }
+
if (attrs->type == XFRM_DEV_OFFLOAD_PACKET)
err = rx_add_rule_drop_replay(sa_entry, rx);
if (err)
@@ -1679,6 +2109,11 @@ err_drop_reason:
mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.replay.fc);
}
err_add_replay:
+ if (sa_entry->ipsec_rule.sa_sel) {
+ mlx5_del_flow_rules(sa_entry->ipsec_rule.sa_sel);
+ mlx5_del_flow_rules(sa_entry->ipsec_rule.status_pass);
+ }
+err_add_sa_sel:
mlx5_del_flow_rules(rule);
err_add_flow:
mlx5_fc_destroy(mdev, counter);
@@ -1691,7 +2126,7 @@ err_pkt_reformat:
err_mod_header:
kvfree(spec);
err_alloc:
- rx_ft_put(ipsec, attrs->family, attrs->type);
+ rx_ft_put(ipsec, attrs->addrs.family, attrs->type);
return err;
}
@@ -1723,10 +2158,10 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
switch (attrs->type) {
case XFRM_DEV_OFFLOAD_CRYPTO:
- if (attrs->family == AF_INET)
- setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+ if (attrs->addrs.family == AF_INET)
+ setup_fte_addr4(spec, &attrs->addrs);
else
- setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
+ setup_fte_addr6(spec, &attrs->addrs);
setup_fte_spi(spec, attrs->spi, false);
setup_fte_esp(spec);
setup_fte_reg_a(spec);
@@ -1810,10 +2245,10 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
}
tx = ipsec_tx(ipsec, attrs->type);
- if (attrs->family == AF_INET)
- setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+ if (attrs->addrs.family == AF_INET)
+ setup_fte_addr4(spec, &attrs->addrs);
else
- setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
+ setup_fte_addr6(spec, &attrs->addrs);
setup_fte_no_frags(spec);
setup_fte_upper_proto_match(spec, &attrs->upspec);
@@ -1883,12 +2318,12 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
struct mlx5e_ipsec_rx *rx;
int err, dstn = 0;
- ft = rx_ft_get_policy(mdev, pol_entry->ipsec, attrs->family, attrs->prio,
- attrs->type);
+ ft = rx_ft_get_policy(mdev, pol_entry->ipsec, attrs->addrs.family,
+ attrs->prio, attrs->type);
if (IS_ERR(ft))
return PTR_ERR(ft);
- rx = ipsec_rx(pol_entry->ipsec, attrs->family, attrs->type);
+ rx = ipsec_rx(pol_entry->ipsec, attrs->addrs.family, attrs->type);
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
@@ -1896,10 +2331,10 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
goto err_alloc;
}
- if (attrs->family == AF_INET)
- setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+ if (attrs->addrs.family == AF_INET)
+ setup_fte_addr4(spec, &attrs->addrs);
else
- setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
+ setup_fte_addr6(spec, &attrs->addrs);
setup_fte_no_frags(spec);
setup_fte_upper_proto_match(spec, &attrs->upspec);
@@ -1923,8 +2358,7 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
flow_act.flags |= FLOW_ACT_NO_APPEND;
if (rx == ipsec->rx_esw && rx->chains)
flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
- dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest[dstn].ft = rx->ft.sa;
+ ipsec_rx_default_dest_get(ipsec, rx, &dest[dstn]);
dstn++;
rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, dstn);
if (IS_ERR(rule)) {
@@ -1940,7 +2374,8 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
err_action:
kvfree(spec);
err_alloc:
- rx_ft_put_policy(pol_entry->ipsec, attrs->family, attrs->prio, attrs->type);
+ rx_ft_put_policy(pol_entry->ipsec, attrs->addrs.family, attrs->prio,
+ attrs->type);
return err;
}
@@ -2061,6 +2496,7 @@ void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void *ipsec_stats)
stats->ipsec_rx_bytes = 0;
stats->ipsec_rx_drop_pkts = 0;
stats->ipsec_rx_drop_bytes = 0;
+ stats->ipsec_rx_drop_mismatch_sa_sel = 0;
stats->ipsec_tx_pkts = 0;
stats->ipsec_tx_bytes = 0;
stats->ipsec_tx_drop_pkts = 0;
@@ -2070,6 +2506,9 @@ void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void *ipsec_stats)
mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_rx_pkts, &stats->ipsec_rx_bytes);
mlx5_fc_query(mdev, fc->drop, &stats->ipsec_rx_drop_pkts,
&stats->ipsec_rx_drop_bytes);
+ if (ipsec->rx_ipv4->sa_sel.fc)
+ mlx5_fc_query(mdev, ipsec->rx_ipv4->sa_sel.fc,
+ &stats->ipsec_rx_drop_mismatch_sa_sel, &bytes);
fc = ipsec->tx->fc;
mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_tx_pkts, &stats->ipsec_tx_bytes);
@@ -2098,6 +2537,11 @@ void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void *ipsec_stats)
stats->ipsec_tx_drop_pkts += packets;
stats->ipsec_tx_drop_bytes += bytes;
}
+
+ if (ipsec->rx_esw->sa_sel.fc &&
+ !mlx5_fc_query(mdev, ipsec->rx_esw->sa_sel.fc,
+ &packets, &bytes))
+ stats->ipsec_rx_drop_mismatch_sa_sel += packets;
}
}
@@ -2195,12 +2639,18 @@ void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
mlx5_del_flow_rules(ipsec_rule->auth.rule);
mlx5_fc_destroy(mdev, ipsec_rule->auth.fc);
+ if (ipsec_rule->sa_sel) {
+ mlx5_del_flow_rules(ipsec_rule->sa_sel);
+ mlx5_del_flow_rules(ipsec_rule->status_pass);
+ }
+
if (ipsec_rule->replay.rule) {
mlx5_del_flow_rules(ipsec_rule->replay.rule);
mlx5_fc_destroy(mdev, ipsec_rule->replay.fc);
}
mlx5_esw_ipsec_rx_id_mapping_remove(sa_entry);
- rx_ft_put(sa_entry->ipsec, sa_entry->attrs.family, sa_entry->attrs.type);
+ rx_ft_put(sa_entry->ipsec, sa_entry->attrs.addrs.family,
+ sa_entry->attrs.type);
}
int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
@@ -2236,7 +2686,8 @@ void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
mlx5e_ipsec_unblock_tc_offload(pol_entry->ipsec->mdev);
if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
- rx_ft_put_policy(pol_entry->ipsec, pol_entry->attrs.family,
+ rx_ft_put_policy(pol_entry->ipsec,
+ pol_entry->attrs.addrs.family,
pol_entry->attrs.prio, pol_entry->attrs.type);
return;
}
@@ -2376,7 +2827,7 @@ bool mlx5e_ipsec_fs_tunnel_enabled(struct mlx5e_ipsec_sa_entry *sa_entry)
struct mlx5e_ipsec_rx *rx;
struct mlx5e_ipsec_tx *tx;
- rx = ipsec_rx(sa_entry->ipsec, attrs->family, attrs->type);
+ rx = ipsec_rx(sa_entry->ipsec, attrs->addrs.family, attrs->type);
tx = ipsec_tx(sa_entry->ipsec, attrs->type);
if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
return tx->allow_tunnel_mode;
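
The rx_add_rule()/rx_add_rule_sa_selector() hunks above create the status-pass and SA-selector flow rules as a pair and tear them down in reverse order on every error path. A minimal user-space sketch of that goto-style paired-cleanup idiom, assuming nothing mlx5-specific (struct rule and the helpers are illustrative):

#include <stdlib.h>

struct rule { int id; };

static struct rule *create_rule(int id)
{
	struct rule *r = malloc(sizeof(*r));

	if (r)
		r->id = id;
	return r;
}

static void destroy_rule(struct rule *r)
{
	free(r);
}

/* Acquire two rules as a pair; if the second fails, release the first
 * before returning, mirroring err_add_sa_sel_rule above.
 */
static int setup_rule_pair(struct rule **pass, struct rule **sa_sel)
{
	*pass = create_rule(1);
	if (!*pass)
		return -1;

	*sa_sel = create_rule(2);
	if (!*sa_sel) {
		destroy_rule(*pass);
		*pass = NULL;
		return -1;
	}
	return 0;
}

The same pairing shows up on teardown in mlx5e_accel_ipsec_fs_del_rule(): sa_sel and status_pass are only ever deleted together.
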
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
index 92bf3fa44a3b..93be388068f8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
@@ -42,6 +42,7 @@ static const struct counter_desc mlx5e_ipsec_hw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_drop_pkts) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_drop_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_drop_mismatch_sa_sel) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_pkts) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_drop_pkts) },
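
Adding ipsec_rx_drop_mismatch_sa_sel is a one-line table edit because mlx5e stats are described by counter_desc entries pairing a display name with a struct offset. A standalone sketch of that offsetof()-table pattern (names here are illustrative, not the driver's real macros):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct hw_stats {
	uint64_t rx_pkts;
	uint64_t rx_drop_mismatch_sa_sel;
};

struct counter_desc {
	const char *name;
	size_t offset;
};

#define DECLARE_STAT(type, fld) { #fld, offsetof(type, fld) }

static const struct counter_desc hw_stats_desc[] = {
	DECLARE_STAT(struct hw_stats, rx_pkts),
	DECLARE_STAT(struct hw_stats, rx_drop_mismatch_sa_sel),
};

static uint64_t read_stat(const struct hw_stats *s,
			  const struct counter_desc *d)
{
	/* The offset stored in the table locates the field at runtime. */
	return *(const uint64_t *)((const char *)s + d->offset);
}

int main(void)
{
	struct hw_stats s = { .rx_pkts = 5, .rx_drop_mismatch_sa_sel = 1 };

	for (size_t i = 0; i < sizeof(hw_stats_desc) / sizeof(hw_stats_desc[0]); i++)
		printf("%s: %llu\n", hw_stats_desc[i].name,
		       (unsigned long long)read_stat(&s, &hw_stats_desc[i]));
	return 0;
}
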
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index cae39198b4db..fdf9e9bb99ac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -42,6 +42,9 @@
#include "lib/clock.h"
#include "en/fs_ethtool.h"
+#define LANES_UNKNOWN 0
+#define MAX_LANES 8
+
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
struct ethtool_drvinfo *drvinfo)
{
@@ -237,14 +240,33 @@ void mlx5e_build_ptys2ethtool_map(void)
ETHTOOL_LINK_MODE_800000baseDR8_2_Full_BIT,
ETHTOOL_LINK_MODE_800000baseSR8_Full_BIT,
ETHTOOL_LINK_MODE_800000baseVR8_Full_BIT);
-}
-
-static void mlx5e_ethtool_get_speed_arr(struct mlx5_core_dev *mdev,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_200GAUI_1_200GBASE_CR1_KR1, ext,
+ ETHTOOL_LINK_MODE_200000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseDR_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseDR_2_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseSR_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseVR_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_400GAUI_2_400GBASE_CR2_KR2, ext,
+ ETHTOOL_LINK_MODE_400000baseCR2_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseKR2_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseDR2_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseDR2_2_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseSR2_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseVR2_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_800GAUI_4_800GBASE_CR4_KR4, ext,
+ ETHTOOL_LINK_MODE_800000baseCR4_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseKR4_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseDR4_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseDR4_2_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseVR4_Full_BIT);
+}
+
+static void mlx5e_ethtool_get_speed_arr(bool ext,
struct ptys2ethtool_config **arr,
u32 *size)
{
- bool ext = mlx5_ptys_ext_supported(mdev);
-
*arr = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table;
*size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) :
ARRAY_SIZE(ptys2legacy_ethtool_table);
@@ -891,37 +913,19 @@ int mlx5e_set_per_queue_coalesce(struct net_device *dev, u32 queue,
return mlx5e_ethtool_set_per_queue_coalesce(priv, queue, coal);
}
-static void ptys2ethtool_supported_link(struct mlx5_core_dev *mdev,
- unsigned long *supported_modes,
- u32 eth_proto_cap)
-{
- unsigned long proto_cap = eth_proto_cap;
- struct ptys2ethtool_config *table;
- u32 max_size;
- int proto;
-
- mlx5e_ethtool_get_speed_arr(mdev, &table, &max_size);
- for_each_set_bit(proto, &proto_cap, max_size)
- bitmap_or(supported_modes, supported_modes,
- table[proto].supported,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
-}
-
-static void ptys2ethtool_adver_link(unsigned long *advertising_modes,
- u32 eth_proto_cap, bool ext)
+static void ptys2ethtool_process_link(u32 eth_eproto, bool ext, bool advertised,
+ unsigned long *modes)
{
- unsigned long proto_cap = eth_proto_cap;
+ unsigned long eproto = eth_eproto;
struct ptys2ethtool_config *table;
u32 max_size;
int proto;
- table = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table;
- max_size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) :
- ARRAY_SIZE(ptys2legacy_ethtool_table);
-
- for_each_set_bit(proto, &proto_cap, max_size)
- bitmap_or(advertising_modes, advertising_modes,
- table[proto].advertised,
+ mlx5e_ethtool_get_speed_arr(ext, &table, &max_size);
+ for_each_set_bit(proto, &eproto, max_size)
+ bitmap_or(modes, modes,
+ advertised ?
+ table[proto].advertised : table[proto].supported,
__ETHTOOL_LINK_MODE_MASK_NBITS);
}
@@ -931,6 +935,7 @@ static const u32 pplm_fec_2_ethtool[] = {
[MLX5E_FEC_RS_528_514] = ETHTOOL_FEC_RS,
[MLX5E_FEC_RS_544_514] = ETHTOOL_FEC_RS,
[MLX5E_FEC_LLRS_272_257_1] = ETHTOOL_FEC_LLRS,
+ [MLX5E_FEC_RS_544_514_INTERLEAVED_QUAD] = ETHTOOL_FEC_RS,
};
static u32 pplm2ethtool_fec(u_long fec_mode, unsigned long size)
@@ -1074,50 +1079,53 @@ static void ptys2ethtool_supported_advertised_port(struct mlx5_core_dev *mdev,
}
}
-static void get_speed_duplex(struct net_device *netdev,
- u32 eth_proto_oper, bool force_legacy,
- u16 data_rate_oper,
- struct ethtool_link_ksettings *link_ksettings)
+static void get_link_properties(struct net_device *netdev,
+ u32 eth_proto_oper, bool force_legacy,
+ u16 data_rate_oper,
+ struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- u32 speed = SPEED_UNKNOWN;
+ const struct mlx5_link_info *info;
u8 duplex = DUPLEX_UNKNOWN;
+ u32 speed = SPEED_UNKNOWN;
+ u32 lanes = LANES_UNKNOWN;
if (!netif_carrier_ok(netdev))
goto out;
- speed = mlx5_port_ptys2speed(priv->mdev, eth_proto_oper, force_legacy);
- if (!speed) {
- if (data_rate_oper)
- speed = 100 * data_rate_oper;
- else
- speed = SPEED_UNKNOWN;
- goto out;
+ info = mlx5_port_ptys2info(priv->mdev, eth_proto_oper, force_legacy);
+ if (info) {
+ speed = info->speed;
+ lanes = info->lanes;
+ duplex = DUPLEX_FULL;
+ } else if (data_rate_oper) {
+ speed = 100 * data_rate_oper;
+ lanes = MAX_LANES;
}
- duplex = DUPLEX_FULL;
-
out:
- link_ksettings->base.speed = speed;
link_ksettings->base.duplex = duplex;
+ link_ksettings->base.speed = speed;
+ link_ksettings->lanes = lanes;
}
static void get_supported(struct mlx5_core_dev *mdev, u32 eth_proto_cap,
struct ethtool_link_ksettings *link_ksettings)
{
unsigned long *supported = link_ksettings->link_modes.supported;
- ptys2ethtool_supported_link(mdev, supported, eth_proto_cap);
+ bool ext = mlx5_ptys_ext_supported(mdev);
+
+ ptys2ethtool_process_link(eth_proto_cap, ext, false, supported);
ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause);
}
-static void get_advertising(u32 eth_proto_cap, u8 tx_pause, u8 rx_pause,
+static void get_advertising(u32 eth_proto_admin, u8 tx_pause, u8 rx_pause,
struct ethtool_link_ksettings *link_ksettings,
bool ext)
{
unsigned long *advertising = link_ksettings->link_modes.advertising;
- ptys2ethtool_adver_link(advertising, eth_proto_cap, ext);
-
+ ptys2ethtool_process_link(eth_proto_admin, ext, true, advertising);
if (rx_pause)
ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause);
if (tx_pause ^ rx_pause)
@@ -1173,7 +1181,7 @@ static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp,
unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising;
bool ext = mlx5_ptys_ext_supported(mdev);
- ptys2ethtool_adver_link(lp_advertising, eth_proto_lp, ext);
+ ptys2ethtool_process_link(eth_proto_lp, ext, true, lp_advertising);
}
static int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
@@ -1235,8 +1243,8 @@ static int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
get_supported(mdev, eth_proto_cap, link_ksettings);
get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings,
admin_ext);
- get_speed_duplex(priv->netdev, eth_proto_oper, !admin_ext,
- data_rate_oper, link_ksettings);
+ get_link_properties(priv->netdev, eth_proto_oper, !admin_ext,
+ data_rate_oper, link_ksettings);
eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
connector_type = connector_type < MLX5E_CONNECTOR_TYPE_NUMBER ?
@@ -1341,28 +1349,22 @@ static bool ext_link_mode_requested(const unsigned long *adver)
return bitmap_intersects(modes, adver, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
-static bool ext_requested(u8 autoneg, const unsigned long *adver, bool ext_supported)
-{
- bool ext_link_mode = ext_link_mode_requested(adver);
-
- return autoneg == AUTONEG_ENABLE ? ext_link_mode : ext_supported;
-}
-
static int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
const struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_port_eth_proto eproto;
+ struct mlx5_link_info info = {};
const unsigned long *adver;
bool an_changes = false;
u8 an_disable_admin;
bool ext_supported;
+ bool ext_requested;
u8 an_disable_cap;
bool an_disable;
u32 link_modes;
u8 an_status;
u8 autoneg;
- u32 speed;
bool ext;
int err;
@@ -1370,13 +1372,15 @@ static int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
adver = link_ksettings->link_modes.advertising;
autoneg = link_ksettings->base.autoneg;
- speed = link_ksettings->base.speed;
+ info.speed = link_ksettings->base.speed;
+ info.lanes = link_ksettings->lanes;
ext_supported = mlx5_ptys_ext_supported(mdev);
- ext = ext_requested(autoneg, adver, ext_supported);
- if (!ext_supported && ext)
+ ext_requested = ext_link_mode_requested(adver);
+ if (!ext_supported && ext_requested)
return -EOPNOTSUPP;
+ ext = autoneg == AUTONEG_ENABLE ? ext_requested : ext_supported;
ethtool2ptys_adver_func = ext ? mlx5e_ethtool2ptys_ext_adver_link :
mlx5e_ethtool2ptys_adver_link;
err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
@@ -1386,7 +1390,7 @@ static int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
goto out;
}
link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) :
- mlx5_port_speed2linkmodes(mdev, speed, !ext);
+ mlx5_port_info2linkmodes(mdev, &info, !ext);
err = mlx5e_speed_validate(priv->netdev, ext, link_modes, autoneg);
if (err)
@@ -1458,18 +1462,27 @@ static int mlx5e_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *
{
struct mlx5e_priv *priv = netdev_priv(netdev);
u32 rss_context = rxfh->rss_context;
+ bool symmetric;
int err;
mutex_lock(&priv->state_lock);
err = mlx5e_rx_res_rss_get_rxfh(priv->rx_res, rss_context,
- rxfh->indir, rxfh->key, &rxfh->hfunc);
+ rxfh->indir, rxfh->key, &rxfh->hfunc, &symmetric);
mutex_unlock(&priv->state_lock);
- return err;
+
+ if (err)
+ return err;
+
+ if (symmetric)
+ rxfh->input_xfrm = RXH_XFRM_SYM_OR_XOR;
+
+ return 0;
}
static int mlx5e_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
+ bool symmetric = rxfh->input_xfrm == RXH_XFRM_SYM_OR_XOR;
struct mlx5e_priv *priv = netdev_priv(dev);
u32 *rss_context = &rxfh->rss_context;
u8 hfunc = rxfh->hfunc;
@@ -1504,7 +1517,8 @@ static int mlx5e_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxf
err = mlx5e_rx_res_rss_set_rxfh(priv->rx_res, *rss_context,
rxfh->indir, rxfh->key,
- hfunc == ETH_RSS_HASH_NO_CHANGE ? NULL : &hfunc);
+ hfunc == ETH_RSS_HASH_NO_CHANGE ? NULL : &hfunc,
+ rxfh->input_xfrm == RXH_XFRM_NO_CHANGE ? NULL : &symmetric);
unlock:
mutex_unlock(&priv->state_lock);
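
The get/set_rxfh hunks above thread symmetric-RSS state through an optional pointer: RXH_XFRM_NO_CHANGE becomes a NULL argument meaning "leave the input transform alone". A small sketch of that sentinel-to-NULL convention (the enum values are illustrative stand-ins, not the uapi constants):

#include <stdbool.h>
#include <stddef.h>

enum {
	XFRM_NO_CHANGE = -1,	/* illustrative stand-ins for the */
	XFRM_NONE = 0,		/* RXH_XFRM_* uapi values */
	XFRM_SYM_OR_XOR = 1,
};

struct rss_ctx { bool symmetric; };

static void rss_apply(struct rss_ctx *ctx, const bool *symmetric)
{
	if (symmetric)		/* NULL means "no change" */
		ctx->symmetric = *symmetric;
}

static int set_rxfh(struct rss_ctx *ctx, int input_xfrm)
{
	bool sym = input_xfrm == XFRM_SYM_OR_XOR;

	rss_apply(ctx, input_xfrm == XFRM_NO_CHANGE ? NULL : &sym);
	return 0;
}

static int get_rxfh(const struct rss_ctx *ctx, int *input_xfrm)
{
	*input_xfrm = ctx->symmetric ? XFRM_SYM_OR_XOR : XFRM_NONE;
	return 0;
}
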
@@ -2018,7 +2032,7 @@ static int mlx5e_get_module_eeprom_by_page(struct net_device *netdev,
if (size_read < 0) {
NL_SET_ERR_MSG_FMT_MOD(
extack,
- "Query module eeprom by page failed, read %u bytes, err %d\n",
+ "Query module eeprom by page failed, read %u bytes, err %d",
i, size_read);
return i;
}
@@ -2607,12 +2621,14 @@ static void mlx5e_get_ts_stats(struct net_device *netdev,
}
const struct ethtool_ops mlx5e_ethtool_ops = {
+ .cap_link_lanes_supported = true,
.cap_rss_ctx_supported = true,
.rxfh_per_ctx_key = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE |
ETHTOOL_COALESCE_USE_CQE,
+ .supported_input_xfrm = RXH_XFRM_SYM_OR_XOR,
.get_drvinfo = mlx5e_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_link_ext_state = mlx5e_get_link_ext_state,
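
ptys2ethtool_process_link() above folds the old supported/advertised loops into one walk that ORs per-protocol link-mode masks into the result, selecting which mask per table entry. A compact sketch of that shape using plain 64-bit masks instead of the kernel bitmap API:

#include <stdbool.h>
#include <stdint.h>

struct mode_cfg {
	uint64_t supported;
	uint64_t advertised;
};

static void process_link(uint64_t proto_mask, bool advertised,
			 const struct mode_cfg *table,
			 unsigned int table_len, uint64_t *modes)
{
	for (unsigned int proto = 0; proto < table_len; proto++) {
		if (!(proto_mask & (UINT64_C(1) << proto)))
			continue;
		/* One loop body serves both callers; the flag picks
		 * the bitmap, as in the refactored kernel helper.
		 */
		*modes |= advertised ? table[proto].advertised
				     : table[proto].supported;
	}
}
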
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index 773624bb2c5d..d68230a7b9f4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -884,8 +884,10 @@ static int flow_type_to_traffic_type(u32 flow_type)
case ESP_V6_FLOW:
return MLX5_TT_IPV6_IPSEC_ESP;
case IPV4_FLOW:
+ case IP_USER_FLOW:
return MLX5_TT_IPV4;
case IPV6_FLOW:
+ case IPV6_USER_FLOW:
return MLX5_TT_IPV6;
default:
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 8fcaee381b0e..3506024c2453 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -311,8 +311,8 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
struct mlx5e_icosq *sq,
struct mlx5e_umr_wqe *wqe)
{
- struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
- struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->hdr.ctrl;
+ struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->hdr.uctrl;
u16 octowords;
u8 ds_cnt;
@@ -359,7 +359,7 @@ static int mlx5e_rq_shampo_hd_info_alloc(struct mlx5e_rq *rq, int node)
return 0;
err_nomem:
- kvfree(shampo->bitmap);
+ bitmap_free(shampo->bitmap);
kvfree(shampo->pages);
return -ENOMEM;
@@ -367,7 +367,7 @@ err_nomem:
static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq)
{
- kvfree(rq->mpwqe.shampo->bitmap);
+ bitmap_free(rq->mpwqe.shampo->bitmap);
kvfree(rq->mpwqe.shampo->pages);
}
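
The two hunks above free the SHAMPO bitmap with bitmap_free() instead of kvfree(), keeping the release call paired with the bitmap allocation API. Mixing allocator families is a classic source of corruption; a tiny user-space sketch of keeping the pair together (these are stand-ins, not the kernel implementations):

#include <stdlib.h>

static unsigned long *bitmap_zalloc(size_t nbits)
{
	size_t bits_per_word = 8 * sizeof(unsigned long);
	size_t words = (nbits + bits_per_word - 1) / bits_per_word;

	return calloc(words, sizeof(unsigned long));
}

static void bitmap_free(unsigned long *bitmap)
{
	free(bitmap);	/* matched release for bitmap_zalloc() */
}

int main(void)
{
	unsigned long *map = bitmap_zalloc(128);

	bitmap_free(map);	/* always the allocator's own free routine */
	return 0;
}
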
@@ -393,7 +393,9 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
}
- mlx5e_build_umr_wqe(rq, rq->icosq, &rq->mpwqe.umr_wqe);
+ mlx5e_build_umr_wqe(rq, rq->icosq,
+ container_of(&rq->mpwqe.umr_wqe,
+ struct mlx5e_umr_wqe, hdr));
return 0;
}
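
The hunk above wraps &rq->mpwqe.umr_wqe in container_of(), presumably because the ctrl/uctrl segments moved into a new hdr sub-struct of the UMR WQE; container_of() recovers the enclosing struct from a pointer to one of its members. A standalone illustration:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct wqe_hdr { int ctrl; };

struct umr_wqe {
	struct wqe_hdr hdr;	/* embedded header, as in mlx5e_umr_wqe */
	int payload;
};

int main(void)
{
	struct umr_wqe wqe = { .hdr = { .ctrl = 7 }, .payload = 42 };
	struct wqe_hdr *h = &wqe.hdr;
	struct umr_wqe *back = container_of(h, struct umr_wqe, hdr);

	printf("%d\n", back->payload);	/* prints 42 */
	return 0;
}
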
@@ -737,7 +739,7 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
rq->netdev = c->netdev;
rq->priv = c->priv;
rq->tstamp = c->tstamp;
- rq->clock = &mdev->clock;
+ rq->clock = mdev->clock;
rq->icosq = &c->icosq;
rq->ix = c->ix;
rq->channel = c;
@@ -1614,7 +1616,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
int err;
sq->pdev = c->pdev;
- sq->clock = &mdev->clock;
+ sq->clock = mdev->clock;
sq->mkey_be = c->mkey_be;
sq->netdev = c->netdev;
sq->mdev = c->mdev;
@@ -2023,41 +2025,12 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
csp.min_inline_mode = sq->min_inline_mode;
set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
- if (param->is_xdp_mb)
- set_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state);
-
err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
if (err)
goto err_free_xdpsq;
mlx5e_set_xmit_fp(sq, param->is_mpw);
- if (!param->is_mpw && !test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state)) {
- unsigned int ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT + 1;
- unsigned int inline_hdr_sz = 0;
- int i;
-
- if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
- inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
- ds_cnt++;
- }
-
- /* Pre initialize fixed WQE fields */
- for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
- struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, i);
- struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
- struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
-
- sq->db.wqe_info[i] = (struct mlx5e_xdp_wqe_info) {
- .num_wqebbs = 1,
- .num_pkts = 1,
- };
-
- cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
- eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
- }
- }
-
return 0;
err_free_xdpsq:
@@ -3816,8 +3789,11 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
/* MQPRIO is another toplevel qdisc that can't be attached
* simultaneously with the offloaded HTB.
*/
- if (WARN_ON(mlx5e_selq_is_htb_enabled(&priv->selq)))
- return -EINVAL;
+ if (mlx5e_selq_is_htb_enabled(&priv->selq)) {
+ NL_SET_ERR_MSG_MOD(mqprio->extack,
+ "MQPRIO cannot be configured when HTB offload is enabled.");
+ return -EOPNOTSUPP;
+ }
switch (mqprio->mode) {
case TC_MQPRIO_MODE_DCB:
@@ -4447,9 +4423,9 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
if (mlx5e_is_uplink_rep(priv)) {
features = mlx5e_fix_uplink_rep_features(netdev, features);
- netdev->netns_local = true;
+ netdev->netns_immutable = true;
} else {
- netdev->netns_local = false;
+ netdev->netns_immutable = false;
}
mutex_unlock(&priv->state_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index fdff9fd8a89e..2abab241f03b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -65,6 +65,7 @@
#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
#define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1
+#define MLX5E_REP_PARAMS_DEF_LOG_RQ_SIZE 0x8
static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
@@ -855,6 +856,8 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
/* RQ */
mlx5e_build_rq_params(mdev, params);
+ if (!mlx5e_is_uplink_rep(priv) && mlx5_core_is_ecpf(mdev))
+ params->log_rq_mtu_frames = MLX5E_REP_PARAMS_DEF_LOG_RQ_SIZE;
/* If netdev is already registered (e.g. move from nic profile to uplink,
* RTNL lock must be held before triggering netdev notifiers.
@@ -886,6 +889,8 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev,
netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;
netdev->watchdog_timeo = 15 * HZ;
+ if (mlx5_core_is_ecpf(mdev))
+ netdev->tx_queue_len = 1 << MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE;
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
netdev->hw_features |= NETIF_F_HW_TC;
@@ -900,7 +905,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev,
netdev->features |= netdev->hw_features;
- netdev->netns_local = true;
+ netdev->netns_immutable = true;
}
static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 1963bc5adb18..5fd70b4d55be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -631,16 +631,16 @@ static void build_ksm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe,
__be32 key, u16 offset, u16 ksm_len)
{
memset(umr_wqe, 0, offsetof(struct mlx5e_umr_wqe, inline_ksms));
- umr_wqe->ctrl.opmod_idx_opcode =
+ umr_wqe->hdr.ctrl.opmod_idx_opcode =
cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
MLX5_OPCODE_UMR);
- umr_wqe->ctrl.umr_mkey = key;
- umr_wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT)
+ umr_wqe->hdr.ctrl.umr_mkey = key;
+ umr_wqe->hdr.ctrl.qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT)
| MLX5E_KSM_UMR_DS_CNT(ksm_len));
- umr_wqe->uctrl.flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
- umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset);
- umr_wqe->uctrl.xlt_octowords = cpu_to_be16(ksm_len);
- umr_wqe->uctrl.mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
+ umr_wqe->hdr.uctrl.flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
+ umr_wqe->hdr.uctrl.xlt_offset = cpu_to_be16(offset);
+ umr_wqe->hdr.uctrl.xlt_octowords = cpu_to_be16(ksm_len);
+ umr_wqe->hdr.uctrl.mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}
static struct mlx5e_frag_page *mlx5e_shampo_hd_to_frag_page(struct mlx5e_rq *rq, int header_index)
@@ -704,7 +704,7 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
shampo->pi = (shampo->pi + ksm_entries) & (shampo->hd_per_wq - 1);
sq->pc += wqe_bbs;
- sq->doorbell_cseg = &umr_wqe->ctrl;
+ sq->doorbell_cseg = &umr_wqe->hdr.ctrl;
return 0;
@@ -814,12 +814,12 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
bitmap_zero(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
wi->consumed_strides = 0;
- umr_wqe->ctrl.opmod_idx_opcode =
+ umr_wqe->hdr.ctrl.opmod_idx_opcode =
cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
MLX5_OPCODE_UMR);
offset = (ix * rq->mpwqe.mtts_per_wqe) * sizeof(struct mlx5_mtt) / MLX5_OCTWORD;
- umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset);
+ umr_wqe->hdr.uctrl.xlt_offset = cpu_to_be16(offset);
sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
.wqe_type = MLX5E_ICOSQ_WQE_UMR_RX,
@@ -829,7 +829,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
sq->pc += rq->mpwqe.umr_wqebbs;
- sq->doorbell_cseg = &umr_wqe->ctrl;
+ sq->doorbell_cseg = &umr_wqe->hdr.ctrl;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 1d60465cc2ca..2f7a543feca6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -166,6 +166,9 @@ mlx5e_test_loopback_validate(struct sk_buff *skb,
struct udphdr *udph;
struct iphdr *iph;
+ if (skb_linearize(skb))
+ goto out;
+
/* We are only going to peek, no need to clone the SKB */
if (MLX5E_TEST_PKT_SIZE - ETH_HLEN > skb_headlen(skb))
goto out;
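
The selftest hunk linearizes the skb before peeking at the IP/UDP headers, so the fixed-offset parse that follows only touches the contiguous head area. A user-space sketch of the same idea, assuming a hypothetical fragment layout and flattening it before parsing:

#include <stdint.h>
#include <string.h>

struct frag {
	const uint8_t *data;
	size_t len;
};

/* Copy all fragments into one contiguous buffer so fixed-offset
 * header parsing is safe; returns the total length, or 0 if the
 * buffer is too small.
 */
static size_t linearize(const struct frag *frags, size_t n,
			uint8_t *out, size_t out_len)
{
	size_t off = 0;

	for (size_t i = 0; i < n; i++) {
		if (off + frags[i].len > out_len)
			return 0;
		memcpy(out + off, frags[i].data, frags[i].len);
		off += frags[i].len;
	}
	return off;
}
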
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 611ec4b6f370..1c121b435016 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -37,9 +37,7 @@
#include "en/ptp.h"
#include "en/port.h"
-#ifdef CONFIG_PAGE_POOL_STATS
#include <net/page_pool/helpers.h>
-#endif
void mlx5e_ethtool_put_stat(u64 **data, u64 val)
{
@@ -196,7 +194,6 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
#endif
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
-#ifdef CONFIG_PAGE_POOL_STATS
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_fast) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow_high_order) },
@@ -208,7 +205,6 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring_full) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_released_ref) },
-#endif
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
@@ -377,7 +373,6 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
s->rx_arfs_err += rq_stats->arfs_err;
#endif
s->rx_recover += rq_stats->recover;
-#ifdef CONFIG_PAGE_POOL_STATS
s->rx_pp_alloc_fast += rq_stats->pp_alloc_fast;
s->rx_pp_alloc_slow += rq_stats->pp_alloc_slow;
s->rx_pp_alloc_empty += rq_stats->pp_alloc_empty;
@@ -389,7 +384,6 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
s->rx_pp_recycle_ring += rq_stats->pp_recycle_ring;
s->rx_pp_recycle_ring_full += rq_stats->pp_recycle_ring_full;
s->rx_pp_recycle_released_ref += rq_stats->pp_recycle_released_ref;
-#endif
#ifdef CONFIG_MLX5_EN_TLS
s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets;
s->rx_tls_decrypted_bytes += rq_stats->tls_decrypted_bytes;
@@ -496,7 +490,6 @@ static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv,
}
}
-#ifdef CONFIG_PAGE_POOL_STATS
static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
{
struct mlx5e_rq_stats *rq_stats = c->rq.stats;
@@ -519,11 +512,6 @@ static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
rq_stats->pp_recycle_ring_full = stats.recycle_stats.ring_full;
rq_stats->pp_recycle_released_ref = stats.recycle_stats.released_refcnt;
}
-#else
-static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
-{
-}
-#endif
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
{
@@ -1227,6 +1215,13 @@ out:
mutex_unlock(&priv->state_lock);
}
+#define PPORT_PHY_LAYER_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.phys_layer_cntrs.c)
+static const struct counter_desc pport_phy_layer_cntrs_stats_desc[] = {
+ { "link_down_events_phy", PPORT_PHY_LAYER_OFF(link_down_events) }
+};
+
#define PPORT_PHY_STATISTICAL_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.phys_layer_statistical_cntrs.c##_high)
@@ -1243,25 +1238,45 @@ pport_phy_statistical_err_lanes_stats_desc[] = {
{ "rx_err_lane_3_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane3) },
};
+#define PPORT_PHY_RECOVERY_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, counter_set.phys_layer_recovery_cntrs.c)
+static const struct counter_desc
+pport_phy_recovery_cntrs_stats_desc[] = {
+ { "total_success_recovery_phy",
+ PPORT_PHY_RECOVERY_OFF(total_successful_recovery_events) }
+};
+
+#define NUM_PPORT_PHY_LAYER_COUNTERS \
+ ARRAY_SIZE(pport_phy_layer_cntrs_stats_desc)
#define NUM_PPORT_PHY_STATISTICAL_COUNTERS \
ARRAY_SIZE(pport_phy_statistical_stats_desc)
#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \
ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc)
+#define NUM_PPORT_PHY_RECOVERY_COUNTERS \
+ ARRAY_SIZE(pport_phy_recovery_cntrs_stats_desc)
+
+#define NUM_PPORT_PHY_STATISTICAL_LOOPBACK_COUNTERS(dev) \
+ (MLX5_CAP_PCAM_FEATURE(dev, ppcnt_statistical_group) ? \
+ NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0)
+#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_LOOPBACK_COUNTERS(dev) \
+ (MLX5_CAP_PCAM_FEATURE(dev, per_lane_error_counters) ? \
+ NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0)
+#define NUM_PPORT_PHY_RECOVERY_LOOPBACK_COUNTERS(dev) \
+ (MLX5_CAP_PCAM_FEATURE(dev, ppcnt_recovery_counters) ? \
+ NUM_PPORT_PHY_RECOVERY_COUNTERS : 0)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(phy)
{
struct mlx5_core_dev *mdev = priv->mdev;
int num_stats;
- /* "1" for link_down_events special counter */
- num_stats = 1;
+ num_stats = NUM_PPORT_PHY_LAYER_COUNTERS;
- num_stats += MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) ?
- NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0;
+ num_stats += NUM_PPORT_PHY_STATISTICAL_LOOPBACK_COUNTERS(mdev);
- num_stats += MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters) ?
- NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0;
+ num_stats += NUM_PPORT_PHY_STATISTICAL_PER_LANE_LOOPBACK_COUNTERS(mdev);
+ num_stats += NUM_PPORT_PHY_RECOVERY_LOOPBACK_COUNTERS(mdev);
return num_stats;
}
@@ -1270,18 +1285,22 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(phy)
struct mlx5_core_dev *mdev = priv->mdev;
int i;
- ethtool_puts(data, "link_down_events_phy");
-
- if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
- return;
+ for (i = 0; i < NUM_PPORT_PHY_LAYER_COUNTERS; i++)
+ ethtool_puts(data, pport_phy_layer_cntrs_stats_desc[i].format);
- for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
+ for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_LOOPBACK_COUNTERS(mdev); i++)
ethtool_puts(data, pport_phy_statistical_stats_desc[i].format);
- if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
- for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
- ethtool_puts(data,
- pport_phy_statistical_err_lanes_stats_desc[i].format);
+ for (i = 0;
+ i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_LOOPBACK_COUNTERS(mdev);
+ i++)
+ ethtool_puts(data,
+ pport_phy_statistical_err_lanes_stats_desc[i]
+ .format);
+
+ for (i = 0; i < NUM_PPORT_PHY_RECOVERY_LOOPBACK_COUNTERS(mdev); i++)
+ ethtool_puts(data,
+ pport_phy_recovery_cntrs_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
@@ -1289,30 +1308,35 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
struct mlx5_core_dev *mdev = priv->mdev;
int i;
- /* link_down_events_phy has special handling since it is not stored in __be64 format */
- mlx5e_ethtool_put_stat(
- data, MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
- counter_set.phys_layer_cntrs.link_down_events));
-
- if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
- return;
+ for (i = 0; i < NUM_PPORT_PHY_LAYER_COUNTERS; i++)
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR32_BE(&priv->stats.pport
+ .phy_counters,
+ pport_phy_layer_cntrs_stats_desc, i));
- for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
+ for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_LOOPBACK_COUNTERS(mdev); i++)
mlx5e_ethtool_put_stat(
data,
MLX5E_READ_CTR64_BE(
&priv->stats.pport.phy_statistical_counters,
pport_phy_statistical_stats_desc, i));
- if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
- for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
- mlx5e_ethtool_put_stat(
- data,
- MLX5E_READ_CTR64_BE(
- &priv->stats.pport
- .phy_statistical_counters,
- pport_phy_statistical_err_lanes_stats_desc,
- i));
+ for (i = 0;
+ i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_LOOPBACK_COUNTERS(mdev);
+ i++)
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_BE(
+ &priv->stats.pport.phy_statistical_counters,
+ pport_phy_statistical_err_lanes_stats_desc, i));
+
+ for (i = 0; i < NUM_PPORT_PHY_RECOVERY_LOOPBACK_COUNTERS(mdev); i++)
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR32_BE(
+ &priv->stats.pport.phy_recovery_counters,
+ pport_phy_recovery_cntrs_stats_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
@@ -1328,12 +1352,21 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
- if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
- return;
+ if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) {
+ out = pstats->phy_statistical_counters;
+ MLX5_SET(ppcnt_reg, in, grp,
+ MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0,
+ 0);
+ }
- out = pstats->phy_statistical_counters;
- MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+ if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_recovery_counters)) {
+ out = pstats->phy_recovery_counters;
+ MLX5_SET(ppcnt_reg, in, grp,
+ MLX5_PHYSICAL_LAYER_RECOVERY_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0,
+ 0);
+ }
}
void mlx5e_get_link_ext_stats(struct net_device *dev,
@@ -2086,7 +2119,6 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
#endif
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
-#ifdef CONFIG_PAGE_POOL_STATS
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_fast) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow_high_order) },
@@ -2098,7 +2130,6 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring_full) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_released_ref) },
-#endif
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
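
The phy stats rework above derives the counter count, the string list, and the value list from the same capability-gated macros, so the NUM_STATS, FILL_STRS, and FILL_STATS callbacks cannot drift apart. A sketch of that single-source-of-truth shape (has_recovery_group stands in for the PCAM capability bit):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static const char * const recovery_names[] = {
	"total_success_recovery_phy",
};

static size_t num_recovery(bool has_recovery_group)
{
	return has_recovery_group ?
	       sizeof(recovery_names) / sizeof(recovery_names[0]) : 0;
}

static void fill_recovery_strings(bool has_recovery_group)
{
	/* Same gated bound as the count, so names and values align. */
	for (size_t i = 0; i < num_recovery(has_recovery_group); i++)
		puts(recovery_names[i]);
}
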
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 5961c569cfe0..8de6fcbd3a03 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -215,7 +215,6 @@ struct mlx5e_sw_stats {
u64 ch_aff_change;
u64 ch_force_irq;
u64 ch_eq_rearm;
-#ifdef CONFIG_PAGE_POOL_STATS
u64 rx_pp_alloc_fast;
u64 rx_pp_alloc_slow;
u64 rx_pp_alloc_slow_high_order;
@@ -227,7 +226,6 @@ struct mlx5e_sw_stats {
u64 rx_pp_recycle_ring;
u64 rx_pp_recycle_ring_full;
u64 rx_pp_recycle_released_ref;
-#endif
#ifdef CONFIG_MLX5_EN_TLS
u64 tx_tls_encrypted_packets;
u64 tx_tls_encrypted_bytes;
@@ -309,6 +307,9 @@ struct mlx5e_vport_stats {
#define PPORT_PHY_STATISTICAL_GET(pstats, c) \
MLX5_GET64(ppcnt_reg, (pstats)->phy_statistical_counters, \
counter_set.phys_layer_statistical_cntrs.c##_high)
+#define PPORT_PHY_RECOVERY_GET(pstats, c) \
+ MLX5_GET64(ppcnt_reg, (pstats)->phy_recovery_counters, \
+ counter_set.phys_layer_recovery_cntrs.c)
#define PPORT_PER_PRIO_GET(pstats, prio, c) \
MLX5_GET64(ppcnt_reg, pstats->per_prio_counters[prio], \
counter_set.eth_per_prio_grp_data_layout.c##_high)
@@ -324,6 +325,7 @@ struct mlx5e_pport_stats {
__be64 per_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
__be64 phy_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
__be64 phy_statistical_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
+ __be64 phy_recovery_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
__be64 eth_ext_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
__be64 per_tc_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
__be64 per_tc_congest_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
@@ -381,7 +383,6 @@ struct mlx5e_rq_stats {
u64 arfs_err;
#endif
u64 recover;
-#ifdef CONFIG_PAGE_POOL_STATS
u64 pp_alloc_fast;
u64 pp_alloc_slow;
u64 pp_alloc_slow_high_order;
@@ -393,7 +394,6 @@ struct mlx5e_rq_stats {
u64 pp_recycle_ring;
u64 pp_recycle_ring_full;
u64 pp_recycle_released_ref;
-#endif
#ifdef CONFIG_MLX5_EN_TLS
u64 tls_decrypted_packets;
u64 tls_decrypted_bytes;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index f8c7912abe0e..4fd853d19e31 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -525,9 +525,9 @@ static void mlx5e_tx_mpwqe_session_start(struct mlx5e_txqsq *sq,
{
struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
struct mlx5e_tx_wqe *wqe;
- u16 pi;
+ u16 pi, num_wqebbs;
- pi = mlx5e_txqsq_get_next_pi(sq, sq->max_sq_mpw_wqebbs);
+ pi = mlx5e_txqsq_get_next_pi_anysize(sq, &num_wqebbs);
wqe = MLX5E_TX_FETCH_WQE(sq, pi);
net_prefetchw(wqe->data);
@@ -535,6 +535,7 @@ static void mlx5e_tx_mpwqe_session_start(struct mlx5e_txqsq *sq,
.wqe = wqe,
.bytes_count = 0,
.ds_count = MLX5E_TX_WQE_EMPTY_DS_COUNT,
+ .ds_count_max = num_wqebbs * MLX5_SEND_WQEBB_NUM_DS,
.pkt_count = 0,
.inline_on = 0,
};
@@ -626,7 +627,7 @@ mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
mlx5e_tx_mpwqe_add_dseg(sq, &txd);
mlx5e_tx_skb_update_hwts_flags(skb);
- if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe, sq->max_sq_mpw_wqebbs))) {
+ if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe))) {
/* Might stop the queue and affect the retval of __netdev_tx_sent_queue. */
cseg = mlx5e_tx_mpwqe_session_complete(sq);
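
The TX MPWQE hunks size each session by the WQEBBs actually reserved and test fullness against that per-session ds_count_max rather than a global maximum. A minimal sketch; the constant 4 reflects a 64-byte WQEBB holding four 16-byte data segments, mirroring MLX5_SEND_WQEBB_NUM_DS, and the seeding is simplified:

#include <stdbool.h>
#include <stdint.h>

#define DS_PER_WQEBB 4	/* 64-byte WQEBB / 16-byte data segment */

struct mpwqe_session {
	uint16_t ds_count;
	uint16_t ds_count_max;
};

static void session_start(struct mpwqe_session *s, uint16_t num_wqebbs)
{
	s->ds_count = 0;	/* the driver seeds this with the ctrl/eth segments */
	s->ds_count_max = num_wqebbs * DS_PER_WQEBB;
}

static bool session_is_full(const struct mpwqe_session *s)
{
	return s->ds_count >= s->ds_count_max;
}
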
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c
index d599e50af346..3ce455c2535c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c
@@ -27,7 +27,7 @@ esw_acl_table_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport, int ns,
esw_debug(dev, "Create vport[%d] %s ACL table\n", vport_num,
ns == MLX5_FLOW_NAMESPACE_ESW_INGRESS ? "ingress" : "egress");
- root_ns = mlx5_get_flow_vport_acl_namespace(dev, ns, vport->index);
+ root_ns = mlx5_get_flow_vport_namespace(dev, ns, vport->index);
if (!root_ns) {
esw_warn(dev, "Failed to get E-Switch root namespace for vport (%d)\n",
vport_num);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
index 5f647358a05c..76e35c827da0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -1863,7 +1863,7 @@ int mlx5_esw_bridge_port_mdb_add(struct net_device *dev, u16 vport_num, u16 esw_
"Failed to lookup bridge port to add MDB (MAC=%pM,vport=%u)\n",
addr, vport_num);
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Failed to lookup bridge port to add MDB (MAC=%pM,vport=%u)\n",
+ "Failed to lookup bridge port to add MDB (MAC=%pM,vport=%u)",
addr, vport_num);
return -EINVAL;
}
@@ -1876,7 +1876,7 @@ int mlx5_esw_bridge_port_mdb_add(struct net_device *dev, u16 vport_num, u16 esw_
"Failed to lookup bridge port vlan metadata to create MDB (MAC=%pM,vid=%u,vport=%u)\n",
addr, vid, vport_num);
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Failed to lookup vlan metadata for MDB (MAC=%pM,vid=%u,vport=%u)\n",
+ "Failed to lookup vlan metadata for MDB (MAC=%pM,vid=%u,vport=%u)",
addr, vid, vport_num);
return -EINVAL;
}
@@ -1884,7 +1884,7 @@ int mlx5_esw_bridge_port_mdb_add(struct net_device *dev, u16 vport_num, u16 esw_
err = mlx5_esw_bridge_port_mdb_attach(dev, port, addr, vid);
if (err) {
- NL_SET_ERR_MSG_FMT_MOD(extack, "Failed to add MDB (MAC=%pM,vid=%u,vport=%u)\n",
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Failed to add MDB (MAC=%pM,vid=%u,vport=%u)",
addr, vid, vport_num);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
index 982fe3714683..b7102e14d23d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
@@ -32,7 +32,7 @@ static void mlx5_esw_offloads_pf_vf_devlink_port_attrs_set(struct mlx5_eswitch *
u16 pfnum;
mlx5_esw_get_port_parent_id(dev, &ppid);
- pfnum = mlx5_get_dev_index(dev);
+ pfnum = PCI_FUNC(dev->pdev->devfn);
external = mlx5_core_is_ecpf_esw_manager(dev);
if (external)
controller_num = dev->priv.eswitch->offloads.host_number + 1;
@@ -110,7 +110,7 @@ static void mlx5_esw_offloads_sf_devlink_port_attrs_set(struct mlx5_eswitch *esw
struct netdev_phys_item_id ppid = {};
u16 pfnum;
- pfnum = mlx5_get_dev_index(dev);
+ pfnum = PCI_FUNC(dev->pdev->devfn);
mlx5_esw_get_port_parent_id(dev, &ppid);
memcpy(dl_port->attrs.switch_id.id, &ppid.id[0], ppid.id_len);
dl_port->attrs.switch_id.id_len = ppid.id_len;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
index ed977ae75fab..3cfe743610d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
@@ -10,9 +10,9 @@
#endif
enum {
- MLX5_ESW_IPSEC_RX_POL_FT_LEVEL,
MLX5_ESW_IPSEC_RX_ESP_FT_LEVEL,
MLX5_ESW_IPSEC_RX_ESP_FT_CHK_LEVEL,
+ MLX5_ESW_IPSEC_RX_POL_FT_LEVEL,
};
enum {
@@ -85,6 +85,19 @@ err_header_alloc:
return err;
}
+void mlx5_esw_ipsec_rx_rule_add_match_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5_flow_spec *spec)
+{
+ MLX5_SET(fte_match_param, spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_1,
+ ESW_IPSEC_RX_MAPPED_ID_MATCH_MASK);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.metadata_reg_c_1,
+ sa_entry->rx_mapped_id << ESW_ZONE_ID_BITS);
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+}
+
void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry)
{
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
index ac9c65b89166..514c15258b1d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
@@ -20,6 +20,8 @@ int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id,
void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_tx_create_attr *attr);
void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev);
+void mlx5_esw_ipsec_rx_rule_add_match_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5_flow_spec *spec);
#else
static inline void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx_create_attr *attr) {}
@@ -48,5 +50,8 @@ static inline void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_tx_create_attr *attr) {}
static inline void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev) {}
+static inline void
+mlx5_esw_ipsec_rx_rule_add_match_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5_flow_spec *spec) {}
#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_ESW_IPSEC_FS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
index 45183de424f3..76382626ad41 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
@@ -96,7 +96,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
if (!flow_group_in)
return -ENOMEM;
- ft_attr.max_fte = POOL_NEXT_SIZE;
+ ft_attr.max_fte = MLX5_FS_MAX_POOL_SIZE;
ft_attr.prio = LEGACY_FDB_PRIO;
fdb = mlx5_create_flow_table(root_ns, &ft_attr);
if (IS_ERR(fdb)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index 823c1ba456cd..b6ae384396b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -90,15 +90,30 @@ struct mlx5_esw_sched_node {
struct list_head children;
/* Valid only if this node is associated with a vport. */
struct mlx5_vport *vport;
+ /* Level in the hierarchy. The root node level is 1. */
+ u8 level;
};
+static void esw_qos_node_attach_to_parent(struct mlx5_esw_sched_node *node)
+{
+ if (!node->parent) {
+ /* Root children are assigned a depth level of 2. */
+ node->level = 2;
+ list_add_tail(&node->entry, &node->esw->qos.domain->nodes);
+ } else {
+ node->level = node->parent->level + 1;
+ list_add_tail(&node->entry, &node->parent->children);
+ }
+}
+
static void
esw_qos_node_set_parent(struct mlx5_esw_sched_node *node, struct mlx5_esw_sched_node *parent)
{
list_del_init(&node->entry);
node->parent = parent;
- list_add_tail(&node->entry, &parent->children);
- node->esw = parent->esw;
+ if (parent)
+ node->esw = parent->esw;
+ esw_qos_node_attach_to_parent(node);
}
void mlx5_esw_qos_vport_qos_free(struct mlx5_vport *vport)
@@ -305,8 +320,9 @@ static int esw_qos_set_node_min_rate(struct mlx5_esw_sched_node *node,
return 0;
}
-static int esw_qos_create_node_sched_elem(struct mlx5_core_dev *dev, u32 parent_element_id,
- u32 *tsar_ix)
+static int
+esw_qos_create_node_sched_elem(struct mlx5_core_dev *dev, u32 parent_element_id,
+ u32 max_rate, u32 bw_share, u32 *tsar_ix)
{
u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
void *attr;
@@ -323,6 +339,8 @@ static int esw_qos_create_node_sched_elem(struct mlx5_core_dev *dev, u32 parent_
SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
MLX5_SET(scheduling_context, tsar_ctx, parent_element_id,
parent_element_id);
+ MLX5_SET(scheduling_context, tsar_ctx, max_average_bw, max_rate);
+ MLX5_SET(scheduling_context, tsar_ctx, bw_share, bw_share);
attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
MLX5_SET(tsar_element, attr, tsar_type, TSAR_ELEMENT_TSAR_TYPE_DWRR);
@@ -358,7 +376,6 @@ static struct mlx5_esw_sched_node *
__esw_qos_alloc_node(struct mlx5_eswitch *esw, u32 tsar_ix, enum sched_node_type type,
struct mlx5_esw_sched_node *parent)
{
- struct list_head *parent_children;
struct mlx5_esw_sched_node *node;
node = kzalloc(sizeof(*node), GFP_KERNEL);
@@ -370,8 +387,7 @@ __esw_qos_alloc_node(struct mlx5_eswitch *esw, u32 tsar_ix, enum sched_node_type
node->type = type;
node->parent = parent;
INIT_LIST_HEAD(&node->children);
- parent_children = parent ? &parent->children : &esw->qos.domain->nodes;
- list_add_tail(&node->entry, parent_children);
+ esw_qos_node_attach_to_parent(node);
return node;
}
@@ -396,7 +412,8 @@ __esw_qos_create_vports_sched_node(struct mlx5_eswitch *esw, struct mlx5_esw_sch
u32 tsar_ix;
int err;
- err = esw_qos_create_node_sched_elem(esw->dev, esw->qos.root_tsar_ix, &tsar_ix);
+ err = esw_qos_create_node_sched_elem(esw->dev, esw->qos.root_tsar_ix, 0,
+ 0, &tsar_ix);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "E-Switch create TSAR for node failed");
return ERR_PTR(err);
@@ -463,7 +480,8 @@ static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *exta
if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
return -EOPNOTSUPP;
- err = esw_qos_create_node_sched_elem(esw->dev, 0, &esw->qos.root_tsar_ix);
+ err = esw_qos_create_node_sched_elem(esw->dev, 0, 0, 0,
+ &esw->qos.root_tsar_ix);
if (err) {
esw_warn(dev, "E-Switch create root TSAR failed (%d)\n", err);
return err;
@@ -986,10 +1004,10 @@ int mlx5_esw_qos_vport_update_parent(struct mlx5_vport *vport, struct mlx5_esw_s
return err;
}
-int mlx5_esw_devlink_rate_parent_set(struct devlink_rate *devlink_rate,
- struct devlink_rate *parent,
- void *priv, void *parent_priv,
- struct netlink_ext_ack *extack)
+int mlx5_esw_devlink_rate_leaf_parent_set(struct devlink_rate *devlink_rate,
+ struct devlink_rate *parent,
+ void *priv, void *parent_priv,
+ struct netlink_ext_ack *extack)
{
struct mlx5_esw_sched_node *node;
struct mlx5_vport *vport = priv;
@@ -1000,3 +1018,105 @@ int mlx5_esw_devlink_rate_parent_set(struct devlink_rate *devlink_rate,
node = parent_priv;
return mlx5_esw_qos_vport_update_parent(vport, node, extack);
}
+
+static int
+mlx5_esw_qos_node_validate_set_parent(struct mlx5_esw_sched_node *node,
+ struct mlx5_esw_sched_node *parent,
+ struct netlink_ext_ack *extack)
+{
+ u8 new_level, max_level;
+
+ if (parent && parent->esw != node->esw) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot assign node to another E-Switch");
+ return -EOPNOTSUPP;
+ }
+
+ if (!list_empty(&node->children)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot reassign a node that contains rate objects");
+ return -EOPNOTSUPP;
+ }
+
+ new_level = parent ? parent->level + 1 : 2;
+ max_level = 1 << MLX5_CAP_QOS(node->esw->dev, log_esw_max_sched_depth);
+ if (new_level > max_level) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Node hierarchy depth exceeds the maximum supported level");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int esw_qos_vports_node_update_parent(struct mlx5_esw_sched_node *node,
+ struct mlx5_esw_sched_node *parent,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *curr_parent = node->parent;
+ struct mlx5_eswitch *esw = node->esw;
+ u32 parent_ix;
+ int err;
+
+ parent_ix = parent ? parent->ix : node->esw->qos.root_tsar_ix;
+ mlx5_destroy_scheduling_element_cmd(esw->dev,
+ SCHEDULING_HIERARCHY_E_SWITCH,
+ node->ix);
+ err = esw_qos_create_node_sched_elem(esw->dev, parent_ix,
+ node->max_rate, 0, &node->ix);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to create a node under the new hierarchy.");
+ if (esw_qos_create_node_sched_elem(esw->dev, curr_parent->ix,
+ node->max_rate,
+ node->bw_share,
+ &node->ix))
+ esw_warn(esw->dev, "Failed to restore QoS node\n");
+
+ return err;
+ }
+ esw_qos_node_set_parent(node, parent);
+
+ return 0;
+}
+
+static int mlx5_esw_qos_node_update_parent(struct mlx5_esw_sched_node *node,
+ struct mlx5_esw_sched_node *parent,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *curr_parent;
+ struct mlx5_eswitch *esw = node->esw;
+ int err;
+
+ err = mlx5_esw_qos_node_validate_set_parent(node, parent, extack);
+ if (err)
+ return err;
+
+ esw_qos_lock(esw);
+ curr_parent = node->parent;
+ err = esw_qos_vports_node_update_parent(node, parent, extack);
+ if (err)
+ goto out;
+
+ esw_qos_normalize_min_rate(esw, curr_parent, extack);
+ esw_qos_normalize_min_rate(esw, parent, extack);
+
+out:
+ esw_qos_unlock(esw);
+
+ return err;
+}
+
+int mlx5_esw_devlink_rate_node_parent_set(struct devlink_rate *devlink_rate,
+ struct devlink_rate *parent,
+ void *priv, void *parent_priv,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *node = priv, *parent_node;
+
+ if (!parent)
+ return mlx5_esw_qos_node_update_parent(node, NULL, extack);
+
+ parent_node = parent_priv;
+ return mlx5_esw_qos_node_update_parent(node, parent_node, extack);
+}
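
esw_qos_vports_node_update_parent() above moves a scheduling node by destroying its firmware element and recreating it under the new parent, falling back to recreating it under the old parent when that fails. A generic sketch of the destroy/recreate-with-rollback shape (hw_create()/hw_destroy() are illustrative stand-ins for the scheduling-element commands):

struct sched_node { int parent_ix; };

static int hw_create(struct sched_node *n, int parent_ix)
{
	n->parent_ix = parent_ix;	/* stand-in for a firmware command */
	return 0;
}

static void hw_destroy(struct sched_node *n)
{
	(void)n;			/* stand-in for a firmware command */
}

static int move_node(struct sched_node *n, int new_parent_ix)
{
	int old_parent_ix = n->parent_ix;

	hw_destroy(n);
	if (hw_create(n, new_parent_ix) == 0)
		return 0;
	/* Best-effort rollback: try to restore the old placement. */
	if (hw_create(n, old_parent_ix) != 0)
		return -2;	/* node lost; caller should warn loudly */
	return -1;
}
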
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h
index 6eb8f6a648c8..ed40ec8f027e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h
@@ -29,10 +29,14 @@ int mlx5_esw_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv,
struct netlink_ext_ack *extack);
int mlx5_esw_devlink_rate_node_del(struct devlink_rate *rate_node, void *priv,
struct netlink_ext_ack *extack);
-int mlx5_esw_devlink_rate_parent_set(struct devlink_rate *devlink_rate,
- struct devlink_rate *parent,
- void *priv, void *parent_priv,
- struct netlink_ext_ack *extack);
+int mlx5_esw_devlink_rate_leaf_parent_set(struct devlink_rate *devlink_rate,
+ struct devlink_rate *parent,
+ void *priv, void *parent_priv,
+ struct netlink_ext_ack *extack);
+int mlx5_esw_devlink_rate_node_parent_set(struct devlink_rate *devlink_rate,
+ struct devlink_rate *parent,
+ void *priv, void *parent_priv,
+ struct netlink_ext_ack *extack);
#endif
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 20cc01ceee8a..a6a8eea5980c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -648,6 +648,7 @@ esw_setup_meter(struct mlx5_flow_attr *attr, struct mlx5_flow_act *flow_act)
meter = attr->meter_attr.meter;
flow_act->exe_aso.type = attr->exe_aso_type;
flow_act->exe_aso.object_id = meter->obj_id;
+ flow_act->exe_aso.base_id = mlx5e_flow_meter_get_base_id(meter);
flow_act->exe_aso.flow_meter.meter_idx = meter->idx;
flow_act->exe_aso.flow_meter.init_color = MLX5_FLOW_METER_COLOR_GREEN;
/* use metadata reg 5 for packet color */
@@ -2828,9 +2829,9 @@ static int esw_set_master_egress_rule(struct mlx5_core_dev *master,
if (IS_ERR(vport))
return PTR_ERR(vport);
- egress_ns = mlx5_get_flow_vport_acl_namespace(master,
- MLX5_FLOW_NAMESPACE_ESW_EGRESS,
- vport->index);
+ egress_ns = mlx5_get_flow_vport_namespace(master,
+ MLX5_FLOW_NAMESPACE_ESW_EGRESS,
+ vport->index);
if (!egress_ns)
return -EINVAL;
@@ -4157,37 +4158,12 @@ u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
-static int mlx5_esw_query_vport_vhca_id(struct mlx5_eswitch *esw, u16 vport_num, u16 *vhca_id)
-{
- int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
- void *query_ctx;
- void *hca_caps;
- int err;
-
- *vhca_id = 0;
-
- query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
- if (!query_ctx)
- return -ENOMEM;
-
- err = mlx5_vport_get_other_func_general_cap(esw->dev, vport_num, query_ctx);
- if (err)
- goto out_free;
-
- hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
- *vhca_id = MLX5_GET(cmd_hca_cap, hca_caps, vhca_id);
-
-out_free:
- kfree(query_ctx);
- return err;
-}
-
int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num)
{
u16 *old_entry, *vhca_map_entry, vhca_id;
int err;
- err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
+ err = mlx5_vport_get_vhca_id(esw->dev, vport_num, &vhca_id);
if (err) {
esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%u,err=%d)\n",
vport_num, err);
@@ -4213,7 +4189,7 @@ void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num)
u16 *vhca_map_entry, vhca_id;
int err;
- err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
+ err = mlx5_vport_get_vhca_id(esw->dev, vport_num, &vhca_id);
if (err)
esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%hu,err=%d)\n",
vport_num, err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c
index d91ea53eb394..01c5f5990f9a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/events.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c
@@ -6,6 +6,7 @@
#include "mlx5_core.h"
#include "lib/eq.h"
#include "lib/events.h"
+#include "hwmon.h"
struct mlx5_event_nb {
struct mlx5_nb nb;
@@ -153,21 +154,50 @@ static int any_notifier(struct notifier_block *nb,
return NOTIFY_OK;
}
+#if IS_ENABLED(CONFIG_HWMON)
+static void print_sensor_names_in_bit_set(struct mlx5_core_dev *dev, struct mlx5_hwmon *hwmon,
+ u64 bit_set, int bit_set_offset)
+{
+ unsigned long *bit_set_ptr = (unsigned long *)&bit_set;
+ int num_bits = sizeof(bit_set) * BITS_PER_BYTE;
+ int i;
+
+ for_each_set_bit(i, bit_set_ptr, num_bits) {
+ const char *sensor_name = hwmon_get_sensor_name(hwmon, i + bit_set_offset);
+
+ mlx5_core_warn(dev, "Sensor name[%d]: %s\n", i + bit_set_offset, sensor_name);
+ }
+}
+#endif /* CONFIG_HWMON */
+
/* type == MLX5_EVENT_TYPE_TEMP_WARN_EVENT */
static int temp_warn(struct notifier_block *nb, unsigned long type, void *data)
{
struct mlx5_event_nb *event_nb = mlx5_nb_cof(nb, struct mlx5_event_nb, nb);
struct mlx5_events *events = event_nb->ctx;
+ struct mlx5_core_dev *dev = events->dev;
struct mlx5_eqe *eqe = data;
u64 value_lsb;
u64 value_msb;
value_lsb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_lsb);
+ /* Bits 1-63 are not supported for NICs,
+ * hence read only bit 0 (ASIC) from the LSB.
+ */
+ value_lsb &= 0x1;
value_msb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_msb);
- mlx5_core_warn(events->dev,
- "High temperature on sensors with bit set %llx %llx",
- value_msb, value_lsb);
+ if (net_ratelimit()) {
+ mlx5_core_warn(dev, "High temperature on sensors with bit set %#llx %#llx.\n",
+ value_msb, value_lsb);
+#if IS_ENABLED(CONFIG_HWMON)
+ if (dev->hwmon) {
+ print_sensor_names_in_bit_set(dev, dev->hwmon, value_lsb, 0);
+ print_sensor_names_in_bit_set(dev, dev->hwmon, value_msb,
+ sizeof(value_lsb) * BITS_PER_BYTE);
+ }
+#endif
+ }
return NOTIFY_OK;
}
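A minimal, self-contained sketch of the bit-walk used by print_sensor_names_in_bit_set() above: the kernel's for_each_set_bit() operates on unsigned long words, which is why the driver casts the u64 mask; this portable equivalent visits the same indices, with the MSB word offset by 64 bits. walk_sensor_bits() and the sample values are hypothetical stand-ins, not driver code.

#include <stdio.h>
#include <stdint.h>

/* Visit the index of every set bit in a 64-bit sensor-warning mask,
 * shifted by bit_set_offset so the MSB word continues where the LSB
 * word ends (bits 64..127).
 */
static void walk_sensor_bits(uint64_t bit_set, int bit_set_offset)
{
	int i;

	for (i = 0; i < 64; i++)
		if (bit_set & (1ULL << i))
			printf("sensor bit %d set\n", i + bit_set_offset);
}

int main(void)
{
	uint64_t value_lsb = 0x1;	/* only bit 0 (ASIC) is kept by temp_warn() */
	uint64_t value_msb = 0x0;

	walk_sensor_bits(value_lsb, 0);
	walk_sensor_bits(value_msb, 64);
	return 0;
}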
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index ae20c061e0fb..a47c29571f64 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -1142,6 +1142,8 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type typ
case FS_FT_RDMA_RX:
case FS_FT_RDMA_TX:
case FS_FT_PORT_SEL:
+ case FS_FT_RDMA_TRANSPORT_RX:
+ case FS_FT_RDMA_TRANSPORT_TX:
return mlx5_fs_cmd_get_fw_cmds();
default:
return mlx5_fs_cmd_get_stub_cmds();
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 22dc23d991d2..6163bc98d94a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1456,7 +1456,7 @@ mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
struct mlx5_flow_table *ft;
int autogroups_max_fte;
- ft = mlx5_create_flow_table(ns, ft_attr);
+ ft = mlx5_create_vport_flow_table(ns, ft_attr, ft_attr->vport);
if (IS_ERR(ft))
return ft;
@@ -2764,9 +2764,9 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL(mlx5_get_flow_namespace);
-struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
- enum mlx5_flow_namespace_type type,
- int vport)
+struct mlx5_flow_namespace *
+mlx5_get_flow_vport_namespace(struct mlx5_core_dev *dev,
+ enum mlx5_flow_namespace_type type, int vport_idx)
{
struct mlx5_flow_steering *steering = dev->priv.steering;
@@ -2775,25 +2775,43 @@ struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_d
switch (type) {
case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
- if (vport >= steering->esw_egress_acl_vports)
+ if (vport_idx >= steering->esw_egress_acl_vports)
return NULL;
if (steering->esw_egress_root_ns &&
- steering->esw_egress_root_ns[vport])
- return &steering->esw_egress_root_ns[vport]->ns;
+ steering->esw_egress_root_ns[vport_idx])
+ return &steering->esw_egress_root_ns[vport_idx]->ns;
else
return NULL;
case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
- if (vport >= steering->esw_ingress_acl_vports)
+ if (vport_idx >= steering->esw_ingress_acl_vports)
return NULL;
if (steering->esw_ingress_root_ns &&
- steering->esw_ingress_root_ns[vport])
- return &steering->esw_ingress_root_ns[vport]->ns;
+ steering->esw_ingress_root_ns[vport_idx])
+ return &steering->esw_ingress_root_ns[vport_idx]->ns;
+ else
+ return NULL;
+ case MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX:
+ if (vport_idx >= steering->rdma_transport_rx_vports)
+ return NULL;
+ if (steering->rdma_transport_rx_root_ns &&
+ steering->rdma_transport_rx_root_ns[vport_idx])
+ return &steering->rdma_transport_rx_root_ns[vport_idx]->ns;
+ else
+ return NULL;
+ case MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_TX:
+ if (vport_idx >= steering->rdma_transport_tx_vports)
+ return NULL;
+
+ if (steering->rdma_transport_tx_root_ns &&
+ steering->rdma_transport_tx_root_ns[vport_idx])
+ return &steering->rdma_transport_tx_root_ns[vport_idx]->ns;
else
return NULL;
default:
return NULL;
}
}
+EXPORT_SYMBOL(mlx5_get_flow_vport_namespace);
static struct fs_prio *_fs_create_prio(struct mlx5_flow_namespace *ns,
unsigned int prio,
@@ -3199,6 +3217,127 @@ out_err:
return err;
}
+static int
+init_rdma_transport_rx_root_ns_one(struct mlx5_flow_steering *steering,
+ int vport_idx)
+{
+ struct fs_prio *prio;
+
+ steering->rdma_transport_rx_root_ns[vport_idx] =
+ create_root_ns(steering, FS_FT_RDMA_TRANSPORT_RX);
+ if (!steering->rdma_transport_rx_root_ns[vport_idx])
+ return -ENOMEM;
+
+ /* create 1 prio */
+ prio = fs_create_prio(&steering->rdma_transport_rx_root_ns[vport_idx]->ns,
+ MLX5_RDMA_TRANSPORT_BYPASS_PRIO, 1);
+ return PTR_ERR_OR_ZERO(prio);
+}
+
+static int
+init_rdma_transport_tx_root_ns_one(struct mlx5_flow_steering *steering,
+ int vport_idx)
+{
+ struct fs_prio *prio;
+
+ steering->rdma_transport_tx_root_ns[vport_idx] =
+ create_root_ns(steering, FS_FT_RDMA_TRANSPORT_TX);
+ if (!steering->rdma_transport_tx_root_ns[vport_idx])
+ return -ENOMEM;
+
+ /* create 1 prio */
+ prio = fs_create_prio(&steering->rdma_transport_tx_root_ns[vport_idx]->ns,
+ MLX5_RDMA_TRANSPORT_BYPASS_PRIO, 1);
+ return PTR_ERR_OR_ZERO(prio);
+}
+
+static int init_rdma_transport_rx_root_ns(struct mlx5_flow_steering *steering)
+{
+ struct mlx5_core_dev *dev = steering->dev;
+ int total_vports;
+ int err;
+ int i;
+
+ /* If the eswitch is not supported (legacy mode), fall back to one vport */
+ total_vports = mlx5_eswitch_get_total_vports(dev) ?: 1;
+
+ steering->rdma_transport_rx_root_ns =
+ kcalloc(total_vports,
+ sizeof(*steering->rdma_transport_rx_root_ns),
+ GFP_KERNEL);
+ if (!steering->rdma_transport_rx_root_ns)
+ return -ENOMEM;
+
+ for (i = 0; i < total_vports; i++) {
+ err = init_rdma_transport_rx_root_ns_one(steering, i);
+ if (err)
+ goto cleanup_root_ns;
+ }
+ steering->rdma_transport_rx_vports = total_vports;
+ return 0;
+
+cleanup_root_ns:
+ while (i--)
+ cleanup_root_ns(steering->rdma_transport_rx_root_ns[i]);
+ kfree(steering->rdma_transport_rx_root_ns);
+ steering->rdma_transport_rx_root_ns = NULL;
+ return err;
+}
+
+static int init_rdma_transport_tx_root_ns(struct mlx5_flow_steering *steering)
+{
+ struct mlx5_core_dev *dev = steering->dev;
+ int total_vports;
+ int err;
+ int i;
+
+ /* If the eswitch is not supported (legacy mode), fall back to one vport */
+ total_vports = mlx5_eswitch_get_total_vports(dev) ?: 1;
+
+ steering->rdma_transport_tx_root_ns =
+ kcalloc(total_vports,
+ sizeof(*steering->rdma_transport_tx_root_ns),
+ GFP_KERNEL);
+ if (!steering->rdma_transport_tx_root_ns)
+ return -ENOMEM;
+
+ for (i = 0; i < total_vports; i++) {
+ err = init_rdma_transport_tx_root_ns_one(steering, i);
+ if (err)
+ goto cleanup_root_ns;
+ }
+ steering->rdma_transport_tx_vports = total_vports;
+ return 0;
+
+cleanup_root_ns:
+ while (i--)
+ cleanup_root_ns(steering->rdma_transport_tx_root_ns[i]);
+ kfree(steering->rdma_transport_tx_root_ns);
+ steering->rdma_transport_tx_root_ns = NULL;
+ return err;
+}
+
+static void cleanup_rdma_transport_roots_ns(struct mlx5_flow_steering *steering)
+{
+ int i;
+
+ if (steering->rdma_transport_rx_root_ns) {
+ for (i = 0; i < steering->rdma_transport_rx_vports; i++)
+ cleanup_root_ns(steering->rdma_transport_rx_root_ns[i]);
+
+ kfree(steering->rdma_transport_rx_root_ns);
+ steering->rdma_transport_rx_root_ns = NULL;
+ }
+
+ if (steering->rdma_transport_tx_root_ns) {
+ for (i = 0; i < steering->rdma_transport_tx_vports; i++)
+ cleanup_root_ns(steering->rdma_transport_tx_root_ns[i]);
+
+ kfree(steering->rdma_transport_tx_root_ns);
+ steering->rdma_transport_tx_root_ns = NULL;
+ }
+}
+
/* FT and tc chains are stored in the same array so we can re-use the
* mlx5_get_fdb_sub_ns() and tc API for FT chains.
* When creating a new ns for each chain, store it in the first available slot.
@@ -3631,6 +3770,7 @@ void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev)
cleanup_root_ns(steering->rdma_rx_root_ns);
cleanup_root_ns(steering->rdma_tx_root_ns);
cleanup_root_ns(steering->egress_root_ns);
+ cleanup_rdma_transport_roots_ns(steering);
devl_params_unregister(priv_to_devlink(dev), mlx5_fs_params,
ARRAY_SIZE(mlx5_fs_params));
@@ -3700,6 +3840,18 @@ int mlx5_fs_core_init(struct mlx5_core_dev *dev)
goto err;
}
+ if (MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_RX(dev, ft_support)) {
+ err = init_rdma_transport_rx_root_ns(steering);
+ if (err)
+ goto err;
+ }
+
+ if (MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_TX(dev, ft_support)) {
+ err = init_rdma_transport_tx_root_ns(steering);
+ if (err)
+ goto err;
+ }
+
return 0;
err:
@@ -3850,8 +4002,10 @@ mlx5_get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type
struct mlx5_flow_namespace *ns;
if (ns_type == MLX5_FLOW_NAMESPACE_ESW_EGRESS ||
- ns_type == MLX5_FLOW_NAMESPACE_ESW_INGRESS)
- ns = mlx5_get_flow_vport_acl_namespace(dev, ns_type, 0);
+ ns_type == MLX5_FLOW_NAMESPACE_ESW_INGRESS ||
+ ns_type == MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_TX ||
+ ns_type == MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX)
+ ns = mlx5_get_flow_vport_namespace(dev, ns_type, 0);
else
ns = mlx5_get_flow_namespace(dev, ns_type);
if (!ns)
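The init_rdma_transport_*_root_ns() helpers above follow a common alloc/init/unwind shape: size a pointer array by total_vports, initialize each slot, and on failure tear down only the slots already built before freeing the array. A minimal sketch of that shape, where obj_create()/obj_destroy() are hypothetical stand-ins for create_root_ns()/cleanup_root_ns():

#include <stdlib.h>

struct obj { int id; };

static struct obj *obj_create(int idx)
{
	struct obj *o = malloc(sizeof(*o));

	if (o)
		o->id = idx;
	return o;
}

static void obj_destroy(struct obj *o)
{
	free(o);	/* free(NULL) is a no-op, like cleanup_root_ns() */
}

static int init_all(struct obj ***arr_out, int total)
{
	struct obj **arr;
	int i;

	arr = calloc(total, sizeof(*arr));
	if (!arr)
		return -1;

	for (i = 0; i < total; i++) {
		arr[i] = obj_create(i);
		if (!arr[i])
			goto cleanup;	/* unwind only what was built */
	}
	*arr_out = arr;
	return 0;

cleanup:
	while (i--)
		obj_destroy(arr[i]);
	free(arr);
	return -1;
}

int main(void)
{
	struct obj **arr = NULL;
	int i;

	if (init_all(&arr, 4))
		return 1;
	for (i = 0; i < 4; i++)
		obj_destroy(arr[i]);
	free(arr);
	return 0;
}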
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 20837e526679..0767239f651c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -115,7 +115,9 @@ enum fs_flow_table_type {
FS_FT_PORT_SEL = 0X9,
FS_FT_FDB_RX = 0xa,
FS_FT_FDB_TX = 0xb,
- FS_FT_MAX_TYPE = FS_FT_FDB_TX,
+ FS_FT_RDMA_TRANSPORT_RX = 0xd,
+ FS_FT_RDMA_TRANSPORT_TX = 0xe,
+ FS_FT_MAX_TYPE = FS_FT_RDMA_TRANSPORT_TX,
};
enum fs_flow_table_op_mod {
@@ -158,6 +160,10 @@ struct mlx5_flow_steering {
struct mlx5_flow_root_namespace *port_sel_root_ns;
int esw_egress_acl_vports;
int esw_ingress_acl_vports;
+ struct mlx5_flow_root_namespace **rdma_transport_rx_root_ns;
+ struct mlx5_flow_root_namespace **rdma_transport_tx_root_ns;
+ int rdma_transport_rx_vports;
+ int rdma_transport_tx_vports;
};
struct fs_node {
@@ -341,16 +347,10 @@ struct mlx5_fc {
u64 lastbytes;
};
-struct mlx5_fc_bulk_hws_data {
- struct mlx5hws_action *hws_action;
- struct mutex lock; /* protects hws_action */
- refcount_t hws_action_refcount;
-};
-
struct mlx5_fc_bulk {
struct mlx5_fs_bulk fs_bulk;
u32 base_id;
- struct mlx5_fc_bulk_hws_data hws_data;
+ struct mlx5_fs_hws_data hws_data;
struct mlx5_fc fcs[];
};
@@ -434,7 +434,9 @@ struct mlx5_flow_root_namespace *find_root(struct fs_node *node);
(type == FS_FT_PORT_SEL) ? MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) : \
(type == FS_FT_FDB_RX) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \
(type == FS_FT_FDB_TX) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \
- (BUILD_BUG_ON_ZERO(FS_FT_FDB_TX != FS_FT_MAX_TYPE))\
+ (type == FS_FT_RDMA_TRANSPORT_RX) ? MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_RX(mdev, cap) : \
+ (type == FS_FT_RDMA_TRANSPORT_TX) ? MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_TX(mdev, cap) : \
+ (BUILD_BUG_ON_ZERO(FS_FT_RDMA_TRANSPORT_TX != FS_FT_MAX_TYPE))\
)
#endif
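The BUILD_BUG_ON_ZERO() arm in the cap-query macro above is a compile-time tripwire: anyone adding a new fs_flow_table_type must also extend the ?: chain, or the negative-width bitfield fails to compile. A sketch of the same idea with an illustrative two-entry enum (the zero-width-bitfield struct relies on a GCC extension, as the kernel's macro does):

#include <stdio.h>

/* Zero-sized when e is false; a negative-width bitfield (compile
 * error) when e is true -- same trick as the kernel macro. */
#define BUILD_BUG_ON_ZERO(e) ((int)sizeof(struct { int:(-!!(e)); }))

enum ft_type { FT_RX, FT_TX, FT_MAX_TYPE = FT_TX };

#define cap_for(t) ((t) + 100)	/* hypothetical per-type capability lookup */

/* Breaks the build if FT_TX is no longer the last enumerator,
 * forcing this chain to be revisited when the enum grows. */
#define FT_CAP(t) \
	((t) == FT_RX ? cap_for(FT_RX) : \
	 (t) == FT_TX ? cap_for(FT_TX) : \
	 BUILD_BUG_ON_ZERO(FT_TX != FT_MAX_TYPE))

int main(void)
{
	printf("%d\n", FT_CAP(FT_TX));	/* 101 */
	return 0;
}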
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c
index c14590acc772..f6abfd00d7e6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c
@@ -50,10 +50,12 @@ mlx5_ft_pool_get_avail_sz(struct mlx5_core_dev *dev, enum fs_flow_table_type tab
int i, found_i = -1;
for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--) {
- if (dev->priv.ft_pool->ft_left[i] && FT_POOLS[i] >= desired_size &&
+ if (dev->priv.ft_pool->ft_left[i] &&
+ (FT_POOLS[i] >= desired_size ||
+ desired_size == MLX5_FS_MAX_POOL_SIZE) &&
FT_POOLS[i] <= max_ft_size) {
found_i = i;
- if (desired_size != POOL_NEXT_SIZE)
+ if (desired_size != MLX5_FS_MAX_POOL_SIZE)
break;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h
index 25f4274b372b..173e312db720 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h
@@ -7,8 +7,6 @@
#include <linux/mlx5/driver.h>
#include "fs_core.h"
-#define POOL_NEXT_SIZE 0
-
int mlx5_ft_pool_init(struct mlx5_core_dev *dev);
void mlx5_ft_pool_destroy(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index b253d1673398..57476487e31f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -287,6 +287,13 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
+ if (MLX5_CAP_GEN(dev, adv_rdma)) {
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_ADV_RDMA,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 566710d34a7b..6830a49fe682 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -345,15 +345,12 @@ static void mlx5_fw_live_patch_event(struct work_struct *work)
}
#if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)
-static int mlx5_check_hotplug_interrupt(struct mlx5_core_dev *dev)
+static int mlx5_check_hotplug_interrupt(struct mlx5_core_dev *dev,
+ struct pci_dev *bridge)
{
- struct pci_dev *bridge = dev->pdev->bus->self;
u16 reg16;
int err;
- if (!bridge)
- return -EOPNOTSUPP;
-
err = pcie_capability_read_word(bridge, PCI_EXP_SLTCTL, &reg16);
if (err)
return err;
@@ -416,9 +413,15 @@ static int mlx5_check_dev_ids(struct mlx5_core_dev *dev, u16 dev_id)
static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev,
u8 reset_method)
{
+ struct pci_dev *bridge = dev->pdev->bus->self;
u16 dev_id;
int err;
+ if (!bridge) {
+ mlx5_core_warn(dev, "PCI bus bridge is not accessible\n");
+ return false;
+ }
+
if (!MLX5_CAP_GEN(dev, fast_teardown)) {
mlx5_core_warn(dev, "fast teardown is not supported by firmware\n");
return false;
@@ -426,7 +429,7 @@ static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev,
#if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)
if (reset_method != MLX5_MFRL_REG_PCI_RESET_METHOD_HOT_RESET) {
- err = mlx5_check_hotplug_interrupt(dev);
+ err = mlx5_check_hotplug_interrupt(dev, bridge);
if (err)
return false;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index a6329ca2d9bf..91613d5a36cd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -96,6 +96,11 @@ static int mlx5_health_get_rfr(u8 rfr_severity)
return rfr_severity >> MLX5_RFR_BIT_OFFSET;
}
+static int mlx5_health_get_crr(u8 rfr_severity)
+{
+ return (rfr_severity >> MLX5_CRR_BIT_OFFSET) & 0x01;
+}
+
static bool sensor_fw_synd_rfr(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
@@ -375,6 +380,8 @@ static const char *hsynd_str(u8 synd)
return "High temperature";
case MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_PCI_POISONED_ERR:
return "ICM fetch PCI data poisoned error";
+ case MLX5_INITIAL_SEG_HEALTH_SYNDROME_TRUST_LOCKDOWN_ERR:
+ return "Trust lockdown error";
default:
return "unrecognized error";
}
@@ -442,12 +449,15 @@ static void print_health_info(struct mlx5_core_dev *dev)
mlx5_log(dev, severity, "time %u\n", ioread32be(&h->time));
mlx5_log(dev, severity, "hw_id 0x%08x\n", ioread32be(&h->hw_id));
mlx5_log(dev, severity, "rfr %d\n", mlx5_health_get_rfr(rfr_severity));
+ mlx5_log(dev, severity, "crr %d\n", mlx5_health_get_crr(rfr_severity));
mlx5_log(dev, severity, "severity %d (%s)\n", severity, mlx5_loglevel_str(severity));
mlx5_log(dev, severity, "irisc_index %d\n", ioread8(&h->irisc_index));
mlx5_log(dev, severity, "synd 0x%x: %s\n", ioread8(&h->synd),
hsynd_str(ioread8(&h->synd)));
mlx5_log(dev, severity, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
mlx5_log(dev, severity, "raw fw_ver 0x%08x\n", ioread32be(&h->fw_ver));
+ if (mlx5_health_get_crr(rfr_severity))
+ mlx5_core_warn(dev, "Cold reset is required\n");
}
static int
@@ -799,14 +809,17 @@ static void poll_health(struct timer_list *t)
health->prev = count;
if (health->miss_counter == MAX_MISSES) {
mlx5_core_err(dev, "device's health compromised - reached miss count\n");
+ health->synd = ioread8(&h->synd);
print_health_info(dev);
queue_work(health->wq, &health->report_work);
}
prev_synd = health->synd;
health->synd = ioread8(&h->synd);
- if (health->synd && health->synd != prev_synd)
+ if (health->synd && health->synd != prev_synd) {
+ print_health_info(dev);
queue_work(health->wq, &health->report_work);
+ }
out:
mod_timer(&health->timer, get_next_poll_jiffies(dev));
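mlx5_health_get_crr() above is a single-bit field extraction from the packed rfr_severity byte. A toy illustration of the shift-and-mask, where the offset value of 6 is an assumption for demonstration only (the real position is MLX5_CRR_BIT_OFFSET):

#include <stdio.h>
#include <stdint.h>

#define CRR_BIT_OFFSET 6	/* assumed position, illustration only */

static int get_crr(uint8_t rfr_severity)
{
	return (rfr_severity >> CRR_BIT_OFFSET) & 0x01;
}

int main(void)
{
	printf("%d\n", get_crr(1 << CRR_BIT_OFFSET));	/* 1 */
	printf("%d\n", get_crr(0));			/* 0 */
	return 0;
}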
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/hwmon.c b/drivers/net/ethernet/mellanox/mlx5/core/hwmon.c
index 353f81dccd1c..4ba2636d7fb6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/hwmon.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/hwmon.c
@@ -416,3 +416,8 @@ void mlx5_hwmon_dev_unregister(struct mlx5_core_dev *mdev)
mlx5_hwmon_free(hwmon);
mdev->hwmon = NULL;
}
+
+const char *hwmon_get_sensor_name(struct mlx5_hwmon *hwmon, int channel)
+{
+ return hwmon->temp_channel_desc[channel].sensor_name;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/hwmon.h b/drivers/net/ethernet/mellanox/mlx5/core/hwmon.h
index 999654a9b9da..f38271c22c10 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/hwmon.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/hwmon.h
@@ -10,6 +10,7 @@
int mlx5_hwmon_dev_register(struct mlx5_core_dev *mdev);
void mlx5_hwmon_dev_unregister(struct mlx5_core_dev *mdev);
+const char *hwmon_get_sensor_name(struct mlx5_hwmon *hwmon, int channel);
#else
static inline int mlx5_hwmon_dev_register(struct mlx5_core_dev *mdev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index ed2ba272946b..d058cbb4a00c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -523,8 +523,7 @@ static struct net_device *mlx5_lag_active_backup_get_netdev(struct mlx5_core_dev
ndev = ldev->pf[last_idx].netdev;
}
- if (ndev)
- dev_hold(ndev);
+ dev_hold(ndev);
unlock:
spin_unlock_irqrestore(&lag_lock, flags);
@@ -584,8 +583,9 @@ void mlx5_modify_lag(struct mlx5_lag *ldev,
}
}
-static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev,
- unsigned long *flags)
+static int mlx5_lag_set_port_sel_mode(struct mlx5_lag *ldev,
+ enum mlx5_lag_mode mode,
+ unsigned long *flags)
{
int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct mlx5_core_dev *dev0;
@@ -593,7 +593,12 @@ static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev,
if (first_idx < 0)
return -EINVAL;
+ if (mode == MLX5_LAG_MODE_MPESW ||
+ mode == MLX5_LAG_MODE_MULTIPATH)
+ return 0;
+
dev0 = ldev->pf[first_idx].dev;
+
if (!MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table)) {
if (ldev->ports > 2)
return -EINVAL;
@@ -608,32 +613,10 @@ static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev,
return 0;
}
-static void mlx5_lag_set_port_sel_mode_offloads(struct mlx5_lag *ldev,
- struct lag_tracker *tracker,
- enum mlx5_lag_mode mode,
- unsigned long *flags)
-{
- int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
- struct lag_func *dev0;
-
- if (first_idx < 0 || mode == MLX5_LAG_MODE_MPESW)
- return;
-
- dev0 = &ldev->pf[first_idx];
- if (MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table) &&
- tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH) {
- if (ldev->ports > 2)
- ldev->buckets = MLX5_LAG_MAX_HASH_BUCKETS;
- set_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, flags);
- }
-}
-
static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode,
struct lag_tracker *tracker, bool shared_fdb,
unsigned long *flags)
{
- bool roce_lag = mode == MLX5_LAG_MODE_ROCE;
-
*flags = 0;
if (shared_fdb) {
set_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, flags);
@@ -643,11 +626,7 @@ static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode,
if (mode == MLX5_LAG_MODE_MPESW)
set_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE, flags);
- if (roce_lag)
- return mlx5_lag_set_port_sel_mode_roce(ldev, flags);
-
- mlx5_lag_set_port_sel_mode_offloads(ldev, tracker, mode, flags);
- return 0;
+ return mlx5_lag_set_port_sel_mode(ldev, mode, flags);
}
char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags)
@@ -1052,6 +1031,10 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
if (err) {
if (shared_fdb || roce_lag)
mlx5_lag_add_devices(ldev);
+ if (shared_fdb) {
+ mlx5_ldev_for_each(i, 0, ldev)
+ mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
+ }
return;
} else if (roce_lag) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
index 1770297a112e..aad52d3a90e6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
@@ -65,7 +65,6 @@ err_metadata:
return err;
}
-#define MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS 4
static int enable_mpesw(struct mlx5_lag *ldev)
{
int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
@@ -77,9 +76,6 @@ static int enable_mpesw(struct mlx5_lag *ldev)
return -EINVAL;
dev0 = ldev->pf[idx].dev;
- if (ldev->ports > MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS)
- return -EOPNOTSUPP;
-
if (mlx5_eswitch_mode(dev0) != MLX5_ESWITCH_OFFLOADS ||
!MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table) ||
!MLX5_CAP_GEN(dev0, create_lag_when_not_master_up) ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
index bde79cac33a9..d832a12ffec0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
@@ -97,7 +97,7 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
mlx5_del_flow_rules(lag_definer->rules[idx]);
}
j = ldev->buckets;
- };
+ }
goto destroy_fg;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index d61a1a9297c9..65a94e46edcf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -43,6 +43,8 @@
#include <linux/cpufeature.h>
#endif /* CONFIG_X86 */
+#define MLX5_RT_CLOCK_IDENTITY_SIZE MLX5_FLD_SZ_BYTES(mrtcq_reg, rt_clock_identity)
+
enum {
MLX5_PIN_MODE_IN = 0x0,
MLX5_PIN_MODE_OUT = 0x1,
@@ -77,6 +79,56 @@ enum {
MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX = 200000,
};
+struct mlx5_clock_dev_state {
+ struct mlx5_core_dev *mdev;
+ struct mlx5_devcom_comp_dev *compdev;
+ struct mlx5_nb pps_nb;
+ struct work_struct out_work;
+};
+
+struct mlx5_clock_priv {
+ struct mlx5_clock clock;
+ struct mlx5_core_dev *mdev;
+ struct mutex lock; /* protect mdev and used in PTP callbacks */
+ struct mlx5_core_dev *event_mdev;
+};
+
+static struct mlx5_clock_priv *clock_priv(struct mlx5_clock *clock)
+{
+ return container_of(clock, struct mlx5_clock_priv, clock);
+}
+
+static void mlx5_clock_lockdep_assert(struct mlx5_clock *clock)
+{
+ if (!clock->shared)
+ return;
+
+ lockdep_assert(lockdep_is_held(&clock_priv(clock)->lock));
+}
+
+static struct mlx5_core_dev *mlx5_clock_mdev_get(struct mlx5_clock *clock)
+{
+ mlx5_clock_lockdep_assert(clock);
+
+ return clock_priv(clock)->mdev;
+}
+
+static void mlx5_clock_lock(struct mlx5_clock *clock)
+{
+ if (!clock->shared)
+ return;
+
+ mutex_lock(&clock_priv(clock)->lock);
+}
+
+static void mlx5_clock_unlock(struct mlx5_clock *clock)
+{
+ if (!clock->shared)
+ return;
+
+ mutex_unlock(&clock_priv(clock)->lock);
+}
+
static bool mlx5_real_time_mode(struct mlx5_core_dev *mdev)
{
return (mlx5_is_real_time_rq(mdev) || mlx5_is_real_time_sq(mdev));
@@ -94,6 +146,22 @@ static bool mlx5_modify_mtutc_allowed(struct mlx5_core_dev *mdev)
return MLX5_CAP_MCAM_FEATURE(mdev, ptpcyc2realtime_modify);
}
+static int mlx5_clock_identity_get(struct mlx5_core_dev *mdev,
+ u8 identify[MLX5_RT_CLOCK_IDENTITY_SIZE])
+{
+ u32 out[MLX5_ST_SZ_DW(mrtcq_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(mrtcq_reg)] = {};
+ int err;
+
+ err = mlx5_core_access_reg(mdev, in, sizeof(in),
+ out, sizeof(out), MLX5_REG_MRTCQ, 0, 0);
+ if (!err)
+ memcpy(identify, MLX5_ADDR_OF(mrtcq_reg, out, rt_clock_identity),
+ MLX5_RT_CLOCK_IDENTITY_SIZE);
+
+ return err;
+}
+
static u32 mlx5_ptp_shift_constant(u32 dev_freq_khz)
{
/* Optimal shift constant leads to corrections above just 1 scaled ppm.
@@ -119,21 +187,30 @@ static u32 mlx5_ptp_shift_constant(u32 dev_freq_khz)
ilog2((U32_MAX / NSEC_PER_MSEC) * dev_freq_khz));
}
+static s32 mlx5_clock_getmaxphase(struct mlx5_core_dev *mdev)
+{
+ return MLX5_CAP_MCAM_FEATURE(mdev, mtutc_time_adjustment_extended_range) ?
+ MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX :
+ MLX5_MTUTC_OPERATION_ADJUST_TIME_MAX;
+}
+
static s32 mlx5_ptp_getmaxphase(struct ptp_clock_info *ptp)
{
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
struct mlx5_core_dev *mdev;
+ s32 ret;
- mdev = container_of(clock, struct mlx5_core_dev, clock);
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
+ ret = mlx5_clock_getmaxphase(mdev);
+ mlx5_clock_unlock(clock);
- return MLX5_CAP_MCAM_FEATURE(mdev, mtutc_time_adjustment_extended_range) ?
- MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX :
- MLX5_MTUTC_OPERATION_ADJUST_TIME_MAX;
+ return ret;
}
static bool mlx5_is_mtutc_time_adj_cap(struct mlx5_core_dev *mdev, s64 delta)
{
- s64 max = mlx5_ptp_getmaxphase(&mdev->clock.ptp_info);
+ s64 max = mlx5_clock_getmaxphase(mdev);
if (delta < -max || delta > max)
return false;
@@ -209,7 +286,7 @@ static int mlx5_mtctr_syncdevicetime(ktime_t *device_time,
if (real_time_mode)
*device_time = ns_to_ktime(REAL_TIME_TO_NS(device >> 32, device & U32_MAX));
else
- *device_time = mlx5_timecounter_cyc2time(&mdev->clock, device);
+ *device_time = mlx5_timecounter_cyc2time(mdev->clock, device);
return 0;
}
@@ -220,16 +297,23 @@ static int mlx5_ptp_getcrosststamp(struct ptp_clock_info *ptp,
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
struct system_time_snapshot history_begin = {0};
struct mlx5_core_dev *mdev;
+ int err;
- mdev = container_of(clock, struct mlx5_core_dev, clock);
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
- if (!mlx5_is_ptm_source_time_available(mdev))
- return -EBUSY;
+ if (!mlx5_is_ptm_source_time_available(mdev)) {
+ err = -EBUSY;
+ goto unlock;
+ }
ktime_get_snapshot(&history_begin);
- return get_device_system_crosststamp(mlx5_mtctr_syncdevicetime, mdev,
- &history_begin, cts);
+ err = get_device_system_crosststamp(mlx5_mtctr_syncdevicetime, mdev,
+ &history_begin, cts);
+unlock:
+ mlx5_clock_unlock(clock);
+ return err;
}
#endif /* CONFIG_X86 */
@@ -263,8 +347,7 @@ static u64 read_internal_timer(const struct cyclecounter *cc)
{
struct mlx5_timer *timer = container_of(cc, struct mlx5_timer, cycles);
struct mlx5_clock *clock = container_of(timer, struct mlx5_clock, timer);
- struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
- clock);
+ struct mlx5_core_dev *mdev = mlx5_clock_mdev_get(clock);
return mlx5_read_time(mdev, NULL, false) & cc->mask;
}
@@ -272,7 +355,7 @@ static u64 read_internal_timer(const struct cyclecounter *cc)
static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
{
struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
- struct mlx5_clock *clock = &mdev->clock;
+ struct mlx5_clock *clock = mdev->clock;
struct mlx5_timer *timer;
u32 sign;
@@ -295,12 +378,10 @@ static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
static void mlx5_pps_out(struct work_struct *work)
{
- struct mlx5_pps *pps_info = container_of(work, struct mlx5_pps,
- out_work);
- struct mlx5_clock *clock = container_of(pps_info, struct mlx5_clock,
- pps_info);
- struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
- clock);
+ struct mlx5_clock_dev_state *clock_state = container_of(work, struct mlx5_clock_dev_state,
+ out_work);
+ struct mlx5_core_dev *mdev = clock_state->mdev;
+ struct mlx5_clock *clock = mdev->clock;
u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
unsigned long flags;
int i;
@@ -330,7 +411,8 @@ static long mlx5_timestamp_overflow(struct ptp_clock_info *ptp_info)
unsigned long flags;
clock = container_of(ptp_info, struct mlx5_clock, ptp_info);
- mdev = container_of(clock, struct mlx5_core_dev, clock);
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
timer = &clock->timer;
if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
@@ -342,6 +424,7 @@ static long mlx5_timestamp_overflow(struct ptp_clock_info *ptp_info)
write_sequnlock_irqrestore(&clock->lock, flags);
out:
+ mlx5_clock_unlock(clock);
return timer->overflow_period;
}
@@ -361,15 +444,12 @@ static int mlx5_ptp_settime_real_time(struct mlx5_core_dev *mdev,
return mlx5_set_mtutc(mdev, in, sizeof(in));
}
-static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
+static int mlx5_clock_settime(struct mlx5_core_dev *mdev, struct mlx5_clock *clock,
+ const struct timespec64 *ts)
{
- struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
struct mlx5_timer *timer = &clock->timer;
- struct mlx5_core_dev *mdev;
unsigned long flags;
- mdev = container_of(clock, struct mlx5_core_dev, clock);
-
if (mlx5_modify_mtutc_allowed(mdev)) {
int err = mlx5_ptp_settime_real_time(mdev, ts);
@@ -385,6 +465,20 @@ static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64
return 0;
}
+static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
+{
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+ struct mlx5_core_dev *mdev;
+ int err;
+
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
+ err = mlx5_clock_settime(mdev, clock, ts);
+ mlx5_clock_unlock(clock);
+
+ return err;
+}
+
static
struct timespec64 mlx5_ptp_gettimex_real_time(struct mlx5_core_dev *mdev,
struct ptp_system_timestamp *sts)
@@ -404,7 +498,8 @@ static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
struct mlx5_core_dev *mdev;
u64 cycles, ns;
- mdev = container_of(clock, struct mlx5_core_dev, clock);
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
if (mlx5_real_time_mode(mdev)) {
*ts = mlx5_ptp_gettimex_real_time(mdev, sts);
goto out;
@@ -414,6 +509,7 @@ static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
ns = mlx5_timecounter_cyc2time(clock, cycles);
*ts = ns_to_timespec64(ns);
out:
+ mlx5_clock_unlock(clock);
return 0;
}
@@ -444,14 +540,16 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
struct mlx5_timer *timer = &clock->timer;
struct mlx5_core_dev *mdev;
unsigned long flags;
+ int err = 0;
- mdev = container_of(clock, struct mlx5_core_dev, clock);
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
if (mlx5_modify_mtutc_allowed(mdev)) {
- int err = mlx5_ptp_adjtime_real_time(mdev, delta);
+ err = mlx5_ptp_adjtime_real_time(mdev, delta);
if (err)
- return err;
+ goto unlock;
}
write_seqlock_irqsave(&clock->lock, flags);
@@ -459,17 +557,23 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
- return 0;
+unlock:
+ mlx5_clock_unlock(clock);
+ return err;
}
static int mlx5_ptp_adjphase(struct ptp_clock_info *ptp, s32 delta)
{
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
struct mlx5_core_dev *mdev;
+ int err;
- mdev = container_of(clock, struct mlx5_core_dev, clock);
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
+ err = mlx5_ptp_adjtime_real_time(mdev, delta);
+ mlx5_clock_unlock(clock);
- return mlx5_ptp_adjtime_real_time(mdev, delta);
+ return err;
}
static int mlx5_ptp_freq_adj_real_time(struct mlx5_core_dev *mdev, long scaled_ppm)
@@ -498,15 +602,17 @@ static int mlx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
struct mlx5_timer *timer = &clock->timer;
struct mlx5_core_dev *mdev;
unsigned long flags;
+ int err = 0;
u32 mult;
- mdev = container_of(clock, struct mlx5_core_dev, clock);
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
if (mlx5_modify_mtutc_allowed(mdev)) {
- int err = mlx5_ptp_freq_adj_real_time(mdev, scaled_ppm);
+ err = mlx5_ptp_freq_adj_real_time(mdev, scaled_ppm);
if (err)
- return err;
+ goto unlock;
}
mult = (u32)adjust_by_scaled_ppm(timer->nominal_c_mult, scaled_ppm);
@@ -518,7 +624,9 @@ static int mlx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
write_sequnlock_irqrestore(&clock->lock, flags);
ptp_schedule_worker(clock->ptp, timer->overflow_period);
- return 0;
+unlock:
+ mlx5_clock_unlock(clock);
+ return err;
}
static int mlx5_extts_configure(struct ptp_clock_info *ptp,
@@ -527,18 +635,14 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp,
{
struct mlx5_clock *clock =
container_of(ptp, struct mlx5_clock, ptp_info);
- struct mlx5_core_dev *mdev =
- container_of(clock, struct mlx5_core_dev, clock);
u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+ struct mlx5_core_dev *mdev;
u32 field_select = 0;
u8 pin_mode = 0;
u8 pattern = 0;
int pin = -1;
int err = 0;
- if (!MLX5_PPS_CAP(mdev))
- return -EOPNOTSUPP;
-
/* Reject requests with unsupported flags */
if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
PTP_RISING_EDGE |
@@ -569,6 +673,14 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp,
field_select = MLX5_MTPPS_FS_ENABLE;
}
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
+
+ if (!MLX5_PPS_CAP(mdev)) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
MLX5_SET(mtpps_reg, in, pin, pin);
MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
MLX5_SET(mtpps_reg, in, pattern, pattern);
@@ -577,15 +689,23 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp,
err = mlx5_set_mtpps(mdev, in, sizeof(in));
if (err)
- return err;
+ goto unlock;
+
+ err = mlx5_set_mtppse(mdev, pin, 0, MLX5_EVENT_MODE_REPETETIVE & on);
+ if (err)
+ goto unlock;
+
+ clock->pps_info.pin_armed[pin] = on;
+ clock_priv(clock)->event_mdev = mdev;
- return mlx5_set_mtppse(mdev, pin, 0,
- MLX5_EVENT_MODE_REPETETIVE & on);
+unlock:
+ mlx5_clock_unlock(clock);
+ return err;
}
static u64 find_target_cycles(struct mlx5_core_dev *mdev, s64 target_ns)
{
- struct mlx5_clock *clock = &mdev->clock;
+ struct mlx5_clock *clock = mdev->clock;
u64 cycles_now, cycles_delta;
u64 nsec_now, nsec_delta;
struct mlx5_timer *timer;
@@ -644,7 +764,7 @@ static int mlx5_perout_conf_out_pulse_duration(struct mlx5_core_dev *mdev,
struct ptp_clock_request *rq,
u32 *out_pulse_duration_ns)
{
- struct mlx5_pps *pps_info = &mdev->clock.pps_info;
+ struct mlx5_pps *pps_info = &mdev->clock->pps_info;
u32 out_pulse_duration;
struct timespec64 ts;
@@ -677,7 +797,7 @@ static int perout_conf_npps_real_time(struct mlx5_core_dev *mdev, struct ptp_clo
u32 *field_select, u32 *out_pulse_duration_ns,
u64 *period, u64 *time_stamp)
{
- struct mlx5_pps *pps_info = &mdev->clock.pps_info;
+ struct mlx5_pps *pps_info = &mdev->clock->pps_info;
struct ptp_clock_time *time = &rq->perout.start;
struct timespec64 ts;
@@ -712,26 +832,18 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
{
struct mlx5_clock *clock =
container_of(ptp, struct mlx5_clock, ptp_info);
- struct mlx5_core_dev *mdev =
- container_of(clock, struct mlx5_core_dev, clock);
- bool rt_mode = mlx5_real_time_mode(mdev);
u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
u32 out_pulse_duration_ns = 0;
+ struct mlx5_core_dev *mdev;
u32 field_select = 0;
u64 npps_period = 0;
u64 time_stamp = 0;
u8 pin_mode = 0;
u8 pattern = 0;
+ bool rt_mode;
int pin = -1;
int err = 0;
- if (!MLX5_PPS_CAP(mdev))
- return -EOPNOTSUPP;
-
- /* Reject requests with unsupported flags */
- if (mlx5_perout_verify_flags(mdev, rq->perout.flags))
- return -EOPNOTSUPP;
-
if (rq->perout.index >= clock->ptp_info.n_pins)
return -EINVAL;
@@ -740,14 +852,29 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
if (pin < 0)
return -EBUSY;
- if (on) {
- bool rt_mode = mlx5_real_time_mode(mdev);
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
+ rt_mode = mlx5_real_time_mode(mdev);
+
+ if (!MLX5_PPS_CAP(mdev)) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ /* Reject requests with unsupported flags */
+ if (mlx5_perout_verify_flags(mdev, rq->perout.flags)) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+ if (on) {
pin_mode = MLX5_PIN_MODE_OUT;
pattern = MLX5_OUT_PATTERN_PERIODIC;
- if (rt_mode && rq->perout.start.sec > U32_MAX)
- return -EINVAL;
+ if (rt_mode && rq->perout.start.sec > U32_MAX) {
+ err = -EINVAL;
+ goto unlock;
+ }
field_select |= MLX5_MTPPS_FS_PIN_MODE |
MLX5_MTPPS_FS_PATTERN |
@@ -760,7 +887,7 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
else
err = perout_conf_1pps(mdev, rq, &time_stamp, rt_mode);
if (err)
- return err;
+ goto unlock;
}
MLX5_SET(mtpps_reg, in, pin, pin);
@@ -773,13 +900,16 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
MLX5_SET(mtpps_reg, in, out_pulse_duration_ns, out_pulse_duration_ns);
err = mlx5_set_mtpps(mdev, in, sizeof(in));
if (err)
- return err;
+ goto unlock;
if (rt_mode)
- return 0;
+ goto unlock;
+
+ err = mlx5_set_mtppse(mdev, pin, 0, MLX5_EVENT_MODE_REPETETIVE & on);
- return mlx5_set_mtppse(mdev, pin, 0,
- MLX5_EVENT_MODE_REPETETIVE & on);
+unlock:
+ mlx5_clock_unlock(clock);
+ return err;
}
static int mlx5_pps_configure(struct ptp_clock_info *ptp,
@@ -866,10 +996,8 @@ static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
mtpps_size, MLX5_REG_MTPPS, 0, 0);
}
-static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
+static int mlx5_get_pps_pin_mode(struct mlx5_core_dev *mdev, u8 pin)
{
- struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);
-
u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {};
u8 mode;
int err;
@@ -888,8 +1016,9 @@ static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
return PTP_PF_NONE;
}
-static void mlx5_init_pin_config(struct mlx5_clock *clock)
+static void mlx5_init_pin_config(struct mlx5_core_dev *mdev)
{
+ struct mlx5_clock *clock = mdev->clock;
int i;
if (!clock->ptp_info.n_pins)
@@ -910,15 +1039,15 @@ static void mlx5_init_pin_config(struct mlx5_clock *clock)
sizeof(clock->ptp_info.pin_config[i].name),
"mlx5_pps%d", i);
clock->ptp_info.pin_config[i].index = i;
- clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(clock, i);
+ clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(mdev, i);
clock->ptp_info.pin_config[i].chan = 0;
}
}
static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
{
- struct mlx5_clock *clock = &mdev->clock;
u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+ struct mlx5_clock *clock = mdev->clock;
mlx5_query_mtpps(mdev, out, sizeof(out));
@@ -968,16 +1097,16 @@ static u64 perout_conf_next_event_timer(struct mlx5_core_dev *mdev,
static int mlx5_pps_event(struct notifier_block *nb,
unsigned long type, void *data)
{
- struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb);
+ struct mlx5_clock_dev_state *clock_state = mlx5_nb_cof(nb, struct mlx5_clock_dev_state,
+ pps_nb);
+ struct mlx5_core_dev *mdev = clock_state->mdev;
+ struct mlx5_clock *clock = mdev->clock;
struct ptp_clock_event ptp_event;
struct mlx5_eqe *eqe = data;
int pin = eqe->data.pps.pin;
- struct mlx5_core_dev *mdev;
unsigned long flags;
u64 ns;
- mdev = container_of(clock, struct mlx5_core_dev, clock);
-
switch (clock->ptp_info.pin_config[pin].func) {
case PTP_PF_EXTTS:
ptp_event.index = pin;
@@ -997,11 +1126,15 @@ static int mlx5_pps_event(struct notifier_block *nb,
ptp_clock_event(clock->ptp, &ptp_event);
break;
case PTP_PF_PEROUT:
+ if (clock->shared) {
+ mlx5_core_warn(mdev, " Received unexpected PPS out event\n");
+ break;
+ }
ns = perout_conf_next_event_timer(mdev, clock);
write_seqlock_irqsave(&clock->lock, flags);
clock->pps_info.start[pin] = ns;
write_sequnlock_irqrestore(&clock->lock, flags);
- schedule_work(&clock->pps_info.out_work);
+ schedule_work(&clock_state->out_work);
break;
default:
mlx5_core_err(mdev, " Unhandled clock PPS event, func %d\n",
@@ -1013,7 +1146,7 @@ static int mlx5_pps_event(struct notifier_block *nb,
static void mlx5_timecounter_init(struct mlx5_core_dev *mdev)
{
- struct mlx5_clock *clock = &mdev->clock;
+ struct mlx5_clock *clock = mdev->clock;
struct mlx5_timer *timer = &clock->timer;
u32 dev_freq;
@@ -1029,10 +1162,10 @@ static void mlx5_timecounter_init(struct mlx5_core_dev *mdev)
ktime_to_ns(ktime_get_real()));
}
-static void mlx5_init_overflow_period(struct mlx5_clock *clock)
+static void mlx5_init_overflow_period(struct mlx5_core_dev *mdev)
{
- struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);
struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
+ struct mlx5_clock *clock = mdev->clock;
struct mlx5_timer *timer = &clock->timer;
u64 overflow_cycles;
u64 frac = 0;
@@ -1065,7 +1198,7 @@ static void mlx5_init_overflow_period(struct mlx5_clock *clock)
static void mlx5_init_clock_info(struct mlx5_core_dev *mdev)
{
- struct mlx5_clock *clock = &mdev->clock;
+ struct mlx5_clock *clock = mdev->clock;
struct mlx5_ib_clock_info *info;
struct mlx5_timer *timer;
@@ -1088,7 +1221,7 @@ static void mlx5_init_clock_info(struct mlx5_core_dev *mdev)
static void mlx5_init_timer_max_freq_adjustment(struct mlx5_core_dev *mdev)
{
- struct mlx5_clock *clock = &mdev->clock;
+ struct mlx5_clock *clock = mdev->clock;
u32 out[MLX5_ST_SZ_DW(mtutc_reg)] = {};
u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
u8 log_max_freq_adjustment = 0;
@@ -1107,7 +1240,7 @@ static void mlx5_init_timer_max_freq_adjustment(struct mlx5_core_dev *mdev)
static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev)
{
- struct mlx5_clock *clock = &mdev->clock;
+ struct mlx5_clock *clock = mdev->clock;
/* Configure the PHC */
clock->ptp_info = mlx5_ptp_clock_info;
@@ -1123,38 +1256,30 @@ static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev)
mlx5_timecounter_init(mdev);
mlx5_init_clock_info(mdev);
- mlx5_init_overflow_period(clock);
+ mlx5_init_overflow_period(mdev);
if (mlx5_real_time_mode(mdev)) {
struct timespec64 ts;
ktime_get_real_ts64(&ts);
- mlx5_ptp_settime(&clock->ptp_info, &ts);
+ mlx5_clock_settime(mdev, clock, &ts);
}
}
static void mlx5_init_pps(struct mlx5_core_dev *mdev)
{
- struct mlx5_clock *clock = &mdev->clock;
-
if (!MLX5_PPS_CAP(mdev))
return;
mlx5_get_pps_caps(mdev);
- mlx5_init_pin_config(clock);
+ mlx5_init_pin_config(mdev);
}
-void mlx5_init_clock(struct mlx5_core_dev *mdev)
+static void mlx5_init_clock_dev(struct mlx5_core_dev *mdev)
{
- struct mlx5_clock *clock = &mdev->clock;
-
- if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) {
- mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
- return;
- }
+ struct mlx5_clock *clock = mdev->clock;
seqlock_init(&clock->lock);
- INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);
/* Initialize the device clock */
mlx5_init_timer_clock(mdev);
@@ -1163,35 +1288,27 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
mlx5_init_pps(mdev);
clock->ptp = ptp_clock_register(&clock->ptp_info,
- &mdev->pdev->dev);
+ clock->shared ? NULL : &mdev->pdev->dev);
if (IS_ERR(clock->ptp)) {
- mlx5_core_warn(mdev, "ptp_clock_register failed %ld\n",
+ mlx5_core_warn(mdev, "%sptp_clock_register failed %ld\n",
+ clock->shared ? "shared clock " : "",
PTR_ERR(clock->ptp));
clock->ptp = NULL;
}
- MLX5_NB_INIT(&clock->pps_nb, mlx5_pps_event, PPS_EVENT);
- mlx5_eq_notifier_register(mdev, &clock->pps_nb);
-
if (clock->ptp)
ptp_schedule_worker(clock->ptp, 0);
}
-void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
+static void mlx5_destroy_clock_dev(struct mlx5_core_dev *mdev)
{
- struct mlx5_clock *clock = &mdev->clock;
+ struct mlx5_clock *clock = mdev->clock;
- if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
- return;
-
- mlx5_eq_notifier_unregister(mdev, &clock->pps_nb);
if (clock->ptp) {
ptp_clock_unregister(clock->ptp);
clock->ptp = NULL;
}
- cancel_work_sync(&clock->pps_info.out_work);
-
if (mdev->clock_info) {
free_page((unsigned long)mdev->clock_info);
mdev->clock_info = NULL;
@@ -1199,3 +1316,248 @@ void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
kfree(clock->ptp_info.pin_config);
}
+
+static void mlx5_clock_free(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_clock_priv *cpriv = clock_priv(mdev->clock);
+
+ mlx5_destroy_clock_dev(mdev);
+ mutex_destroy(&cpriv->lock);
+ kfree(cpriv);
+ mdev->clock = NULL;
+}
+
+static int mlx5_clock_alloc(struct mlx5_core_dev *mdev, bool shared)
+{
+ struct mlx5_clock_priv *cpriv;
+ struct mlx5_clock *clock;
+
+ cpriv = kzalloc(sizeof(*cpriv), GFP_KERNEL);
+ if (!cpriv)
+ return -ENOMEM;
+
+ mutex_init(&cpriv->lock);
+ cpriv->mdev = mdev;
+ clock = &cpriv->clock;
+ clock->shared = shared;
+ mdev->clock = clock;
+ mlx5_clock_lock(clock);
+ mlx5_init_clock_dev(mdev);
+ mlx5_clock_unlock(clock);
+
+ if (!clock->shared)
+ return 0;
+
+ if (!clock->ptp) {
+ mlx5_core_warn(mdev, "failed to create ptp dev shared by multiple functions");
+ mlx5_clock_free(mdev);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void mlx5_shared_clock_register(struct mlx5_core_dev *mdev, u64 key)
+{
+ struct mlx5_core_dev *peer_dev, *next = NULL;
+ struct mlx5_devcom_comp_dev *pos;
+
+ mdev->clock_state->compdev = mlx5_devcom_register_component(mdev->priv.devc,
+ MLX5_DEVCOM_SHARED_CLOCK,
+ key, NULL, mdev);
+ if (IS_ERR(mdev->clock_state->compdev))
+ return;
+
+ mlx5_devcom_comp_lock(mdev->clock_state->compdev);
+ mlx5_devcom_for_each_peer_entry(mdev->clock_state->compdev, peer_dev, pos) {
+ if (peer_dev->clock) {
+ next = peer_dev;
+ break;
+ }
+ }
+
+ if (next) {
+ mdev->clock = next->clock;
+ /* clock info is shared among all the functions using the same clock */
+ mdev->clock_info = next->clock_info;
+ } else {
+ mlx5_clock_alloc(mdev, true);
+ }
+ mlx5_devcom_comp_unlock(mdev->clock_state->compdev);
+
+ if (!mdev->clock) {
+ mlx5_devcom_unregister_component(mdev->clock_state->compdev);
+ mdev->clock_state->compdev = NULL;
+ }
+}
+
+static void mlx5_shared_clock_unregister(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_core_dev *peer_dev, *next = NULL;
+ struct mlx5_clock *clock = mdev->clock;
+ struct mlx5_devcom_comp_dev *pos;
+
+ mlx5_devcom_comp_lock(mdev->clock_state->compdev);
+ mlx5_devcom_for_each_peer_entry(mdev->clock_state->compdev, peer_dev, pos) {
+ if (peer_dev->clock && peer_dev != mdev) {
+ next = peer_dev;
+ break;
+ }
+ }
+
+ if (next) {
+ struct mlx5_clock_priv *cpriv = clock_priv(clock);
+
+ mlx5_clock_lock(clock);
+ if (mdev == cpriv->mdev)
+ cpriv->mdev = next;
+ mlx5_clock_unlock(clock);
+ } else {
+ mlx5_clock_free(mdev);
+ }
+
+ mdev->clock = NULL;
+ mdev->clock_info = NULL;
+ mlx5_devcom_comp_unlock(mdev->clock_state->compdev);
+
+ mlx5_devcom_unregister_component(mdev->clock_state->compdev);
+}
+
+static void mlx5_clock_arm_pps_in_event(struct mlx5_clock *clock,
+ struct mlx5_core_dev *new_mdev,
+ struct mlx5_core_dev *old_mdev)
+{
+ struct ptp_clock_info *ptp_info = &clock->ptp_info;
+ struct mlx5_clock_priv *cpriv = clock_priv(clock);
+ int i;
+
+ for (i = 0; i < ptp_info->n_pins; i++) {
+ if (ptp_info->pin_config[i].func != PTP_PF_EXTTS ||
+ !clock->pps_info.pin_armed[i])
+ continue;
+
+ if (new_mdev) {
+ mlx5_set_mtppse(new_mdev, i, 0, MLX5_EVENT_MODE_REPETETIVE);
+ cpriv->event_mdev = new_mdev;
+ } else {
+ cpriv->event_mdev = NULL;
+ }
+
+ if (old_mdev)
+ mlx5_set_mtppse(old_mdev, i, 0, MLX5_EVENT_MODE_DISABLE);
+ }
+}
+
+void mlx5_clock_load(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_clock *clock = mdev->clock;
+ struct mlx5_clock_priv *cpriv;
+
+ if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
+ return;
+
+ INIT_WORK(&mdev->clock_state->out_work, mlx5_pps_out);
+ MLX5_NB_INIT(&mdev->clock_state->pps_nb, mlx5_pps_event, PPS_EVENT);
+ mlx5_eq_notifier_register(mdev, &mdev->clock_state->pps_nb);
+
+ if (!clock->shared) {
+ mlx5_clock_arm_pps_in_event(clock, mdev, NULL);
+ return;
+ }
+
+ cpriv = clock_priv(clock);
+ mlx5_devcom_comp_lock(mdev->clock_state->compdev);
+ mlx5_clock_lock(clock);
+ if (mdev == cpriv->mdev && mdev != cpriv->event_mdev)
+ mlx5_clock_arm_pps_in_event(clock, mdev, cpriv->event_mdev);
+ mlx5_clock_unlock(clock);
+ mlx5_devcom_comp_unlock(mdev->clock_state->compdev);
+}
+
+void mlx5_clock_unload(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_core_dev *peer_dev, *next = NULL;
+ struct mlx5_clock *clock = mdev->clock;
+ struct mlx5_devcom_comp_dev *pos;
+
+ if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
+ return;
+
+ if (!clock->shared) {
+ mlx5_clock_arm_pps_in_event(clock, NULL, mdev);
+ goto out;
+ }
+
+ mlx5_devcom_comp_lock(mdev->clock_state->compdev);
+ mlx5_devcom_for_each_peer_entry(mdev->clock_state->compdev, peer_dev, pos) {
+ if (peer_dev->clock && peer_dev != mdev) {
+ next = peer_dev;
+ break;
+ }
+ }
+
+ mlx5_clock_lock(clock);
+ if (mdev == clock_priv(clock)->event_mdev)
+ mlx5_clock_arm_pps_in_event(clock, next, mdev);
+ mlx5_clock_unlock(clock);
+ mlx5_devcom_comp_unlock(mdev->clock_state->compdev);
+
+out:
+ mlx5_eq_notifier_unregister(mdev, &mdev->clock_state->pps_nb);
+ cancel_work_sync(&mdev->clock_state->out_work);
+}
+
+static struct mlx5_clock null_clock;
+
+int mlx5_init_clock(struct mlx5_core_dev *mdev)
+{
+ u8 identity[MLX5_RT_CLOCK_IDENTITY_SIZE];
+ struct mlx5_clock_dev_state *clock_state;
+ u64 key;
+ int err;
+
+ if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) {
+ mdev->clock = &null_clock;
+ mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
+ return 0;
+ }
+
+ clock_state = kzalloc(sizeof(*clock_state), GFP_KERNEL);
+ if (!clock_state)
+ return -ENOMEM;
+ clock_state->mdev = mdev;
+ mdev->clock_state = clock_state;
+
+ if (MLX5_CAP_MCAM_REG3(mdev, mrtcq) && mlx5_real_time_mode(mdev)) {
+ if (mlx5_clock_identity_get(mdev, identity)) {
+ mlx5_core_warn(mdev, "failed to get rt clock identity, create ptp dev per function\n");
+ } else {
+ memcpy(&key, &identity, sizeof(key));
+ mlx5_shared_clock_register(mdev, key);
+ }
+ }
+
+ if (!mdev->clock) {
+ err = mlx5_clock_alloc(mdev, false);
+ if (err) {
+ kfree(clock_state);
+ mdev->clock_state = NULL;
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
+{
+ if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
+ return;
+
+ if (mdev->clock->shared)
+ mlx5_shared_clock_unregister(mdev);
+ else
+ mlx5_clock_free(mdev);
+ kfree(mdev->clock_state);
+ mdev->clock_state = NULL;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
index bd95b9f8d143..c18a652c0faa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
@@ -33,6 +33,35 @@
#ifndef __LIB_CLOCK_H__
#define __LIB_CLOCK_H__
+#include <linux/ptp_clock_kernel.h>
+
+#define MAX_PIN_NUM 8
+struct mlx5_pps {
+ u8 pin_caps[MAX_PIN_NUM];
+ u64 start[MAX_PIN_NUM];
+ u8 enabled;
+ u64 min_npps_period;
+ u64 min_out_pulse_duration_ns;
+ bool pin_armed[MAX_PIN_NUM];
+};
+
+struct mlx5_timer {
+ struct cyclecounter cycles;
+ struct timecounter tc;
+ u32 nominal_c_mult;
+ unsigned long overflow_period;
+};
+
+struct mlx5_clock {
+ seqlock_t lock;
+ struct hwtstamp_config hwtstamp_config;
+ struct ptp_clock *ptp;
+ struct ptp_clock_info ptp_info;
+ struct mlx5_pps pps_info;
+ struct mlx5_timer timer;
+ bool shared;
+};
+
static inline bool mlx5_is_real_time_rq(struct mlx5_core_dev *mdev)
{
u8 rq_ts_format_cap = MLX5_CAP_GEN(mdev, rq_ts_format);
@@ -54,12 +83,14 @@ static inline bool mlx5_is_real_time_sq(struct mlx5_core_dev *mdev)
typedef ktime_t (*cqe_ts_to_ns)(struct mlx5_clock *, u64);
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
-void mlx5_init_clock(struct mlx5_core_dev *mdev);
+int mlx5_init_clock(struct mlx5_core_dev *mdev);
void mlx5_cleanup_clock(struct mlx5_core_dev *mdev);
+void mlx5_clock_load(struct mlx5_core_dev *mdev);
+void mlx5_clock_unload(struct mlx5_core_dev *mdev);
static inline int mlx5_clock_get_ptp_index(struct mlx5_core_dev *mdev)
{
- return mdev->clock.ptp ? ptp_clock_index(mdev->clock.ptp) : -1;
+ return mdev->clock->ptp ? ptp_clock_index(mdev->clock->ptp) : -1;
}
static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock,
@@ -87,8 +118,10 @@ static inline ktime_t mlx5_real_time_cyc2time(struct mlx5_clock *clock,
return ns_to_ktime(time);
}
#else
-static inline void mlx5_init_clock(struct mlx5_core_dev *mdev) {}
+static inline int mlx5_init_clock(struct mlx5_core_dev *mdev) { return 0; }
static inline void mlx5_cleanup_clock(struct mlx5_core_dev *mdev) {}
+static inline void mlx5_clock_load(struct mlx5_core_dev *mdev) {}
+static inline void mlx5_clock_unload(struct mlx5_core_dev *mdev) {}
static inline int mlx5_clock_get_ptp_index(struct mlx5_core_dev *mdev)
{
return -1;
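The #else branch above is the usual compiled-out stub pattern: when CONFIG_PTP_1588_CLOCK is off, the header supplies inline no-ops so call sites need no #ifdefs. A standalone sketch, with FEATURE_PTP as a stand-in for the config symbol:

#include <stdio.h>

#define FEATURE_PTP 0	/* flip to 1 to select the real declaration */

#if FEATURE_PTP
int clock_init(void);	/* real implementation lives elsewhere */
#else
static inline int clock_init(void) { return 0; }	/* no-op stub */
#endif

int main(void)
{
	return clock_init();	/* compiles either way, no #ifdef here */
}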
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
index d58032dd0df7..c79699b94a02 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
@@ -11,6 +11,7 @@ enum mlx5_devcom_component {
MLX5_DEVCOM_MPV,
MLX5_DEVCOM_HCA_PORTS,
MLX5_DEVCOM_SD_GROUP,
+ MLX5_DEVCOM_SHARED_CLOCK,
MLX5_DEVCOM_NUM_COMPONENTS,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
index 711d14dea248..0a3c260af377 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
@@ -161,7 +161,8 @@ mlx5_chains_create_table(struct mlx5_fs_chains *chains,
ft_attr.flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
- sz = (chain == mlx5_chains_get_nf_ft_chain(chains)) ? FT_TBL_SZ : POOL_NEXT_SIZE;
+ sz = (chain == mlx5_chains_get_nf_ft_chain(chains)) ?
+ FT_TBL_SZ : MLX5_FS_MAX_POOL_SIZE;
ft_attr.max_fte = sz;
/* We use chains_default_ft(chains) as the table's next_ft till
@@ -704,7 +705,7 @@ mlx5_chains_create_global_table(struct mlx5_fs_chains *chains)
goto err_ignore;
}
- chain = mlx5_chains_get_chain_range(chains),
+ chain = mlx5_chains_get_chain_range(chains);
prio = mlx5_chains_get_prio_range(chains);
level = mlx5_chains_get_level_range(chains);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
index 9f13cea16446..eb3bd9c7f66e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
@@ -61,6 +61,25 @@ static void mlx5_cleanup_ttc_rules(struct mlx5_ttc_table *ttc)
}
}
+static const char *mlx5_traffic_types_names[MLX5_NUM_TT] = {
+ [MLX5_TT_IPV4_TCP] = "TT_IPV4_TCP",
+ [MLX5_TT_IPV6_TCP] = "TT_IPV6_TCP",
+ [MLX5_TT_IPV4_UDP] = "TT_IPV4_UDP",
+ [MLX5_TT_IPV6_UDP] = "TT_IPV6_UDP",
+ [MLX5_TT_IPV4_IPSEC_AH] = "TT_IPV4_IPSEC_AH",
+ [MLX5_TT_IPV6_IPSEC_AH] = "TT_IPV6_IPSEC_AH",
+ [MLX5_TT_IPV4_IPSEC_ESP] = "TT_IPV4_IPSEC_ESP",
+ [MLX5_TT_IPV6_IPSEC_ESP] = "TT_IPV6_IPSEC_ESP",
+ [MLX5_TT_IPV4] = "TT_IPV4",
+ [MLX5_TT_IPV6] = "TT_IPV6",
+ [MLX5_TT_ANY] = "TT_ANY"
+};
+
+const char *mlx5_ttc_get_name(enum mlx5_traffic_types tt)
+{
+ return mlx5_traffic_types_names[tt];
+}
+
struct mlx5_etype_proto {
u16 etype;
u8 proto;
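mlx5_ttc_get_name() above is backed by a designated-initializer name table indexed by the traffic-type enum. A minimal standalone version of the pattern, with the enum abbreviated to three values:

#include <stdio.h>

enum traffic_type { TT_IPV4_TCP, TT_IPV6_TCP, TT_ANY, NUM_TT };

static const char *tt_names[NUM_TT] = {
	[TT_IPV4_TCP] = "TT_IPV4_TCP",
	[TT_IPV6_TCP] = "TT_IPV6_TCP",
	[TT_ANY]      = "TT_ANY",
};

static const char *tt_get_name(enum traffic_type tt)
{
	return tt_names[tt];
}

int main(void)
{
	printf("%s\n", tt_get_name(TT_ANY));	/* TT_ANY */
	return 0;
}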
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h
index 92eea6bea310..ab9434fe3ae6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h
@@ -49,6 +49,7 @@ struct ttc_params {
struct mlx5_flow_destination tunnel_dests[MLX5_NUM_TUNNEL_TT];
};
+const char *mlx5_ttc_get_name(enum mlx5_traffic_types tt);
struct mlx5_flow_table *mlx5_get_ttc_flow_table(struct mlx5_ttc_table *ttc);
struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index ec956c4bcebd..41e8660c819c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1038,7 +1038,11 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
mlx5_init_reserved_gids(dev);
- mlx5_init_clock(dev);
+ err = mlx5_init_clock(dev);
+ if (err) {
+ mlx5_core_err(dev, "failed to initialize hardware clock\n");
+ goto err_tables_cleanup;
+ }
dev->vxlan = mlx5_vxlan_create(dev);
dev->geneve = mlx5_geneve_create(dev);
@@ -1046,7 +1050,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
err = mlx5_init_rl_table(dev);
if (err) {
mlx5_core_err(dev, "Failed to init rate limiting\n");
- goto err_tables_cleanup;
+ goto err_clock_cleanup;
}
err = mlx5_mpfs_init(dev);
@@ -1123,10 +1127,11 @@ err_mpfs_cleanup:
mlx5_mpfs_cleanup(dev);
err_rl_cleanup:
mlx5_cleanup_rl_table(dev);
-err_tables_cleanup:
+err_clock_cleanup:
mlx5_geneve_destroy(dev->geneve);
mlx5_vxlan_destroy(dev->vxlan);
mlx5_cleanup_clock(dev);
+err_tables_cleanup:
mlx5_cleanup_reserved_gids(dev);
mlx5_cq_debugfs_cleanup(dev);
mlx5_fw_reset_cleanup(dev);
@@ -1205,24 +1210,24 @@ static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeou
dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP);
- mlx5_start_health_poll(dev);
-
err = mlx5_core_enable_hca(dev, 0);
if (err) {
mlx5_core_err(dev, "enable hca failed\n");
- goto stop_health_poll;
+ goto err_cmd_cleanup;
}
+ mlx5_start_health_poll(dev);
+
err = mlx5_core_set_issi(dev);
if (err) {
mlx5_core_err(dev, "failed to set issi\n");
- goto err_disable_hca;
+ goto stop_health_poll;
}
err = mlx5_satisfy_startup_pages(dev, 1);
if (err) {
mlx5_core_err(dev, "failed to allocate boot pages\n");
- goto err_disable_hca;
+ goto stop_health_poll;
}
err = mlx5_tout_query_dtor(dev);
@@ -1235,10 +1240,9 @@ static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeou
reclaim_boot_pages:
mlx5_reclaim_startup_pages(dev);
-err_disable_hca:
- mlx5_core_disable_hca(dev, 0);
stop_health_poll:
mlx5_stop_health_poll(dev, boot);
+ mlx5_core_disable_hca(dev, 0);
err_cmd_cleanup:
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
mlx5_cmd_disable(dev);
@@ -1249,8 +1253,8 @@ err_cmd_cleanup:
static void mlx5_function_disable(struct mlx5_core_dev *dev, bool boot)
{
mlx5_reclaim_startup_pages(dev);
- mlx5_core_disable_hca(dev, 0);
mlx5_stop_health_poll(dev, boot);
+ mlx5_core_disable_hca(dev, 0);
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
mlx5_cmd_disable(dev);
}
@@ -1359,6 +1363,8 @@ static int mlx5_load(struct mlx5_core_dev *dev)
goto err_eq_table;
}
+ mlx5_clock_load(dev);
+
err = mlx5_fw_tracer_init(dev->tracer);
if (err) {
mlx5_core_err(dev, "Failed to init FW tracer %d\n", err);
@@ -1442,6 +1448,7 @@ err_fpga_start:
mlx5_hv_vhca_cleanup(dev->hv_vhca);
mlx5_fw_reset_events_stop(dev);
mlx5_fw_tracer_cleanup(dev->tracer);
+ mlx5_clock_unload(dev);
mlx5_eq_table_destroy(dev);
err_eq_table:
mlx5_irq_table_destroy(dev);
@@ -1468,6 +1475,7 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
mlx5_hv_vhca_cleanup(dev->hv_vhca);
mlx5_fw_reset_events_stop(dev);
mlx5_fw_tracer_cleanup(dev->tracer);
+ mlx5_clock_unload(dev);
mlx5_eq_table_destroy(dev);
mlx5_irq_table_destroy(dev);
mlx5_pagealloc_stop(dev);
@@ -1795,6 +1803,7 @@ static const int types[] = {
MLX5_CAP_ADV_VIRTUALIZATION,
MLX5_CAP_CRYPTO,
MLX5_CAP_SHAMPO,
+ MLX5_CAP_ADV_RDMA,
};
static void mlx5_hca_caps_free(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 99de67c3aa74..2e02bdea8361 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -114,6 +114,26 @@ struct mlx5_cmd_alias_obj_create_attr {
u8 access_key[ACCESS_KEY_LEN];
};
+struct mlx5_port_eth_proto {
+ u32 cap;
+ u32 admin;
+ u32 oper;
+};
+
+struct mlx5_module_eeprom_query_params {
+ u16 size;
+ u16 offset;
+ u16 i2c_address;
+ u32 page;
+ u32 bank;
+ u32 module_number;
+};
+
+struct mlx5_link_info {
+ u32 speed;
+ u32 lanes;
+};
+
static inline void mlx5_printk(struct mlx5_core_dev *dev, int level, const char *format, ...)
{
struct device *device = dev->device;
@@ -280,6 +300,78 @@ int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode);
struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev);
void mlx5_dm_cleanup(struct mlx5_core_dev *dev);
+void mlx5_toggle_port_link(struct mlx5_core_dev *dev);
+int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status status);
+int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status *status);
+int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration);
+
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
+int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause);
+int mlx5_query_port_pause(struct mlx5_core_dev *dev,
+ u32 *rx_pause, u32 *tx_pause);
+
+int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx);
+int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx,
+ u8 *pfc_en_rx);
+
+int mlx5_set_port_stall_watermark(struct mlx5_core_dev *dev,
+ u16 stall_critical_watermark,
+ u16 stall_minor_watermark);
+int mlx5_query_port_stall_watermark(struct mlx5_core_dev *dev,
+ u16 *stall_critical_watermark,
+ u16 *stall_minor_watermark);
+
+int mlx5_max_tc(struct mlx5_core_dev *mdev);
+int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc);
+int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
+ u8 prio, u8 *tc);
+int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group);
+int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
+ u8 tc, u8 *tc_group);
+int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw);
+int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
+ u8 tc, u8 *bw_pct);
+int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
+ u8 *max_bw_value,
+ u8 *max_bw_unit);
+int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
+ u8 *max_bw_value,
+ u8 *max_bw_unit);
+int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode);
+int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode);
+
+int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out, int outlen);
+int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen);
+int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable);
+void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
+ bool *enabled);
+int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
+ u16 offset, u16 size, u8 *data);
+int
+mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
+ struct mlx5_module_eeprom_query_params *params,
+ u8 *data);
+
+int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out);
+int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in);
+int mlx5_set_trust_state(struct mlx5_core_dev *mdev, u8 trust_state);
+int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state);
+int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio);
+int mlx5_query_dscp2prio(struct mlx5_core_dev *mdev, u8 *dscp2prio);
+
+int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
+ struct mlx5_port_eth_proto *eproto);
+bool mlx5_ptys_ext_supported(struct mlx5_core_dev *mdev);
+const struct mlx5_link_info *mlx5_port_ptys2info(struct mlx5_core_dev *mdev,
+ u32 eth_proto_oper,
+ bool force_legacy);
+u32 mlx5_port_info2linkmodes(struct mlx5_core_dev *mdev,
+ struct mlx5_link_info *info,
+ bool force_legacy);
+int mlx5_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
+
#define MLX5_PPS_CAP(mdev) (MLX5_CAP_GEN((mdev), pps) && \
MLX5_CAP_GEN((mdev), pps_modify) && \
MLX5_CAP_MCAM_FEATURE((mdev), mtpps_fs) && \
@@ -346,6 +438,8 @@ int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap
#define mlx5_vport_get_other_func_general_cap(dev, vport, out) \
mlx5_vport_get_other_func_cap(dev, vport, out, MLX5_CAP_GENERAL)
+int mlx5_vport_get_vhca_id(struct mlx5_core_dev *dev, u16 vport, u16 *vhca_id);
+
static inline u32 mlx5_sriov_get_vf_total_msix(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 50931584132b..549f1066d2a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -196,7 +196,6 @@ void mlx5_toggle_port_link(struct mlx5_core_dev *dev)
if (ps == MLX5_PORT_UP)
mlx5_set_port_admin_status(dev, MLX5_PORT_UP);
}
-EXPORT_SYMBOL_GPL(mlx5_toggle_port_link);
int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status status)
@@ -210,7 +209,6 @@ int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PAOS, 0, 1);
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_admin_status);
int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status *status)
@@ -227,7 +225,6 @@ int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
*status = MLX5_GET(paos_reg, out, admin_status);
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu,
u16 *max_mtu, u16 *oper_mtu, u8 port)
@@ -257,7 +254,6 @@ int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port)
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PMTU, 0, 1);
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_mtu);
void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu,
u8 port)
@@ -447,7 +443,6 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
return mlx5_query_mcia(dev, &query, data);
}
-EXPORT_SYMBOL_GPL(mlx5_query_module_eeprom);
int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
struct mlx5_module_eeprom_query_params *params,
@@ -467,7 +462,6 @@ int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
return mlx5_query_mcia(dev, params, data);
}
-EXPORT_SYMBOL_GPL(mlx5_query_module_eeprom_by_page);
static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc,
int pvlc_size, u8 local_port)
@@ -518,7 +512,6 @@ int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause)
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PFCC, 0, 1);
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_pause);
int mlx5_query_port_pause(struct mlx5_core_dev *dev,
u32 *rx_pause, u32 *tx_pause)
@@ -538,7 +531,6 @@ int mlx5_query_port_pause(struct mlx5_core_dev *dev,
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_pause);
int mlx5_set_port_stall_watermark(struct mlx5_core_dev *dev,
u16 stall_critical_watermark,
@@ -597,7 +589,6 @@ int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx)
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PFCC, 0, 1);
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_pfc);
int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx)
{
@@ -616,7 +607,6 @@ int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx)
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_pfc);
int mlx5_max_tc(struct mlx5_core_dev *mdev)
{
@@ -667,7 +657,6 @@ int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc)
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_prio_tc);
int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
u8 prio, u8 *tc)
@@ -689,7 +678,6 @@ int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
return err;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_prio_tc);
static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
int inlen)
@@ -728,7 +716,6 @@ int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group)
return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in));
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_tc_group);
int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
u8 tc, u8 *tc_group)
@@ -749,7 +736,6 @@ int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_tc_group);
int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw)
{
@@ -763,7 +749,6 @@ int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw)
return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in));
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_tc_bw_alloc);
int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
u8 tc, u8 *bw_pct)
@@ -784,7 +769,6 @@ int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_tc_bw_alloc);
int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
u8 *max_bw_value,
@@ -808,7 +792,6 @@ int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in));
}
-EXPORT_SYMBOL_GPL(mlx5_modify_port_ets_rate_limit);
int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
u8 *max_bw_value,
@@ -834,7 +817,6 @@ int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_ets_rate_limit);
int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode)
{
@@ -845,7 +827,6 @@ int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode)
MLX5_SET(set_wol_rol_in, in, wol_mode, wol_mode);
return mlx5_cmd_exec_in(mdev, set_wol_rol, in);
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_wol);
int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode)
{
@@ -860,7 +841,6 @@ int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode)
return err;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_wol);
int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out, int outlen)
{
@@ -1058,53 +1038,57 @@ out:
}
/* speed in units of 1Mb */
-static const u32 mlx5e_link_speed[MLX5E_LINK_MODES_NUMBER] = {
- [MLX5E_1000BASE_CX_SGMII] = 1000,
- [MLX5E_1000BASE_KX] = 1000,
- [MLX5E_10GBASE_CX4] = 10000,
- [MLX5E_10GBASE_KX4] = 10000,
- [MLX5E_10GBASE_KR] = 10000,
- [MLX5E_20GBASE_KR2] = 20000,
- [MLX5E_40GBASE_CR4] = 40000,
- [MLX5E_40GBASE_KR4] = 40000,
- [MLX5E_56GBASE_R4] = 56000,
- [MLX5E_10GBASE_CR] = 10000,
- [MLX5E_10GBASE_SR] = 10000,
- [MLX5E_10GBASE_ER] = 10000,
- [MLX5E_40GBASE_SR4] = 40000,
- [MLX5E_40GBASE_LR4] = 40000,
- [MLX5E_50GBASE_SR2] = 50000,
- [MLX5E_100GBASE_CR4] = 100000,
- [MLX5E_100GBASE_SR4] = 100000,
- [MLX5E_100GBASE_KR4] = 100000,
- [MLX5E_100GBASE_LR4] = 100000,
- [MLX5E_100BASE_TX] = 100,
- [MLX5E_1000BASE_T] = 1000,
- [MLX5E_10GBASE_T] = 10000,
- [MLX5E_25GBASE_CR] = 25000,
- [MLX5E_25GBASE_KR] = 25000,
- [MLX5E_25GBASE_SR] = 25000,
- [MLX5E_50GBASE_CR2] = 50000,
- [MLX5E_50GBASE_KR2] = 50000,
+static const struct mlx5_link_info mlx5e_link_info[MLX5E_LINK_MODES_NUMBER] = {
+ [MLX5E_1000BASE_CX_SGMII] = {.speed = 1000, .lanes = 1},
+ [MLX5E_1000BASE_KX] = {.speed = 1000, .lanes = 1},
+ [MLX5E_10GBASE_CX4] = {.speed = 10000, .lanes = 4},
+ [MLX5E_10GBASE_KX4] = {.speed = 10000, .lanes = 4},
+ [MLX5E_10GBASE_KR] = {.speed = 10000, .lanes = 1},
+ [MLX5E_20GBASE_KR2] = {.speed = 20000, .lanes = 2},
+ [MLX5E_40GBASE_CR4] = {.speed = 40000, .lanes = 4},
+ [MLX5E_40GBASE_KR4] = {.speed = 40000, .lanes = 4},
+ [MLX5E_56GBASE_R4] = {.speed = 56000, .lanes = 4},
+ [MLX5E_10GBASE_CR] = {.speed = 10000, .lanes = 1},
+ [MLX5E_10GBASE_SR] = {.speed = 10000, .lanes = 1},
+ [MLX5E_10GBASE_ER] = {.speed = 10000, .lanes = 1},
+ [MLX5E_40GBASE_SR4] = {.speed = 40000, .lanes = 4},
+ [MLX5E_40GBASE_LR4] = {.speed = 40000, .lanes = 4},
+ [MLX5E_50GBASE_SR2] = {.speed = 50000, .lanes = 2},
+ [MLX5E_100GBASE_CR4] = {.speed = 100000, .lanes = 4},
+ [MLX5E_100GBASE_SR4] = {.speed = 100000, .lanes = 4},
+ [MLX5E_100GBASE_KR4] = {.speed = 100000, .lanes = 4},
+ [MLX5E_100GBASE_LR4] = {.speed = 100000, .lanes = 4},
+ [MLX5E_100BASE_TX] = {.speed = 100, .lanes = 1},
+ [MLX5E_1000BASE_T] = {.speed = 1000, .lanes = 1},
+ [MLX5E_10GBASE_T] = {.speed = 10000, .lanes = 1},
+ [MLX5E_25GBASE_CR] = {.speed = 25000, .lanes = 1},
+ [MLX5E_25GBASE_KR] = {.speed = 25000, .lanes = 1},
+ [MLX5E_25GBASE_SR] = {.speed = 25000, .lanes = 1},
+ [MLX5E_50GBASE_CR2] = {.speed = 50000, .lanes = 2},
+ [MLX5E_50GBASE_KR2] = {.speed = 50000, .lanes = 2},
};
-static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] = {
- [MLX5E_SGMII_100M] = 100,
- [MLX5E_1000BASE_X_SGMII] = 1000,
- [MLX5E_5GBASE_R] = 5000,
- [MLX5E_10GBASE_XFI_XAUI_1] = 10000,
- [MLX5E_40GBASE_XLAUI_4_XLPPI_4] = 40000,
- [MLX5E_25GAUI_1_25GBASE_CR_KR] = 25000,
- [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2] = 50000,
- [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR] = 50000,
- [MLX5E_CAUI_4_100GBASE_CR4_KR4] = 100000,
- [MLX5E_100GAUI_2_100GBASE_CR2_KR2] = 100000,
- [MLX5E_200GAUI_4_200GBASE_CR4_KR4] = 200000,
- [MLX5E_400GAUI_8_400GBASE_CR8] = 400000,
- [MLX5E_100GAUI_1_100GBASE_CR_KR] = 100000,
- [MLX5E_200GAUI_2_200GBASE_CR2_KR2] = 200000,
- [MLX5E_400GAUI_4_400GBASE_CR4_KR4] = 400000,
- [MLX5E_800GAUI_8_800GBASE_CR8_KR8] = 800000,
+static const struct mlx5_link_info
+mlx5e_ext_link_info[MLX5E_EXT_LINK_MODES_NUMBER] = {
+ [MLX5E_SGMII_100M] = {.speed = 100, .lanes = 1},
+ [MLX5E_1000BASE_X_SGMII] = {.speed = 1000, .lanes = 1},
+ [MLX5E_5GBASE_R] = {.speed = 5000, .lanes = 1},
+ [MLX5E_10GBASE_XFI_XAUI_1] = {.speed = 10000, .lanes = 1},
+ [MLX5E_40GBASE_XLAUI_4_XLPPI_4] = {.speed = 40000, .lanes = 4},
+ [MLX5E_25GAUI_1_25GBASE_CR_KR] = {.speed = 25000, .lanes = 1},
+ [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2] = {.speed = 50000, .lanes = 2},
+ [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR] = {.speed = 50000, .lanes = 1},
+ [MLX5E_CAUI_4_100GBASE_CR4_KR4] = {.speed = 100000, .lanes = 4},
+ [MLX5E_100GAUI_2_100GBASE_CR2_KR2] = {.speed = 100000, .lanes = 2},
+ [MLX5E_200GAUI_4_200GBASE_CR4_KR4] = {.speed = 200000, .lanes = 4},
+ [MLX5E_400GAUI_8_400GBASE_CR8] = {.speed = 400000, .lanes = 8},
+ [MLX5E_100GAUI_1_100GBASE_CR_KR] = {.speed = 100000, .lanes = 1},
+ [MLX5E_200GAUI_2_200GBASE_CR2_KR2] = {.speed = 200000, .lanes = 2},
+ [MLX5E_400GAUI_4_400GBASE_CR4_KR4] = {.speed = 400000, .lanes = 4},
+ [MLX5E_800GAUI_8_800GBASE_CR8_KR8] = {.speed = 800000, .lanes = 8},
+ [MLX5E_200GAUI_1_200GBASE_CR1_KR1] = {.speed = 200000, .lanes = 1},
+ [MLX5E_400GAUI_2_400GBASE_CR2_KR2] = {.speed = 400000, .lanes = 2},
+ [MLX5E_800GAUI_4_800GBASE_CR4_KR4] = {.speed = 800000, .lanes = 4},
};
int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
@@ -1142,54 +1126,61 @@ bool mlx5_ptys_ext_supported(struct mlx5_core_dev *mdev)
return !!eproto.cap;
}
-static void mlx5e_port_get_speed_arr(struct mlx5_core_dev *mdev,
- const u32 **arr, u32 *size,
- bool force_legacy)
+static void mlx5e_port_get_link_mode_info_arr(struct mlx5_core_dev *mdev,
+ const struct mlx5_link_info **arr,
+ u32 *size,
+ bool force_legacy)
{
bool ext = force_legacy ? false : mlx5_ptys_ext_supported(mdev);
- *size = ext ? ARRAY_SIZE(mlx5e_ext_link_speed) :
- ARRAY_SIZE(mlx5e_link_speed);
- *arr = ext ? mlx5e_ext_link_speed : mlx5e_link_speed;
+ *size = ext ? ARRAY_SIZE(mlx5e_ext_link_info) :
+ ARRAY_SIZE(mlx5e_link_info);
+ *arr = ext ? mlx5e_ext_link_info : mlx5e_link_info;
}
-u32 mlx5_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper,
- bool force_legacy)
+const struct mlx5_link_info *mlx5_port_ptys2info(struct mlx5_core_dev *mdev,
+ u32 eth_proto_oper,
+ bool force_legacy)
{
unsigned long temp = eth_proto_oper;
- const u32 *table;
- u32 speed = 0;
+ const struct mlx5_link_info *table;
u32 max_size;
int i;
- mlx5e_port_get_speed_arr(mdev, &table, &max_size, force_legacy);
+ mlx5e_port_get_link_mode_info_arr(mdev, &table, &max_size,
+ force_legacy);
i = find_first_bit(&temp, max_size);
if (i < max_size)
- speed = table[i];
- return speed;
+ return &table[i];
+
+ return NULL;
}
-u32 mlx5_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed,
- bool force_legacy)
+u32 mlx5_port_info2linkmodes(struct mlx5_core_dev *mdev,
+ struct mlx5_link_info *info,
+ bool force_legacy)
{
+ const struct mlx5_link_info *table;
u32 link_modes = 0;
- const u32 *table;
u32 max_size;
int i;
- mlx5e_port_get_speed_arr(mdev, &table, &max_size, force_legacy);
+ mlx5e_port_get_link_mode_info_arr(mdev, &table, &max_size,
+ force_legacy);
for (i = 0; i < max_size; ++i) {
- if (table[i] == speed)
- link_modes |= MLX5E_PROT_MASK(i);
+ if (table[i].speed == info->speed) {
+ if (!info->lanes || table[i].lanes == info->lanes)
+ link_modes |= MLX5E_PROT_MASK(i);
+ }
}
return link_modes;
}
int mlx5_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
{
+ const struct mlx5_link_info *table;
struct mlx5_port_eth_proto eproto;
u32 max_speed = 0;
- const u32 *table;
u32 max_size;
bool ext;
int err;
@@ -1200,10 +1191,10 @@ int mlx5_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
if (err)
return err;
- mlx5e_port_get_speed_arr(mdev, &table, &max_size, false);
+ mlx5e_port_get_link_mode_info_arr(mdev, &table, &max_size, false);
for (i = 0; i < max_size; ++i)
if (eproto.cap & MLX5E_PROT_MASK(i))
- max_speed = max(max_speed, table[i]);
+ max_speed = max(max_speed, table[i].speed);
*speed = max_speed;
return 0;
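The reworked lookup scans the operational protocol bitmask for its first set bit and returns the whole table entry, so callers now get the lane count along with the speed; the reverse mapping accepts an optional lane constraint (info->lanes == 0 matches any lane count). A self-contained model of the forward lookup with illustrative table values:

#include <stdio.h>
#include <stdint.h>

struct link_info { uint32_t speed; uint32_t lanes; };

static const struct link_info table[] = {
	{ .speed = 10000,  .lanes = 1 }, /* bit 0 */
	{ .speed = 25000,  .lanes = 1 }, /* bit 1 */
	{ .speed = 100000, .lanes = 4 }, /* bit 2 */
};

static const struct link_info *ptys2info(uint32_t proto_oper)
{
	for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (proto_oper & (1u << i)) /* first set bit wins */
			return &table[i];
	return NULL;
}

int main(void)
{
	const struct link_info *info = ptys2info(1u << 2);

	if (info)
		printf("%u Mb/s over %u lanes\n", info->speed, info->lanes);
	return 0;
}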
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
index b96909fbeb12..0864ba625c07 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -285,7 +285,7 @@ mlx5_sf_new_check_attr(struct mlx5_core_dev *dev, const struct devlink_port_new_
NL_SET_ERR_MSG_MOD(extack, "External controller is unsupported");
return -EOPNOTSUPP;
}
- if (new_attr->pfnum != mlx5_get_dev_index(dev)) {
+ if (new_attr->pfnum != PCI_FUNC(dev->pdev->devfn)) {
NL_SET_ERR_MSG_MOD(extack, "Invalid pfnum supplied");
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
index 3dbd4efa21a2..19dce1ba512d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
@@ -220,7 +220,7 @@ static int hws_bwc_queue_poll(struct mlx5hws_context *ctx,
bool drain)
{
unsigned long timeout = jiffies +
- msecs_to_jiffies(MLX5HWS_BWC_POLLING_TIMEOUT * MSEC_PER_SEC);
+ secs_to_jiffies(MLX5HWS_BWC_POLLING_TIMEOUT);
struct mlx5hws_flow_op_result comp[MLX5HWS_BWC_MATCHER_REHASH_BURST_TH];
u16 burst_th = hws_bwc_get_burst_th(ctx, queue_id);
bool got_comp = *pending_rules >= burst_th;
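secs_to_jiffies() expresses the same deadline without the detour through milliseconds. A toy model of the equivalence, assuming a simple HZ-based jiffies clock; the HZ value is an illustrative assumption, and the kernel's msecs_to_jiffies() additionally rounds up:

#include <stdio.h>

#define HZ 250
#define MSEC_PER_SEC 1000UL

static unsigned long msecs_to_jiffies(unsigned long m) { return m * HZ / MSEC_PER_SEC; }
static unsigned long secs_to_jiffies(unsigned long s) { return s * HZ; }

int main(void)
{
	unsigned long t = 60; /* polling timeout in seconds */

	printf("%lu == %lu\n", msecs_to_jiffies(t * MSEC_PER_SEC),
	       secs_to_jiffies(t));
	return 0;
}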
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
index 487e75476b0a..e8f98c109b99 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
@@ -130,12 +130,6 @@ int mlx5hws_cmd_flow_table_destroy(struct mlx5_core_dev *mdev,
return mlx5_cmd_exec_in(mdev, destroy_flow_table, in);
}
-void mlx5hws_cmd_alias_flow_table_destroy(struct mlx5_core_dev *mdev,
- u32 table_id)
-{
- hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_FT_ALIAS, table_id);
-}
-
static int hws_cmd_flow_group_create(struct mlx5_core_dev *mdev,
struct mlx5hws_cmd_fg_attr *fg_attr,
u32 *group_id)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h
index 610c63d81ad9..51d9e0291ac1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h
@@ -258,9 +258,6 @@ int mlx5hws_cmd_flow_table_query(struct mlx5_core_dev *mdev,
int mlx5hws_cmd_flow_table_destroy(struct mlx5_core_dev *mdev,
u8 fw_ft_type, u32 table_id);
-void mlx5hws_cmd_alias_flow_table_destroy(struct mlx5_core_dev *mdev,
- u32 table_id);
-
int mlx5hws_cmd_rtc_create(struct mlx5_core_dev *mdev,
struct mlx5hws_cmd_rtc_create_attr *rtc_attr,
u32 *rtc_id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
index 10ece7df1cfa..c8cc0c8115f5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
@@ -500,7 +500,8 @@ hws_definer_check_match_flags(struct mlx5hws_definer_conv_data *cd)
return 0;
err_conflict:
- mlx5hws_err(cd->ctx, "Invalid definer fields combination\n");
+ mlx5hws_err(cd->ctx, "Invalid definer fields combination: match_flags = 0x%08x\n",
+ cd->match_flags);
return -EINVAL;
}
@@ -1985,8 +1986,7 @@ int mlx5hws_definer_get_obj(struct mlx5hws_context *ctx,
continue;
/* Reuse definer and set LRU (move to be first in the list) */
- list_del_init(&cached_definer->list_node);
- list_add(&cached_definer->list_node, &cache->list_head);
+ list_move(&cached_definer->list_node, &cache->list_head);
cached_definer->refcount++;
return cached_definer->definer.obj_id;
}
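list_move() folds the list_del_init()/list_add() pair into one primitive that unlinks the node and splices it at the head, which is exactly the LRU promote-on-hit step. A self-contained circular doubly linked list sketch of the same operation:

#include <stdio.h>

struct node { struct node *prev, *next; int id; };

static void node_del(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

static void node_add(struct node *n, struct node *head)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

static void node_move(struct node *n, struct node *head)
{
	node_del(n);
	node_add(n, head); /* one unlink, one splice at the head */
}

int main(void)
{
	struct node head = { &head, &head, -1 }, a = { 0 }, b = { 0 };

	a.id = 1;
	b.id = 2;
	node_add(&a, &head);  /* list: a */
	node_add(&b, &head);  /* list: b, a */
	node_move(&a, &head); /* cache hit on a; list: a, b */
	printf("front: %d\n", head.next->id); /* prints 1 */
	return 0;
}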
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
index f34bbbbba1c2..1b787cd66e6f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
@@ -66,6 +66,8 @@ static int mlx5_fs_init_hws_actions_pool(struct mlx5_core_dev *dev,
xa_init(&hws_pool->table_dests);
xa_init(&hws_pool->vport_dests);
xa_init(&hws_pool->vport_vhca_dests);
+ xa_init(&hws_pool->aso_meters);
+ xa_init(&hws_pool->sample_dests);
return 0;
cleanup_insert_hdr:
@@ -88,10 +90,17 @@ destroy_tag:
static void mlx5_fs_cleanup_hws_actions_pool(struct mlx5_fs_hws_context *fs_ctx)
{
struct mlx5_fs_hws_actions_pool *hws_pool = &fs_ctx->hws_pool;
+ struct mlx5_fs_hws_data *fs_hws_data;
struct mlx5hws_action *action;
struct mlx5_fs_pool *pool;
unsigned long i;
+ xa_for_each(&hws_pool->sample_dests, i, fs_hws_data)
+ kfree(fs_hws_data);
+ xa_destroy(&hws_pool->sample_dests);
+ xa_for_each(&hws_pool->aso_meters, i, fs_hws_data)
+ kfree(fs_hws_data);
+ xa_destroy(&hws_pool->aso_meters);
xa_for_each(&hws_pool->vport_vhca_dests, i, action)
mlx5hws_action_destroy(action);
xa_destroy(&hws_pool->vport_vhca_dests);
@@ -459,6 +468,106 @@ mlx5_fs_create_dest_action_range(struct mlx5hws_context *ctx,
flags);
}
+static struct mlx5_fs_hws_data *
+mlx5_fs_get_cached_hws_data(struct xarray *cache_xa, unsigned long index)
+{
+ struct mlx5_fs_hws_data *fs_hws_data;
+ int err;
+
+ xa_lock(cache_xa);
+ fs_hws_data = xa_load(cache_xa, index);
+ if (!fs_hws_data) {
+ fs_hws_data = kzalloc(sizeof(*fs_hws_data), GFP_ATOMIC);
+ if (!fs_hws_data) {
+ xa_unlock(cache_xa);
+ return NULL;
+ }
+ refcount_set(&fs_hws_data->hws_action_refcount, 0);
+ mutex_init(&fs_hws_data->lock);
+ err = __xa_insert(cache_xa, index, fs_hws_data, GFP_ATOMIC);
+ if (err) {
+ kfree(fs_hws_data);
+ xa_unlock(cache_xa);
+ return NULL;
+ }
+ }
+ xa_unlock(cache_xa);
+
+ return fs_hws_data;
+}
+
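mlx5_fs_get_cached_hws_data() above is a locked get-or-create: the index is looked up under xa_lock(), and only on a miss is a zeroed entry allocated (GFP_ATOMIC, since the lock is held) and published with __xa_insert(). A simplified model in which a mutex and a fixed-size array stand in for the xarray:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct hws_data { int refcount; };

#define CACHE_SLOTS 64
static struct hws_data *cache[CACHE_SLOTS];
static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

static struct hws_data *get_cached(unsigned long index)
{
	struct hws_data *d;

	if (index >= CACHE_SLOTS)
		return NULL;
	pthread_mutex_lock(&cache_lock);
	d = cache[index];                  /* look up under the lock */
	if (!d) {
		d = calloc(1, sizeof(*d)); /* allocate only on a miss */
		if (d)
			cache[index] = d;
	}
	pthread_mutex_unlock(&cache_lock);
	return d;
}

int main(void)
{
	/* both calls return the same cached entry */
	printf("%d\n", get_cached(5) == get_cached(5));
	return 0;
}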
+static struct mlx5hws_action *
+mlx5_fs_get_action_aso_meter(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_exe_aso *exe_aso)
+{
+ struct mlx5_fs_hws_create_action_ctx create_ctx;
+ struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
+ struct mlx5_fs_hws_data *meter_hws_data;
+ u32 id = exe_aso->base_id;
+ struct xarray *meters_xa;
+
+ meters_xa = &fs_ctx->hws_pool.aso_meters;
+ meter_hws_data = mlx5_fs_get_cached_hws_data(meters_xa, id);
+ if (!meter_hws_data)
+ return NULL;
+
+ create_ctx.hws_ctx = ctx;
+ create_ctx.actions_type = MLX5HWS_ACTION_TYP_ASO_METER;
+ create_ctx.id = id;
+ create_ctx.return_reg_id = exe_aso->return_reg_id;
+
+ return mlx5_fs_get_hws_action(meter_hws_data, &create_ctx);
+}
+
+static void mlx5_fs_put_action_aso_meter(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_exe_aso *exe_aso)
+{
+ struct mlx5_fs_hws_data *meter_hws_data;
+ struct xarray *meters_xa;
+
+ meters_xa = &fs_ctx->hws_pool.aso_meters;
+ meter_hws_data = xa_load(meters_xa, exe_aso->base_id);
+ if (!meter_hws_data)
+ return;
+ mlx5_fs_put_hws_action(meter_hws_data);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_dest_action_sampler(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_flow_rule *dst)
+{
+ struct mlx5_fs_hws_create_action_ctx create_ctx;
+ struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
+ struct mlx5_fs_hws_data *sampler_hws_data;
+ u32 id = dst->dest_attr.sampler_id;
+ struct xarray *sampler_xa;
+
+ sampler_xa = &fs_ctx->hws_pool.sample_dests;
+ sampler_hws_data = mlx5_fs_get_cached_hws_data(sampler_xa, id);
+ if (!sampler_hws_data)
+ return NULL;
+
+ create_ctx.hws_ctx = ctx;
+ create_ctx.actions_type = MLX5HWS_ACTION_TYP_SAMPLER;
+ create_ctx.id = id;
+
+ return mlx5_fs_get_hws_action(sampler_hws_data, &create_ctx);
+}
+
+static void mlx5_fs_put_dest_action_sampler(struct mlx5_fs_hws_context *fs_ctx,
+ u32 sampler_id)
+{
+ struct mlx5_fs_hws_data *sampler_hws_data;
+ struct xarray *sampler_xa;
+
+ sampler_xa = &fs_ctx->hws_pool.sample_dests;
+ sampler_hws_data = xa_load(sampler_xa, sampler_id);
+ if (!sampler_hws_data)
+ return;
+
+ mlx5_fs_put_hws_action(sampler_hws_data);
+}
+
static struct mlx5hws_action *
mlx5_fs_create_action_dest_array(struct mlx5hws_context *ctx,
struct mlx5hws_action_dest_attr *dests,
@@ -519,26 +628,101 @@ mlx5_fs_create_action_last(struct mlx5hws_context *ctx)
return mlx5hws_action_create_last(ctx, flags);
}
-static void mlx5_fs_destroy_fs_action(struct mlx5_fs_hws_rule_action *fs_action)
+static struct mlx5hws_action *
+mlx5_fs_create_hws_action(struct mlx5_fs_hws_create_action_ctx *create_ctx)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+
+ switch (create_ctx->actions_type) {
+ case MLX5HWS_ACTION_TYP_CTR:
+ return mlx5hws_action_create_counter(create_ctx->hws_ctx,
+ create_ctx->id, flags);
+ case MLX5HWS_ACTION_TYP_ASO_METER:
+ return mlx5hws_action_create_aso_meter(create_ctx->hws_ctx,
+ create_ctx->id,
+ create_ctx->return_reg_id,
+ flags);
+ case MLX5HWS_ACTION_TYP_SAMPLER:
+ return mlx5hws_action_create_flow_sampler(create_ctx->hws_ctx,
+ create_ctx->id, flags);
+ default:
+ return NULL;
+ }
+}
+
+struct mlx5hws_action *
+mlx5_fs_get_hws_action(struct mlx5_fs_hws_data *fs_hws_data,
+ struct mlx5_fs_hws_create_action_ctx *create_ctx)
+{
+ /* try to avoid locking if not necessary */
+ if (refcount_inc_not_zero(&fs_hws_data->hws_action_refcount))
+ return fs_hws_data->hws_action;
+
+ mutex_lock(&fs_hws_data->lock);
+ if (refcount_inc_not_zero(&fs_hws_data->hws_action_refcount)) {
+ mutex_unlock(&fs_hws_data->lock);
+ return fs_hws_data->hws_action;
+ }
+ fs_hws_data->hws_action = mlx5_fs_create_hws_action(create_ctx);
+ if (!fs_hws_data->hws_action) {
+ mutex_unlock(&fs_hws_data->lock);
+ return NULL;
+ }
+ refcount_set(&fs_hws_data->hws_action_refcount, 1);
+ mutex_unlock(&fs_hws_data->lock);
+
+ return fs_hws_data->hws_action;
+}
+
+void mlx5_fs_put_hws_action(struct mlx5_fs_hws_data *fs_hws_data)
+{
+ if (!fs_hws_data)
+ return;
+
+ /* try to avoid locking if not necessary */
+ if (refcount_dec_not_one(&fs_hws_data->hws_action_refcount))
+ return;
+
+ mutex_lock(&fs_hws_data->lock);
+ if (!refcount_dec_and_test(&fs_hws_data->hws_action_refcount)) {
+ mutex_unlock(&fs_hws_data->lock);
+ return;
+ }
+ mlx5hws_action_destroy(fs_hws_data->hws_action);
+ fs_hws_data->hws_action = NULL;
+ mutex_unlock(&fs_hws_data->lock);
+}
+
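The shared get/put pair uses the usual lock-avoiding refcount shape: refcount_inc_not_zero() on the hot path, then take the mutex and re-check before creating; the put side mirrors it with refcount_dec_not_one() and refcount_dec_and_test(). A self-contained model of the get side using C11 atomics, with an int handle standing in for the HWS action:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static struct {
	pthread_mutex_t lock;
	atomic_int refcount;
	int action; /* 0 means not created yet */
} obj = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

static int inc_not_zero(atomic_int *r)
{
	int v = atomic_load(r);

	while (v && !atomic_compare_exchange_weak(r, &v, v + 1))
		;
	return v != 0;
}

static int get_action(void)
{
	if (inc_not_zero(&obj.refcount)) /* fast path, no lock taken */
		return obj.action;
	pthread_mutex_lock(&obj.lock);
	if (!inc_not_zero(&obj.refcount)) { /* re-check under the lock */
		obj.action = 42;            /* "create" the action */
		atomic_store(&obj.refcount, 1);
	}
	pthread_mutex_unlock(&obj.lock);
	return obj.action;
}

int main(void)
{
	printf("%d %d\n", get_action(), get_action()); /* one creation */
	return 0;
}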
+static void mlx5_fs_destroy_fs_action(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_fs_hws_rule_action *fs_action)
{
+ struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
+
switch (mlx5hws_action_get_type(fs_action->action)) {
case MLX5HWS_ACTION_TYP_CTR:
mlx5_fc_put_hws_action(fs_action->counter);
break;
+ case MLX5HWS_ACTION_TYP_ASO_METER:
+ mlx5_fs_put_action_aso_meter(fs_ctx, fs_action->exe_aso);
+ break;
+ case MLX5HWS_ACTION_TYP_SAMPLER:
+ mlx5_fs_put_dest_action_sampler(fs_ctx, fs_action->sampler_id);
+ break;
default:
mlx5hws_action_destroy(fs_action->action);
}
}
static void
-mlx5_fs_destroy_fs_actions(struct mlx5_fs_hws_rule_action **fs_actions,
+mlx5_fs_destroy_fs_actions(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_fs_hws_rule_action **fs_actions,
int *num_fs_actions)
{
int i;
/* Free in reverse order to handle action dependencies */
for (i = *num_fs_actions - 1; i >= 0; i--)
- mlx5_fs_destroy_fs_action(*fs_actions + i);
+ mlx5_fs_destroy_fs_action(ns, *fs_actions + i);
*num_fs_actions = 0;
kfree(*fs_actions);
*fs_actions = NULL;
@@ -735,8 +919,25 @@ static int mlx5_fs_fte_get_hws_actions(struct mlx5_flow_root_namespace *ns,
}
if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
- err = -EOPNOTSUPP;
- goto free_actions;
+ if (fte_action->exe_aso.type != MLX5_EXE_ASO_FLOW_METER ||
+ num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+
+ tmp_action = mlx5_fs_get_action_aso_meter(fs_ctx,
+ &fte_action->exe_aso);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ (*ractions)[num_actions].aso_meter.offset =
+ fte_action->exe_aso.flow_meter.meter_idx;
+ (*ractions)[num_actions].aso_meter.init_color =
+ fte_action->exe_aso.flow_meter.init_color;
+ (*ractions)[num_actions++].action = tmp_action;
+ fs_actions[num_fs_actions].action = tmp_action;
+ fs_actions[num_fs_actions++].exe_aso = &fte_action->exe_aso;
}
if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
@@ -784,6 +985,14 @@ static int mlx5_fs_fte_get_hws_actions(struct mlx5_flow_root_namespace *ns,
dest_action = mlx5_fs_get_dest_action_vport(fs_ctx, dst,
type_uplink);
break;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
+ dest_action =
+ mlx5_fs_get_dest_action_sampler(fs_ctx,
+ dst);
+ fs_actions[num_fs_actions].action = dest_action;
+ fs_actions[num_fs_actions++].sampler_id =
+ dst->dest_attr.sampler_id;
+ break;
default:
err = -EOPNOTSUPP;
goto free_actions;
@@ -850,7 +1059,7 @@ static int mlx5_fs_fte_get_hws_actions(struct mlx5_flow_root_namespace *ns,
return 0;
free_actions:
- mlx5_fs_destroy_fs_actions(&fs_actions, &num_fs_actions);
+ mlx5_fs_destroy_fs_actions(ns, &fs_actions, &num_fs_actions);
free_dest_actions_alloc:
kfree(dest_actions);
free_fs_actions_alloc:
@@ -900,7 +1109,7 @@ static int mlx5_cmd_hws_create_fte(struct mlx5_flow_root_namespace *ns,
return 0;
free_actions:
- mlx5_fs_destroy_fs_actions(&fte->fs_hws_rule.hws_fs_actions,
+ mlx5_fs_destroy_fs_actions(ns, &fte->fs_hws_rule.hws_fs_actions,
&fte->fs_hws_rule.num_fs_actions);
out_err:
mlx5_core_err(ns->dev, "Failed to create hws rule err(%d)\n", err);
@@ -920,7 +1129,8 @@ static int mlx5_cmd_hws_delete_fte(struct mlx5_flow_root_namespace *ns,
err = mlx5hws_bwc_rule_destroy(rule->bwc_rule);
rule->bwc_rule = NULL;
- mlx5_fs_destroy_fs_actions(&rule->hws_fs_actions, &rule->num_fs_actions);
+ mlx5_fs_destroy_fs_actions(ns, &rule->hws_fs_actions,
+ &rule->num_fs_actions);
return err;
}
@@ -958,11 +1168,12 @@ static int mlx5_cmd_hws_update_fte(struct mlx5_flow_root_namespace *ns,
if (ret)
goto restore_actions;
- mlx5_fs_destroy_fs_actions(&saved_hws_fs_actions, &saved_num_fs_actions);
+ mlx5_fs_destroy_fs_actions(ns, &saved_hws_fs_actions,
+ &saved_num_fs_actions);
return ret;
restore_actions:
- mlx5_fs_destroy_fs_actions(&fte->fs_hws_rule.hws_fs_actions,
+ mlx5_fs_destroy_fs_actions(ns, &fte->fs_hws_rule.hws_fs_actions,
&fte->fs_hws_rule.num_fs_actions);
fte->fs_hws_rule.hws_fs_actions = saved_hws_fs_actions;
fte->fs_hws_rule.num_fs_actions = saved_num_fs_actions;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h
index cbddb72d4362..8b56298288da 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h
@@ -22,6 +22,8 @@ struct mlx5_fs_hws_actions_pool {
struct xarray table_dests;
struct xarray vport_vhca_dests;
struct xarray vport_dests;
+ struct xarray aso_meters;
+ struct xarray sample_dests;
};
struct mlx5_fs_hws_context {
@@ -49,6 +51,8 @@ struct mlx5_fs_hws_rule_action {
struct mlx5hws_action *action;
union {
struct mlx5_fc *counter;
+ struct mlx5_exe_aso *exe_aso;
+ u32 sampler_id;
};
};
@@ -58,6 +62,26 @@ struct mlx5_fs_hws_rule {
int num_fs_actions;
};
+struct mlx5_fs_hws_data {
+ struct mlx5hws_action *hws_action;
+ struct mutex lock; /* protects hws_action */
+ refcount_t hws_action_refcount;
+};
+
+struct mlx5_fs_hws_create_action_ctx {
+ enum mlx5hws_action_type actions_type;
+ struct mlx5hws_context *hws_ctx;
+ u32 id;
+ union {
+ u8 return_reg_id;
+ };
+};
+
+struct mlx5hws_action *
+mlx5_fs_get_hws_action(struct mlx5_fs_hws_data *fs_hws_data,
+ struct mlx5_fs_hws_create_action_ctx *create_ctx);
+void mlx5_fs_put_hws_action(struct mlx5_fs_hws_data *fs_hws_data);
+
#ifdef CONFIG_MLX5_HW_STEERING
bool mlx5_fs_hws_is_supported(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c
index 2ae4ac62b0e2..f1ecdba74e1f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c
@@ -405,46 +405,17 @@ bool mlx5_fs_hws_mh_pool_match(struct mlx5_fs_pool *mh_pool,
struct mlx5hws_action *mlx5_fc_get_hws_action(struct mlx5hws_context *ctx,
struct mlx5_fc *counter)
{
- u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5_fs_hws_create_action_ctx create_ctx;
struct mlx5_fc_bulk *fc_bulk = counter->bulk;
- struct mlx5_fc_bulk_hws_data *fc_bulk_hws;
- fc_bulk_hws = &fc_bulk->hws_data;
- /* try avoid locking if not necessary */
- if (refcount_inc_not_zero(&fc_bulk_hws->hws_action_refcount))
- return fc_bulk_hws->hws_action;
+ create_ctx.hws_ctx = ctx;
+ create_ctx.id = fc_bulk->base_id;
+ create_ctx.actions_type = MLX5HWS_ACTION_TYP_CTR;
- mutex_lock(&fc_bulk_hws->lock);
- if (refcount_inc_not_zero(&fc_bulk_hws->hws_action_refcount)) {
- mutex_unlock(&fc_bulk_hws->lock);
- return fc_bulk_hws->hws_action;
- }
- fc_bulk_hws->hws_action =
- mlx5hws_action_create_counter(ctx, fc_bulk->base_id, flags);
- if (!fc_bulk_hws->hws_action) {
- mutex_unlock(&fc_bulk_hws->lock);
- return NULL;
- }
- refcount_set(&fc_bulk_hws->hws_action_refcount, 1);
- mutex_unlock(&fc_bulk_hws->lock);
-
- return fc_bulk_hws->hws_action;
+ return mlx5_fs_get_hws_action(&fc_bulk->hws_data, &create_ctx);
}
void mlx5_fc_put_hws_action(struct mlx5_fc *counter)
{
- struct mlx5_fc_bulk_hws_data *fc_bulk_hws = &counter->bulk->hws_data;
-
- /* try avoid locking if not necessary */
- if (refcount_dec_not_one(&fc_bulk_hws->hws_action_refcount))
- return;
-
- mutex_lock(&fc_bulk_hws->lock);
- if (!refcount_dec_and_test(&fc_bulk_hws->hws_action_refcount)) {
- mutex_unlock(&fc_bulk_hws->lock);
- return;
- }
- mlx5hws_action_destroy(fc_bulk_hws->hws_action);
- fc_bulk_hws->hws_action = NULL;
- mutex_unlock(&fc_bulk_hws->lock);
+ mlx5_fs_put_hws_action(&counter->bulk->hws_data);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c
index d9dc4f2d0dc6..f51ed24526b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c
@@ -153,8 +153,7 @@ mlx5hws_pat_get_existing_cached_pattern(struct mlx5hws_pattern_cache *cache,
cached_pattern = mlx5hws_pat_find_cached_pattern(cache, num_of_actions, actions);
if (cached_pattern) {
/* LRU: move it to be first in the list */
- list_del_init(&cached_pattern->ptrn_list_node);
- list_add(&cached_pattern->ptrn_list_node, &cache->ptrn_list);
+ list_move(&cached_pattern->ptrn_list_node, &cache->ptrn_list);
cached_pattern->refcount++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c
index 60cb4527588a..65740bb68b09 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c
@@ -516,30 +516,6 @@ def_xa_destroy:
return NULL;
}
-/* Assure synchronization of the device steering tables with updates made by SW
- * insertion.
- */
-int mlx5dr_domain_sync(struct mlx5dr_domain *dmn, u32 flags)
-{
- int ret = 0;
-
- if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_SW) {
- mlx5dr_domain_lock(dmn);
- ret = mlx5dr_send_ring_force_drain(dmn);
- mlx5dr_domain_unlock(dmn);
- if (ret) {
- mlx5dr_err(dmn, "Force drain failed flags: %d, ret: %d\n",
- flags, ret);
- return ret;
- }
- }
-
- if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_HW)
- ret = mlx5dr_cmd_sync_steering(dmn->mdev);
-
- return ret;
-}
-
int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
{
if (WARN_ON_ONCE(refcount_read(&dmn->refcount) > 1))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c
index f57c84e5128b..4fd4e8483382 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c
@@ -1331,36 +1331,3 @@ void mlx5dr_send_ring_free(struct mlx5dr_domain *dmn,
kfree(send_ring->sync_buff);
kfree(send_ring);
}
-
-int mlx5dr_send_ring_force_drain(struct mlx5dr_domain *dmn)
-{
- struct mlx5dr_send_ring *send_ring = dmn->send_ring;
- struct postsend_info send_info = {};
- u8 data[DR_STE_SIZE];
- int num_of_sends_req;
- int ret;
- int i;
-
- /* Sending this amount of requests makes sure we will get drain */
- num_of_sends_req = send_ring->signal_th * TH_NUMS_TO_DRAIN / 2;
-
- /* Send fake requests forcing the last to be signaled */
- send_info.write.addr = (uintptr_t)data;
- send_info.write.length = DR_STE_SIZE;
- send_info.write.lkey = 0;
- /* Using the sync_mr in order to write/read */
- send_info.remote_addr = (uintptr_t)send_ring->sync_mr->addr;
- send_info.rkey = send_ring->sync_mr->mkey;
-
- for (i = 0; i < num_of_sends_req; i++) {
- ret = dr_postsend_icm_data(dmn, &send_info);
- if (ret)
- return ret;
- }
-
- spin_lock(&send_ring->lock);
- ret = dr_handle_pending_wc(dmn, send_ring);
- spin_unlock(&send_ring->lock);
-
- return ret;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_types.h
index 7618c6147f86..cc328292bf84 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_types.h
@@ -1473,7 +1473,6 @@ struct mlx5dr_send_ring {
int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn);
void mlx5dr_send_ring_free(struct mlx5dr_domain *dmn,
struct mlx5dr_send_ring *send_ring);
-int mlx5dr_send_ring_force_drain(struct mlx5dr_domain *dmn);
int mlx5dr_send_postsend_ste(struct mlx5dr_domain *dmn,
struct mlx5dr_ste *ste,
u8 *data,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h
index 0bb3724c10c2..fc8a2169d1a1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h
@@ -45,8 +45,6 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type);
int mlx5dr_domain_destroy(struct mlx5dr_domain *domain);
-int mlx5dr_domain_sync(struct mlx5dr_domain *domain, u32 flags);
-
void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
struct mlx5dr_domain *peer_dmn,
u16 peer_vhca_id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 0d5f750faa45..d10d4c396040 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -1199,6 +1199,31 @@ int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 vport, void *ou
}
EXPORT_SYMBOL_GPL(mlx5_vport_get_other_func_cap);
+int mlx5_vport_get_vhca_id(struct mlx5_core_dev *dev, u16 vport, u16 *vhca_id)
+{
+ int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ void *query_ctx;
+ void *hca_caps;
+ int err;
+
+ *vhca_id = 0;
+
+ query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
+ if (!query_ctx)
+ return -ENOMEM;
+
+ err = mlx5_vport_get_other_func_general_cap(dev, vport, query_ctx);
+ if (err)
+ goto out_free;
+
+ hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
+ *vhca_id = MLX5_GET(cmd_hca_cap, hca_caps, vhca_id);
+
+out_free:
+ kfree(query_ctx);
+ return err;
+}
+
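mlx5_vport_get_vhca_id() follows the common firmware-query shape: zero the out-parameter up front, allocate a scratch output buffer, run the query, extract the single field of interest, and free the buffer on every path. A hedged standalone sketch of that shape, with all names hypothetical:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct query_out { unsigned short vhca_id; /* plus many other caps */ };

static int query_general_caps(struct query_out *out)
{
	memset(out, 0, sizeof(*out)); /* stands in for the FW command */
	out->vhca_id = 7;
	return 0;
}

static int get_vhca_id(unsigned short *vhca_id)
{
	struct query_out *out;
	int err;

	*vhca_id = 0; /* defined value even on failure */
	out = calloc(1, sizeof(*out));
	if (!out)
		return -1; /* -ENOMEM in the kernel */
	err = query_general_caps(out);
	if (err)
		goto out_free;
	*vhca_id = out->vhca_id; /* extract the one field we need */
out_free:
	free(out); /* freed on success and failure alike */
	return err;
}

int main(void)
{
	unsigned short id;

	if (get_vhca_id(&id))
		return 1;
	printf("vhca_id %u\n", id);
	return 0;
}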
int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap,
u16 vport, u16 opmod)
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 5b44c931b660..058dcabfaa2e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -2214,6 +2214,8 @@ static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
for (i++; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
mlxsw_pci_wqe_byte_count_set(wqe, i, 0);
+ mlxsw_pci_wqe_ipcs_set(wqe, skb->ip_summed == CHECKSUM_PARTIAL);
+
/* Everything is set up, ring producer doorbell to get HW going */
q->producer_counter++;
mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index 6bed495dcf0f..7fa94e5828de 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -90,6 +90,11 @@ MLXSW_ITEM32(pci, wqe, lp, 0x00, 30, 1);
*/
MLXSW_ITEM32(pci, wqe, type, 0x00, 23, 4);
+/* pci_wqe_ipcs
+ * Calculate IPv4 and TCP / UDP checksums.
+ */
+MLXSW_ITEM32(pci, wqe, ipcs, 0x00, 14, 1);
+
/* pci_wqe_byte_count
* Size of i-th scatter/gather entry, 0 if entry is unused.
*/
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index d714311fd884..3080ea032e7f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1574,10 +1574,12 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
netif_carrier_off(dev);
dev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_FILTER |
- NETIF_F_HW_TC;
- dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;
+ NETIF_F_HW_TC | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ dev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
dev->lltx = true;
- dev->netns_local = true;
+ dev->netns_immutable = true;
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = MLXSW_PORT_MAX_MTU - MLXSW_PORT_ETH_FRAME_HDR;
@@ -2407,8 +2409,6 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
/* Multicast Router Traps */
MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
- /* NVE traps */
- MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false),
};
static const struct mlxsw_listener mlxsw_sp1_listener[] = {
@@ -5230,25 +5230,13 @@ static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
return 0;
if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
return -EOPNOTSUPP;
- if (cu_info->linking) {
- if (!netif_running(dev))
- return 0;
- /* When the bridge is VLAN-aware, the VNI of the VxLAN
- * device needs to be mapped to a VLAN, but at this
- * point no VLANs are configured on the VxLAN device
- */
- if (br_vlan_enabled(upper_dev))
- return 0;
+ if (!netif_running(dev))
+ return 0;
+ if (cu_info->linking)
return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
dev, 0, extack);
- } else {
- /* VLANs were already flushed, which triggered the
- * necessary cleanup
- */
- if (br_vlan_enabled(upper_dev))
- return 0;
+ else
mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
- }
break;
case NETDEV_PRE_UP:
upper_dev = netdev_master_upper_dev_get(dev);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index b10f80fc651b..37cd1d002b3b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -661,10 +661,10 @@ bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *br_dev);
int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
const struct net_device *br_dev,
- const struct net_device *vxlan_dev, u16 vid,
+ struct net_device *vxlan_dev, u16 vid,
struct netlink_ext_ack *extack);
void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *vxlan_dev);
+ struct net_device *vxlan_dev);
extern struct notifier_block mlxsw_sp_switchdev_notifier;
/* spectrum.c */
@@ -754,9 +754,6 @@ void
mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
struct net_device *dev);
-bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *dev);
-u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev);
u16 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
enum mlxsw_sp_l3proto ul_proto,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
index a54eedb69a3f..067f0055a55a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
@@ -212,7 +212,22 @@ static const u8 mlxsw_sp4_acl_bf_crc6_tab[256] = {
* This array defines key offsets for easy access when copying key blocks from
* entry key to Bloom filter chunk.
*/
-static const u8 chunk_key_offsets[MLXSW_BLOOM_KEY_CHUNKS] = {2, 20, 38};
+static char *
+mlxsw_sp_acl_bf_enc_key_get(struct mlxsw_sp_acl_atcam_entry *aentry,
+ u8 chunk_index)
+{
+ switch (chunk_index) {
+ case 0:
+ return &aentry->ht_key.enc_key[2];
+ case 1:
+ return &aentry->ht_key.enc_key[20];
+ case 2:
+ return &aentry->ht_key.enc_key[38];
+ default:
+ WARN_ON_ONCE(1);
+ return &aentry->ht_key.enc_key[0];
+ }
+}
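Replacing the chunk_key_offsets[] array with a switch gives each chunk index an explicit, provably in-range mapping with a WARN_ON_ONCE() fallback, rather than an array access whose index the compiler cannot always prove bounded. A standalone model; the buffer size is illustrative, while the 2/20/38 offsets come from the patch:

#include <stdio.h>

static char enc_key[64];

static char *enc_key_get(unsigned char chunk_index)
{
	switch (chunk_index) {
	case 0:
		return &enc_key[2];
	case 1:
		return &enc_key[20];
	case 2:
		return &enc_key[38];
	default:
		fprintf(stderr, "bad chunk_index %d\n", chunk_index);
		return &enc_key[0]; /* safe fallback, mirrors the WARN path */
	}
}

int main(void)
{
	printf("chunk 1 at offset %td\n", enc_key_get(1) - enc_key); /* 20 */
	return 0;
}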
static u16 mlxsw_sp2_acl_bf_crc16_byte(u16 crc, u8 c)
{
@@ -235,9 +250,10 @@ __mlxsw_sp_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion,
u8 key_offset, u8 chunk_key_len, u8 chunk_len)
{
struct mlxsw_afk_key_info *key_info = aregion->region->key_info;
- u8 chunk_index, chunk_count, block_count;
+ u8 chunk_index, chunk_count;
char *chunk = output;
__be16 erp_region_id;
+ u32 block_count;
block_count = mlxsw_afk_key_info_blocks_count_get(key_info);
chunk_count = 1 + ((block_count - 1) >> 2);
@@ -245,12 +261,13 @@ __mlxsw_sp_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion,
(aregion->region->id << 4));
for (chunk_index = max_chunks - chunk_count; chunk_index < max_chunks;
chunk_index++) {
+ char *enc_key;
+
memset(chunk, 0, pad_bytes);
memcpy(chunk + pad_bytes, &erp_region_id,
sizeof(erp_region_id));
- memcpy(chunk + key_offset,
- &aentry->ht_key.enc_key[chunk_key_offsets[chunk_index]],
- chunk_key_len);
+ enc_key = mlxsw_sp_acl_bf_enc_key_get(aentry, chunk_index);
+ memcpy(chunk + key_offset, enc_key, chunk_key_len);
chunk += chunk_len;
}
*len = chunk_count * chunk_len;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 7d6d859cef3f..464821dd492d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -8184,41 +8184,6 @@ mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
return NULL;
}
-bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *dev)
-{
- struct mlxsw_sp_rif *rif;
-
- mutex_lock(&mlxsw_sp->router->lock);
- rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
- mutex_unlock(&mlxsw_sp->router->lock);
-
- return rif;
-}
-
-u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev)
-{
- struct mlxsw_sp_rif *rif;
- u16 vid = 0;
-
- mutex_lock(&mlxsw_sp->router->lock);
- rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
- if (!rif)
- goto out;
-
- /* We only return the VID for VLAN RIFs. Otherwise we return an
- * invalid value (0).
- */
- if (rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN)
- goto out;
-
- vid = mlxsw_sp_fid_8021q_vid(rif->fid);
-
-out:
- mutex_unlock(&mlxsw_sp->router->lock);
- return vid;
-}
-
static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
{
char ritr_pl[MLXSW_REG_RITR_LEN];
@@ -8417,19 +8382,6 @@ u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
return lb_rif->common.rif_index;
}
-u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
-{
- struct net_device *dev = mlxsw_sp_rif_dev(&lb_rif->common);
- u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(dev);
- struct mlxsw_sp_vr *ul_vr;
-
- ul_vr = mlxsw_sp_vr_get(lb_rif->common.mlxsw_sp, ul_tb_id, NULL);
- if (WARN_ON(IS_ERR(ul_vr)))
- return 0;
-
- return ul_vr->id;
-}
-
u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
{
return lb_rif->ul_rif_id;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index 0432c7cc6b07..313efab5c324 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -90,7 +90,6 @@ struct mlxsw_sp_ipip_entry;
struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
u16 rif_index);
u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *rif);
-u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *rif);
u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif);
u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev);
int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 6397ff0dc951..a48bf342084d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -2929,23 +2929,8 @@ void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
}
-int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *br_dev,
- const struct net_device *vxlan_dev, u16 vid,
- struct netlink_ext_ack *extack)
-{
- struct mlxsw_sp_bridge_device *bridge_device;
-
- bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
- if (WARN_ON(!bridge_device))
- return -EINVAL;
-
- return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid,
- extack);
-}
-
-void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *vxlan_dev)
+static void __mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *vxlan_dev)
{
struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
struct mlxsw_sp_fid *fid;
@@ -2963,6 +2948,47 @@ void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_fid_put(fid);
}
+int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *br_dev,
+ struct net_device *vxlan_dev, u16 vid,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ int err;
+
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
+ if (WARN_ON(!bridge_device))
+ return -EINVAL;
+
+ mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(bridge_device->dev);
+ if (!mlxsw_sp_port)
+ return -EINVAL;
+
+ err = bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid,
+ extack);
+ if (err)
+ return err;
+
+ err = switchdev_bridge_port_offload(vxlan_dev, mlxsw_sp_port->dev,
+ NULL, NULL, NULL, false, extack);
+ if (err)
+ goto err_bridge_port_offload;
+
+ return 0;
+
+err_bridge_port_offload:
+ __mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
+ return err;
+}
+
+void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *vxlan_dev)
+{
+ switchdev_bridge_port_unoffload(vxlan_dev, NULL, NULL, NULL);
+ __mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
+}
+
static void
mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr *vxlan_addr,
enum mlxsw_sp_l3proto *proto,
@@ -3867,7 +3893,7 @@ mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_fid_put(fid);
return -EINVAL;
}
- mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
+ __mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
mlxsw_sp_fid_put(fid);
return 0;
}
@@ -3883,7 +3909,7 @@ mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
/* Fourth case: The new VLAN is PVID, which means the VLAN currently
* mapped to the VNI should be unmapped
*/
- mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
+ __mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
mlxsw_sp_fid_put(fid);
/* Fifth case: The new VLAN is also egress untagged, which means the
@@ -3923,7 +3949,7 @@ mlxsw_sp_switchdev_vxlan_vlan_del(struct mlxsw_sp *mlxsw_sp,
if (mlxsw_sp_fid_8021q_vid(fid) != vid)
goto out;
- mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
+ __mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
out:
mlxsw_sp_fid_put(fid);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index 1f9c1c86839f..b5c3f789c685 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -959,18 +959,18 @@ static const struct mlxsw_sp_trap_item mlxsw_sp_trap_items_arr[] = {
},
{
.trap = MLXSW_SP_TRAP_CONTROL(ARP_REQUEST, NEIGH_DISCOVERY,
- MIRROR),
+ TRAP),
.listeners_arr = {
- MLXSW_SP_RXL_MARK(ROUTER_ARPBC, NEIGH_DISCOVERY,
- TRAP_TO_CPU, false),
+ MLXSW_SP_RXL_NO_MARK(ARPBC, NEIGH_DISCOVERY,
+ TRAP_TO_CPU, false),
},
},
{
.trap = MLXSW_SP_TRAP_CONTROL(ARP_RESPONSE, NEIGH_DISCOVERY,
- MIRROR),
+ TRAP),
.listeners_arr = {
- MLXSW_SP_RXL_MARK(ROUTER_ARPUC, NEIGH_DISCOVERY,
- TRAP_TO_CPU, false),
+ MLXSW_SP_RXL_NO_MARK(ARPUC, NEIGH_DISCOVERY,
+ TRAP_TO_CPU, false),
},
},
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 83477c8e6971..80ee5c4825dc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -29,6 +29,8 @@ enum {
MLXSW_TRAP_ID_FDB_MISMATCH = 0x3B,
MLXSW_TRAP_ID_FID_MISS = 0x3D,
MLXSW_TRAP_ID_DECAP_ECN0 = 0x40,
+ MLXSW_TRAP_ID_ARPBC = 0x50,
+ MLXSW_TRAP_ID_ARPUC = 0x51,
MLXSW_TRAP_ID_MTUERROR = 0x52,
MLXSW_TRAP_ID_TTLERROR = 0x53,
MLXSW_TRAP_ID_LBERROR = 0x54,
@@ -66,13 +68,10 @@ enum {
MLXSW_TRAP_ID_HOST_MISS_IPV6 = 0x92,
MLXSW_TRAP_ID_IPIP_DECAP_ERROR = 0xB1,
MLXSW_TRAP_ID_NVE_DECAP_ARP = 0xB8,
- MLXSW_TRAP_ID_NVE_ENCAP_ARP = 0xBD,
MLXSW_TRAP_ID_IPV4_BFD = 0xD0,
MLXSW_TRAP_ID_IPV6_BFD = 0xD1,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7,
- MLXSW_TRAP_ID_ROUTER_ARPBC = 0xE0,
- MLXSW_TRAP_ID_ROUTER_ARPUC = 0xE1,
MLXSW_TRAP_ID_DISCARD_NON_ROUTABLE = 0x11A,
MLXSW_TRAP_ID_DISCARD_ROUTER2 = 0x130,
MLXSW_TRAP_ID_DISCARD_ROUTER3 = 0x131,
diff --git a/drivers/net/ethernet/meta/fbnic/Makefile b/drivers/net/ethernet/meta/fbnic/Makefile
index 239b2258ec65..0dbc634adb4b 100644
--- a/drivers/net/ethernet/meta/fbnic/Makefile
+++ b/drivers/net/ethernet/meta/fbnic/Makefile
@@ -20,6 +20,7 @@ fbnic-y := fbnic_csr.o \
fbnic_pci.o \
fbnic_phylink.o \
fbnic_rpc.o \
+ fbnic_time.o \
fbnic_tlv.o \
fbnic_txrx.o
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic.h b/drivers/net/ethernet/meta/fbnic/fbnic.h
index 14751f16e125..4ca7b99ef131 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic.h
@@ -60,6 +60,12 @@ struct fbnic_dev {
u8 mac_addr_boundary;
u8 tce_tcam_last;
+ /* IP TCAM */
+ struct fbnic_ip_addr ip_src[FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES];
+ struct fbnic_ip_addr ip_dst[FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES];
+ struct fbnic_ip_addr ipo_src[FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES];
+ struct fbnic_ip_addr ipo_dst[FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES];
+
/* Number of TCQs/RCQs available on hardware */
u16 max_num_queues;
@@ -180,6 +186,9 @@ void fbnic_dbg_exit(void);
void fbnic_csr_get_regs(struct fbnic_dev *fbd, u32 *data, u32 *regs_version);
int fbnic_csr_regs_len(struct fbnic_dev *fbd);
+void fbnic_config_txrx_usecs(struct fbnic_napi_vector *nv, u32 arm);
+void fbnic_config_rx_frames(struct fbnic_napi_vector *nv);
+
enum fbnic_boards {
fbnic_board_asic
};
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.c b/drivers/net/ethernet/meta/fbnic/fbnic_csr.c
index aeb9f333f4c7..d9c0dc1c2af9 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_csr.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.c
@@ -30,6 +30,7 @@ static const struct fbnic_csr_bounds fbnic_csr_sects[] = {
FBNIC_BOUNDS(RSFEC),
FBNIC_BOUNDS(MAC_MAC),
FBNIC_BOUNDS(SIG),
+ FBNIC_BOUNDS(PCIE_SS_COMPHY),
FBNIC_BOUNDS(PUL_USER),
FBNIC_BOUNDS(QUEUE),
FBNIC_BOUNDS(RPC_RAM),
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
index 02bb81b3c506..3b12a0ab5906 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
@@ -605,8 +605,11 @@ enum {
FBNIC_RPC_ACT_TBL0_DEST_EI = 4,
};
+#define FBNIC_RPC_ACT_TBL0_Q_SEL CSR_BIT(4)
+#define FBNIC_RPC_ACT_TBL0_Q_ID CSR_GENMASK(15, 8)
#define FBNIC_RPC_ACT_TBL0_DMA_HINT CSR_GENMASK(24, 16)
#define FBNIC_RPC_ACT_TBL0_TS_ENA CSR_BIT(28)
+#define FBNIC_RPC_ACT_TBL0_ACT_TBL_IDX CSR_BIT(29)
#define FBNIC_RPC_ACT_TBL0_RSS_CTXT_ID CSR_BIT(30)
#define FBNIC_RPC_ACT_TBL1_DEFAULT 0x0840b /* 0x2102c */
@@ -677,6 +680,9 @@ enum {
#define FBNIC_RPC_TCAM_OUTER_IPSRC(m, n)\
(0x08c00 + 0x08 * (n) + (m)) /* 0x023000 + 32*n + 4*m */
+#define FBNIC_RPC_TCAM_IP_ADDR_VALUE CSR_GENMASK(15, 0)
+#define FBNIC_RPC_TCAM_IP_ADDR_MASK CSR_GENMASK(31, 16)
+
#define FBNIC_RPC_TCAM_OUTER_IPDST(m, n)\
(0x08c48 + 0x08 * (n) + (m)) /* 0x023120 + 32*n + 4*m */
#define FBNIC_RPC_TCAM_IPSRC(m, n)\
@@ -782,13 +788,52 @@ enum {
#define FBNIC_MAC_STAT_TX_MULTICAST_H 0x11a4b /* 0x4692c */
#define FBNIC_MAC_STAT_TX_BROADCAST_L 0x11a4c /* 0x46930 */
#define FBNIC_MAC_STAT_TX_BROADCAST_H 0x11a4d /* 0x46934 */
+
+/* PCIE Comphy Registers */
+#define FBNIC_CSR_START_PCIE_SS_COMPHY 0x2442e /* CSR section delimiter */
+#define FBNIC_CSR_END_PCIE_SS_COMPHY 0x279d7 /* CSR section delimiter */
+
/* PUL User Registers */
#define FBNIC_CSR_START_PUL_USER 0x31000 /* CSR section delimiter */
#define FBNIC_PUL_OB_TLP_HDR_AW_CFG 0x3103d /* 0xc40f4 */
#define FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME CSR_BIT(18)
#define FBNIC_PUL_OB_TLP_HDR_AR_CFG 0x3103e /* 0xc40f8 */
#define FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME CSR_BIT(18)
-#define FBNIC_CSR_END_PUL_USER 0x31080 /* CSR section delimiter */
+#define FBNIC_PUL_USER_OB_RD_TLP_CNT_31_0 \
+ 0x3106e /* 0xc41b8 */
+#define FBNIC_PUL_USER_OB_RD_DWORD_CNT_31_0 \
+ 0x31070 /* 0xc41c0 */
+#define FBNIC_PUL_USER_OB_RD_DWORD_CNT_63_32 \
+ 0x31071 /* 0xc41c4 */
+#define FBNIC_PUL_USER_OB_WR_TLP_CNT_31_0 \
+ 0x31072 /* 0xc41c8 */
+#define FBNIC_PUL_USER_OB_WR_TLP_CNT_63_32 \
+ 0x31073 /* 0xc41cc */
+#define FBNIC_PUL_USER_OB_WR_DWORD_CNT_31_0 \
+ 0x31074 /* 0xc41d0 */
+#define FBNIC_PUL_USER_OB_WR_DWORD_CNT_63_32 \
+ 0x31075 /* 0xc41d4 */
+#define FBNIC_PUL_USER_OB_CPL_TLP_CNT_31_0 \
+ 0x31076 /* 0xc41d8 */
+#define FBNIC_PUL_USER_OB_CPL_TLP_CNT_63_32 \
+ 0x31077 /* 0xc41dc */
+#define FBNIC_PUL_USER_OB_CPL_DWORD_CNT_31_0 \
+ 0x31078 /* 0xc41e0 */
+#define FBNIC_PUL_USER_OB_CPL_DWORD_CNT_63_32 \
+ 0x31079 /* 0xc41e4 */
+#define FBNIC_PUL_USER_OB_RD_DBG_CNT_CPL_CRED_31_0 \
+ 0x3107a /* 0xc41e8 */
+#define FBNIC_PUL_USER_OB_RD_DBG_CNT_CPL_CRED_63_32 \
+ 0x3107b /* 0xc41ec */
+#define FBNIC_PUL_USER_OB_RD_DBG_CNT_TAG_31_0 \
+ 0x3107c /* 0xc41f0 */
+#define FBNIC_PUL_USER_OB_RD_DBG_CNT_TAG_63_32 \
+ 0x3107d /* 0xc41f4 */
+#define FBNIC_PUL_USER_OB_RD_DBG_CNT_NP_CRED_31_0 \
+ 0x3107e /* 0xc41f8 */
+#define FBNIC_PUL_USER_OB_RD_DBG_CNT_NP_CRED_63_32 \
+ 0x3107f /* 0xc41fc */
+#define FBNIC_CSR_END_PUL_USER 0x310ea /* CSR section delimiter */
/* Queue Registers
*
@@ -928,43 +973,6 @@ enum {
#define FBNIC_MAX_QUEUES 128
#define FBNIC_CSR_END_QUEUE (0x40000 + 0x400 * FBNIC_MAX_QUEUES - 1)
-/* PUL User Registers*/
-#define FBNIC_PUL_USER_OB_RD_TLP_CNT_31_0 \
- 0x3106e /* 0xc41b8 */
-#define FBNIC_PUL_USER_OB_RD_DWORD_CNT_31_0 \
- 0x31070 /* 0xc41c0 */
-#define FBNIC_PUL_USER_OB_RD_DWORD_CNT_63_32 \
- 0x31071 /* 0xc41c4 */
-#define FBNIC_PUL_USER_OB_WR_TLP_CNT_31_0 \
- 0x31072 /* 0xc41c8 */
-#define FBNIC_PUL_USER_OB_WR_TLP_CNT_63_32 \
- 0x31073 /* 0xc41cc */
-#define FBNIC_PUL_USER_OB_WR_DWORD_CNT_31_0 \
- 0x31074 /* 0xc41d0 */
-#define FBNIC_PUL_USER_OB_WR_DWORD_CNT_63_32 \
- 0x31075 /* 0xc41d4 */
-#define FBNIC_PUL_USER_OB_CPL_TLP_CNT_31_0 \
- 0x31076 /* 0xc41d8 */
-#define FBNIC_PUL_USER_OB_CPL_TLP_CNT_63_32 \
- 0x31077 /* 0xc41dc */
-#define FBNIC_PUL_USER_OB_CPL_DWORD_CNT_31_0 \
- 0x31078 /* 0xc41e0 */
-#define FBNIC_PUL_USER_OB_CPL_DWORD_CNT_63_32 \
- 0x31079 /* 0xc41e4 */
-#define FBNIC_PUL_USER_OB_RD_DBG_CNT_CPL_CRED_31_0 \
- 0x3107a /* 0xc41e8 */
-#define FBNIC_PUL_USER_OB_RD_DBG_CNT_CPL_CRED_63_32 \
- 0x3107b /* 0xc41ec */
-#define FBNIC_PUL_USER_OB_RD_DBG_CNT_TAG_31_0 \
- 0x3107c /* 0xc41f0 */
-#define FBNIC_PUL_USER_OB_RD_DBG_CNT_TAG_63_32 \
- 0x3107d /* 0xc41f4 */
-#define FBNIC_PUL_USER_OB_RD_DBG_CNT_NP_CRED_31_0 \
- 0x3107e /* 0xc41f8 */
-#define FBNIC_PUL_USER_OB_RD_DBG_CNT_NP_CRED_63_32 \
- 0x3107f /* 0xc41fc */
-#define FBNIC_CSR_END_PUL_USER 0x31080 /* CSR section delimiter */
-
/* BAR 4 CSRs */
/* The IPC mailbox consists of 32 mailboxes, with each mailbox consisting
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c b/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c
index 59951b5abdb7..e8f2d7f2d962 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c
@@ -10,6 +10,166 @@
static struct dentry *fbnic_dbg_root;
+static void fbnic_dbg_desc_break(struct seq_file *s, int i)
+{
+ while (i--)
+ seq_putc(s, '-');
+
+ seq_putc(s, '\n');
+}
+
+static int fbnic_dbg_mac_addr_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+ char hdr[80];
+ int i;
+
+ /* Generate Header */
+ snprintf(hdr, sizeof(hdr), "%3s %s %-17s %s\n",
+ "Idx", "S", "TCAM Bitmap", "Addr/Mask");
+ seq_puts(s, hdr);
+ fbnic_dbg_desc_break(s, strnlen(hdr, sizeof(hdr)));
+
+ for (i = 0; i < FBNIC_RPC_TCAM_MACDA_NUM_ENTRIES; i++) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
+
+ seq_printf(s, "%02d %d %64pb %pm\n",
+ i, mac_addr->state, mac_addr->act_tcam,
+ mac_addr->value.addr8);
+ seq_printf(s, " %pm\n",
+ mac_addr->mask.addr8);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_mac_addr);
+
+static int fbnic_dbg_tce_tcam_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+ int i, tcam_idx = 0;
+ char hdr[80];
+
+ /* Generate Header */
+ snprintf(hdr, sizeof(hdr), "%3s %s %-17s %s\n",
+ "Idx", "S", "TCAM Bitmap", "Addr/Mask");
+ seq_puts(s, hdr);
+ fbnic_dbg_desc_break(s, strnlen(hdr, sizeof(hdr)));
+
+ for (i = 0; i < ARRAY_SIZE(fbd->mac_addr); i++) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
+
+ /* Verify BMC bit is set */
+ if (!test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam))
+ continue;
+
+ if (tcam_idx == FBNIC_TCE_TCAM_NUM_ENTRIES)
+ break;
+
+ seq_printf(s, "%02d %d %64pb %pm\n",
+ tcam_idx, mac_addr->state, mac_addr->act_tcam,
+ mac_addr->value.addr8);
+ seq_printf(s, " %pm\n",
+ mac_addr->mask.addr8);
+ tcam_idx++;
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_tce_tcam);
+
+static int fbnic_dbg_act_tcam_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+ char hdr[80];
+ int i;
+
+ /* Generate Header */
+ snprintf(hdr, sizeof(hdr), "%3s %s %-55s %-4s %s\n",
+ "Idx", "S", "Value/Mask", "RSS", "Dest");
+ seq_puts(s, hdr);
+ fbnic_dbg_desc_break(s, strnlen(hdr, sizeof(hdr)));
+
+ for (i = 0; i < FBNIC_RPC_TCAM_ACT_NUM_ENTRIES; i++) {
+ struct fbnic_act_tcam *act_tcam = &fbd->act_tcam[i];
+
+ seq_printf(s, "%02d %d %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %08x\n",
+ i, act_tcam->state,
+ act_tcam->value.tcam[10], act_tcam->value.tcam[9],
+ act_tcam->value.tcam[8], act_tcam->value.tcam[7],
+ act_tcam->value.tcam[6], act_tcam->value.tcam[5],
+ act_tcam->value.tcam[4], act_tcam->value.tcam[3],
+ act_tcam->value.tcam[2], act_tcam->value.tcam[1],
+ act_tcam->value.tcam[0], act_tcam->rss_en_mask,
+ act_tcam->dest);
+ seq_printf(s, " %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
+ act_tcam->mask.tcam[10], act_tcam->mask.tcam[9],
+ act_tcam->mask.tcam[8], act_tcam->mask.tcam[7],
+ act_tcam->mask.tcam[6], act_tcam->mask.tcam[5],
+ act_tcam->mask.tcam[4], act_tcam->mask.tcam[3],
+ act_tcam->mask.tcam[2], act_tcam->mask.tcam[1],
+ act_tcam->mask.tcam[0]);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_act_tcam);
+
+static int fbnic_dbg_ip_addr_show(struct seq_file *s,
+ struct fbnic_ip_addr *ip_addr)
+{
+ char hdr[80];
+ int i;
+
+ /* Generate Header */
+ snprintf(hdr, sizeof(hdr), "%3s %s %-17s %s %s\n",
+ "Idx", "S", "TCAM Bitmap", "V", "Addr/Mask");
+ seq_puts(s, hdr);
+ fbnic_dbg_desc_break(s, strnlen(hdr, sizeof(hdr)));
+
+ for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES; i++, ip_addr++) {
+ seq_printf(s, "%02d %d %64pb %d %pi6\n",
+ i, ip_addr->state, ip_addr->act_tcam,
+ ip_addr->version, &ip_addr->value);
+ seq_printf(s, " %pi6\n",
+ &ip_addr->mask);
+ }
+
+ return 0;
+}
+
+static int fbnic_dbg_ip_src_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+
+ return fbnic_dbg_ip_addr_show(s, fbd->ip_src);
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_ip_src);
+
+static int fbnic_dbg_ip_dst_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+
+ return fbnic_dbg_ip_addr_show(s, fbd->ip_dst);
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_ip_dst);
+
+static int fbnic_dbg_ipo_src_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+
+ return fbnic_dbg_ip_addr_show(s, fbd->ipo_src);
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_ipo_src);
+
+static int fbnic_dbg_ipo_dst_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+
+ return fbnic_dbg_ip_addr_show(s, fbd->ipo_dst);
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_ipo_dst);
+
static int fbnic_dbg_pcie_stats_show(struct seq_file *s, void *v)
{
struct fbnic_dev *fbd = s->private;
@@ -48,6 +208,20 @@ void fbnic_dbg_fbd_init(struct fbnic_dev *fbd)
fbd->dbg_fbd = debugfs_create_dir(name, fbnic_dbg_root);
debugfs_create_file("pcie_stats", 0400, fbd->dbg_fbd, fbd,
&fbnic_dbg_pcie_stats_fops);
+ debugfs_create_file("mac_addr", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_mac_addr_fops);
+ debugfs_create_file("tce_tcam", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_tce_tcam_fops);
+ debugfs_create_file("act_tcam", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_act_tcam_fops);
+ debugfs_create_file("ip_src", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_ip_src_fops);
+ debugfs_create_file("ip_dst", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_ip_dst_fops);
+ debugfs_create_file("ipo_src", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_ipo_src_fops);
+ debugfs_create_file("ipo_dst", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_ipo_dst_fops);
}
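With these hooks in place, the RPC state can be inspected at runtime. Assuming debugfs is mounted in the usual place (the per-device directory name below is a placeholder):

  mount -t debugfs none /sys/kernel/debug
  cat /sys/kernel/debug/fbnic/<device>/act_tcam
  cat /sys/kernel/debug/fbnic/<device>/ip_src

Each file is read-only (mode 0400) and dumps the corresponding table on every read.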
void fbnic_dbg_fbd_exit(struct fbnic_dev *fbd)
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
index 20cd9f5f89e2..0a751a2aaf73 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
@@ -4,6 +4,7 @@
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
+#include <net/ipv6.h>
#include "fbnic.h"
#include "fbnic_netdev.h"
@@ -135,6 +136,168 @@ static void fbnic_clone_free(struct fbnic_net *clone)
kfree(clone);
}
+static int fbnic_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ ec->tx_coalesce_usecs = fbn->tx_usecs;
+ ec->rx_coalesce_usecs = fbn->rx_usecs;
+ ec->rx_max_coalesced_frames = fbn->rx_max_frames;
+
+ return 0;
+}
+
+static int fbnic_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ /* Verify against hardware limits */
+ if (ec->rx_coalesce_usecs > FIELD_MAX(FBNIC_INTR_CQ_REARM_RCQ_TIMEOUT)) {
+ NL_SET_ERR_MSG_MOD(extack, "rx_usecs is above device max");
+ return -EINVAL;
+ }
+ if (ec->tx_coalesce_usecs > FIELD_MAX(FBNIC_INTR_CQ_REARM_TCQ_TIMEOUT)) {
+ NL_SET_ERR_MSG_MOD(extack, "tx_usecs is above device max");
+ return -EINVAL;
+ }
+ if (ec->rx_max_coalesced_frames >
+ FIELD_MAX(FBNIC_QUEUE_RIM_THRESHOLD_RCD_MASK) /
+ FBNIC_MIN_RXD_PER_FRAME) {
+ NL_SET_ERR_MSG_MOD(extack, "rx_frames is above device max");
+ return -EINVAL;
+ }
+
+ fbn->tx_usecs = ec->tx_coalesce_usecs;
+ fbn->rx_usecs = ec->rx_coalesce_usecs;
+ fbn->rx_max_frames = ec->rx_max_coalesced_frames;
+
+ if (netif_running(netdev)) {
+ int i;
+
+ for (i = 0; i < fbn->num_napi; i++) {
+ struct fbnic_napi_vector *nv = fbn->napi[i];
+
+ fbnic_config_txrx_usecs(nv, 0);
+ fbnic_config_rx_frames(nv);
+ }
+ }
+
+ return 0;
+}
+
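These handlers back the standard ethtool coalescing commands. An illustrative invocation, assuming an interface named eth0 (the values are arbitrary and only have to pass the FIELD_MAX() checks above):

  ethtool -C eth0 rx-usecs 120 rx-frames 88 tx-usecs 35
  ethtool -c eth0

If the interface is running, the new values are pushed to every NAPI vector immediately; otherwise they simply take effect on the next open.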
+static void
+fbnic_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ ring->rx_max_pending = FBNIC_QUEUE_SIZE_MAX;
+ ring->rx_mini_max_pending = FBNIC_QUEUE_SIZE_MAX;
+ ring->rx_jumbo_max_pending = FBNIC_QUEUE_SIZE_MAX;
+ ring->tx_max_pending = FBNIC_QUEUE_SIZE_MAX;
+
+ ring->rx_pending = fbn->rcq_size;
+ ring->rx_mini_pending = fbn->hpq_size;
+ ring->rx_jumbo_pending = fbn->ppq_size;
+ ring->tx_pending = fbn->txq_size;
+}
+
+static void fbnic_set_rings(struct fbnic_net *fbn,
+ struct ethtool_ringparam *ring)
+{
+ fbn->rcq_size = ring->rx_pending;
+ fbn->hpq_size = ring->rx_mini_pending;
+ fbn->ppq_size = ring->rx_jumbo_pending;
+ fbn->txq_size = ring->tx_pending;
+}
+
+static int
+fbnic_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_net *clone;
+ int err;
+
+ ring->rx_pending = roundup_pow_of_two(ring->rx_pending);
+ ring->rx_mini_pending = roundup_pow_of_two(ring->rx_mini_pending);
+ ring->rx_jumbo_pending = roundup_pow_of_two(ring->rx_jumbo_pending);
+ ring->tx_pending = roundup_pow_of_two(ring->tx_pending);
+
+ /* These are absolute minimums that allow the device and driver to
+ * operate, but they do not necessarily guarantee reasonable performance.
+ * Settings below an Rx queue size of 128 and BDQs smaller than 64 are
+ * likely suboptimal at best.
+ */
+ if (ring->rx_pending < max(FBNIC_QUEUE_SIZE_MIN, FBNIC_RX_DESC_MIN) ||
+ ring->rx_mini_pending < FBNIC_QUEUE_SIZE_MIN ||
+ ring->rx_jumbo_pending < FBNIC_QUEUE_SIZE_MIN ||
+ ring->tx_pending < max(FBNIC_QUEUE_SIZE_MIN, FBNIC_TX_DESC_MIN)) {
+ NL_SET_ERR_MSG_MOD(extack, "requested ring size too small");
+ return -EINVAL;
+ }
+
+ if (!netif_running(netdev)) {
+ fbnic_set_rings(fbn, ring);
+ return 0;
+ }
+
+ clone = fbnic_clone_create(fbn);
+ if (!clone)
+ return -ENOMEM;
+
+ fbnic_set_rings(clone, ring);
+
+ err = fbnic_alloc_napi_vectors(clone);
+ if (err)
+ goto err_free_clone;
+
+ err = fbnic_alloc_resources(clone);
+ if (err)
+ goto err_free_napis;
+
+ fbnic_down_noidle(fbn);
+ err = fbnic_wait_all_queues_idle(fbn->fbd, true);
+ if (err)
+ goto err_start_stack;
+
+ err = fbnic_set_netif_queues(clone);
+ if (err)
+ goto err_start_stack;
+
+ /* Nothing can fail past this point */
+ fbnic_flush(fbn);
+
+ fbnic_clone_swap(fbn, clone);
+
+ fbnic_up(fbn);
+
+ fbnic_free_resources(clone);
+ fbnic_free_napi_vectors(clone);
+ fbnic_clone_free(clone);
+
+ return 0;
+
+err_start_stack:
+ fbnic_flush(fbn);
+ fbnic_up(fbn);
+ fbnic_free_resources(clone);
+err_free_napis:
+ fbnic_free_napi_vectors(clone);
+err_free_clone:
+ fbnic_clone_free(clone);
+ return err;
+}
+
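A sketch of exercising the ring path, again with a hypothetical eth0; note that each requested size is rounded up to a power of two before the minimum checks run:

  ethtool -g eth0
  ethtool -G eth0 rx 4096 tx 2048

When the interface is up, the clone is fully allocated before the live rings are quiesced, so a failed resize unwinds to the old configuration.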
static void fbnic_get_strings(struct net_device *dev, u32 sset, u8 *data)
{
int i;
@@ -218,11 +381,234 @@ fbnic_get_rss_hash_opts(struct fbnic_net *fbn, struct ethtool_rxnfc *cmd)
return 0;
}
+static int fbnic_get_cls_rule_all(struct fbnic_net *fbn,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+ int i, cnt = 0;
+
+ /* Report maximum rule count */
+ cmd->data = FBNIC_RPC_ACT_TBL_NFC_ENTRIES;
+
+ for (i = 0; i < FBNIC_RPC_ACT_TBL_NFC_ENTRIES; i++) {
+ int idx = i + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
+ struct fbnic_act_tcam *act_tcam;
+
+ act_tcam = &fbd->act_tcam[idx];
+ if (act_tcam->state != FBNIC_TCAM_S_VALID)
+ continue;
+
+ if (rule_locs) {
+ if (cnt == cmd->rule_cnt)
+ return -EMSGSIZE;
+
+ rule_locs[cnt] = i;
+ }
+
+ cnt++;
+ }
+
+ return cnt;
+}
+
+static int fbnic_get_cls_rule(struct fbnic_net *fbn, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp;
+ struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_act_tcam *act_tcam;
+ int idx;
+
+ fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ if (fsp->location >= FBNIC_RPC_ACT_TBL_NFC_ENTRIES)
+ return -EINVAL;
+
+ idx = fsp->location + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
+ act_tcam = &fbd->act_tcam[idx];
+
+ if (act_tcam->state != FBNIC_TCAM_S_VALID)
+ return -EINVAL;
+
+ /* Report maximum rule count */
+ cmd->data = FBNIC_RPC_ACT_TBL_NFC_ENTRIES;
+
+ /* Set flow type field */
+ if (!(act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_IP_VALID)) {
+ fsp->flow_type = ETHER_FLOW;
+ if (!FIELD_GET(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
+ act_tcam->mask.tcam[1])) {
+ struct fbnic_mac_addr *mac_addr;
+
+ idx = FIELD_GET(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
+ act_tcam->value.tcam[1]);
+ mac_addr = &fbd->mac_addr[idx];
+
+ ether_addr_copy(fsp->h_u.ether_spec.h_dest,
+ mac_addr->value.addr8);
+ eth_broadcast_addr(fsp->m_u.ether_spec.h_dest);
+ }
+ } else if (act_tcam->value.tcam[1] &
+ FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID) {
+ fsp->flow_type = IPV6_USER_FLOW;
+ fsp->h_u.usr_ip6_spec.l4_proto = IPPROTO_IPV6;
+ fsp->m_u.usr_ip6_spec.l4_proto = 0xff;
+
+ if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX,
+ act_tcam->mask.tcam[0])) {
+ struct fbnic_ip_addr *ip_addr;
+ int i;
+
+ idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX,
+ act_tcam->value.tcam[0]);
+ ip_addr = &fbd->ipo_src[idx];
+
+ for (i = 0; i < 4; i++) {
+ fsp->h_u.usr_ip6_spec.ip6src[i] =
+ ip_addr->value.s6_addr32[i];
+ fsp->m_u.usr_ip6_spec.ip6src[i] =
+ ~ip_addr->mask.s6_addr32[i];
+ }
+ }
+
+ if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX,
+ act_tcam->mask.tcam[0])) {
+ struct fbnic_ip_addr *ip_addr;
+ int i;
+
+ idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX,
+ act_tcam->value.tcam[0]);
+ ip_addr = &fbd->ipo_dst[idx];
+
+ for (i = 0; i < 4; i++) {
+ fsp->h_u.usr_ip6_spec.ip6dst[i] =
+ ip_addr->value.s6_addr32[i];
+ fsp->m_u.usr_ip6_spec.ip6dst[i] =
+ ~ip_addr->mask.s6_addr32[i];
+ }
+ }
+ } else if ((act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_IP_IS_V6)) {
+ if (act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_L4_VALID) {
+ if (act_tcam->value.tcam[1] &
+ FBNIC_RPC_TCAM_ACT1_L4_IS_UDP)
+ fsp->flow_type = UDP_V6_FLOW;
+ else
+ fsp->flow_type = TCP_V6_FLOW;
+ fsp->h_u.tcp_ip6_spec.psrc =
+ cpu_to_be16(act_tcam->value.tcam[3]);
+ fsp->m_u.tcp_ip6_spec.psrc =
+ cpu_to_be16(~act_tcam->mask.tcam[3]);
+ fsp->h_u.tcp_ip6_spec.pdst =
+ cpu_to_be16(act_tcam->value.tcam[4]);
+ fsp->m_u.tcp_ip6_spec.pdst =
+ cpu_to_be16(~act_tcam->mask.tcam[4]);
+ } else {
+ fsp->flow_type = IPV6_USER_FLOW;
+ }
+
+ if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
+ act_tcam->mask.tcam[0])) {
+ struct fbnic_ip_addr *ip_addr;
+ int i;
+
+ idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
+ act_tcam->value.tcam[0]);
+ ip_addr = &fbd->ip_src[idx];
+
+ for (i = 0; i < 4; i++) {
+ fsp->h_u.usr_ip6_spec.ip6src[i] =
+ ip_addr->value.s6_addr32[i];
+ fsp->m_u.usr_ip6_spec.ip6src[i] =
+ ~ip_addr->mask.s6_addr32[i];
+ }
+ }
+
+ if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
+ act_tcam->mask.tcam[0])) {
+ struct fbnic_ip_addr *ip_addr;
+ int i;
+
+ idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
+ act_tcam->value.tcam[0]);
+ ip_addr = &fbd->ip_dst[idx];
+
+ for (i = 0; i < 4; i++) {
+ fsp->h_u.usr_ip6_spec.ip6dst[i] =
+ ip_addr->value.s6_addr32[i];
+ fsp->m_u.usr_ip6_spec.ip6dst[i] =
+ ~ip_addr->mask.s6_addr32[i];
+ }
+ }
+ } else {
+ if (act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_L4_VALID) {
+ if (act_tcam->value.tcam[1] &
+ FBNIC_RPC_TCAM_ACT1_L4_IS_UDP)
+ fsp->flow_type = UDP_V4_FLOW;
+ else
+ fsp->flow_type = TCP_V4_FLOW;
+ fsp->h_u.tcp_ip4_spec.psrc =
+ cpu_to_be16(act_tcam->value.tcam[3]);
+ fsp->m_u.tcp_ip4_spec.psrc =
+ cpu_to_be16(~act_tcam->mask.tcam[3]);
+ fsp->h_u.tcp_ip4_spec.pdst =
+ cpu_to_be16(act_tcam->value.tcam[4]);
+ fsp->m_u.tcp_ip4_spec.pdst =
+ cpu_to_be16(~act_tcam->mask.tcam[4]);
+ } else {
+ fsp->flow_type = IPV4_USER_FLOW;
+ fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+ }
+
+ if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
+ act_tcam->mask.tcam[0])) {
+ struct fbnic_ip_addr *ip_addr;
+
+ idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
+ act_tcam->value.tcam[0]);
+ ip_addr = &fbd->ip_src[idx];
+
+ fsp->h_u.usr_ip4_spec.ip4src =
+ ip_addr->value.s6_addr32[3];
+ fsp->m_u.usr_ip4_spec.ip4src =
+ ~ip_addr->mask.s6_addr32[3];
+ }
+
+ if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
+ act_tcam->mask.tcam[0])) {
+ struct fbnic_ip_addr *ip_addr;
+
+ idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
+ act_tcam->value.tcam[0]);
+ ip_addr = &fbd->ip_dst[idx];
+
+ fsp->h_u.usr_ip4_spec.ip4dst =
+ ip_addr->value.s6_addr32[3];
+ fsp->m_u.usr_ip4_spec.ip4dst =
+ ~ip_addr->mask.s6_addr32[3];
+ }
+ }
+
+ /* Record action */
+ if (act_tcam->dest & FBNIC_RPC_ACT_TBL0_DROP)
+ fsp->ring_cookie = RX_CLS_FLOW_DISC;
+ else if (act_tcam->dest & FBNIC_RPC_ACT_TBL0_Q_SEL)
+ fsp->ring_cookie = FIELD_GET(FBNIC_RPC_ACT_TBL0_Q_ID,
+ act_tcam->dest);
+ else
+ fsp->flow_type |= FLOW_RSS;
+
+ cmd->rss_context = FIELD_GET(FBNIC_RPC_ACT_TBL0_RSS_CTXT_ID,
+ act_tcam->dest);
+
+ return 0;
+}
+
static int fbnic_get_rxnfc(struct net_device *netdev,
struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
struct fbnic_net *fbn = netdev_priv(netdev);
int ret = -EOPNOTSUPP;
+ u32 special = 0;
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
@@ -232,6 +618,22 @@ static int fbnic_get_rxnfc(struct net_device *netdev,
case ETHTOOL_GRXFH:
ret = fbnic_get_rss_hash_opts(fbn, cmd);
break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = fbnic_get_cls_rule(fbn, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ rule_locs = NULL;
+ special = RX_CLS_LOC_SPECIAL;
+ fallthrough;
+ case ETHTOOL_GRXCLSRLALL:
+ ret = fbnic_get_cls_rule_all(fbn, cmd, rule_locs);
+ if (ret < 0)
+ break;
+
+ cmd->data |= special;
+ cmd->rule_cnt = ret;
+ ret = 0;
+ break;
}
return ret;
@@ -272,6 +674,406 @@ fbnic_set_rss_hash_opts(struct fbnic_net *fbn, const struct ethtool_rxnfc *cmd)
return 0;
}
+static int fbnic_cls_rule_any_loc(struct fbnic_dev *fbd)
+{
+ int i;
+
+ for (i = FBNIC_RPC_ACT_TBL_NFC_ENTRIES; i--;) {
+ int idx = i + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
+
+ if (fbd->act_tcam[idx].state != FBNIC_TCAM_S_VALID)
+ return i;
+ }
+
+ return -ENOSPC;
+}
+
+static int fbnic_set_cls_rule_ins(struct fbnic_net *fbn,
+ const struct ethtool_rxnfc *cmd)
+{
+ u16 flow_value = 0, flow_mask = 0xffff, ip_value = 0, ip_mask = 0xffff;
+ u16 sport = 0, sport_mask = ~0, dport = 0, dport_mask = ~0;
+ u16 misc = 0, misc_mask = ~0;
+ u32 dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
+ FBNIC_RPC_ACT_TBL0_DEST_HOST);
+ struct fbnic_ip_addr *ip_src = NULL, *ip_dst = NULL;
+ struct fbnic_mac_addr *mac_addr = NULL;
+ struct ethtool_rx_flow_spec *fsp;
+ struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_act_tcam *act_tcam;
+ struct in6_addr *addr6, *mask6;
+ struct in_addr *addr4, *mask4;
+ int hash_idx, location;
+ u32 flow_type;
+ int idx, j;
+
+ fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ if (fsp->location != RX_CLS_LOC_ANY)
+ return -EINVAL;
+ location = fbnic_cls_rule_any_loc(fbd);
+ if (location < 0)
+ return location;
+
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
+ dest = FBNIC_RPC_ACT_TBL0_DROP;
+ } else if (fsp->flow_type & FLOW_RSS) {
+ if (cmd->rss_context == 1)
+ dest |= FBNIC_RPC_ACT_TBL0_RSS_CTXT_ID;
+ } else {
+ u32 ring_idx = ethtool_get_flow_spec_ring(fsp->ring_cookie);
+
+ if (ring_idx >= fbn->num_rx_queues)
+ return -EINVAL;
+
+ dest |= FBNIC_RPC_ACT_TBL0_Q_SEL |
+ FIELD_PREP(FBNIC_RPC_ACT_TBL0_Q_ID, ring_idx);
+ }
+
+ idx = location + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
+ act_tcam = &fbd->act_tcam[idx];
+
+ /* Do not allow overwriting for now.
+ * To support overwriting rules we will need to add logic to free
+ * any IP or MACDA TCAMs that may be associated with the old rule.
+ */
+ if (act_tcam->state != FBNIC_TCAM_S_DISABLED)
+ return -EBUSY;
+
+ flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_RSS);
+ hash_idx = fbnic_get_rss_hash_idx(flow_type);
+
+ switch (flow_type) {
+ case UDP_V4_FLOW:
+udp4_flow:
+ flow_value |= FBNIC_RPC_TCAM_ACT1_L4_IS_UDP;
+ fallthrough;
+ case TCP_V4_FLOW:
+tcp4_flow:
+ flow_value |= FBNIC_RPC_TCAM_ACT1_L4_VALID;
+ flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_L4_IS_UDP |
+ FBNIC_RPC_TCAM_ACT1_L4_VALID);
+
+ sport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.psrc);
+ sport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip4_spec.psrc);
+ dport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.pdst);
+ dport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip4_spec.pdst);
+ goto ip4_flow;
+ case IP_USER_FLOW:
+ if (!fsp->m_u.usr_ip4_spec.proto)
+ goto ip4_flow;
+ if (fsp->m_u.usr_ip4_spec.proto != 0xff)
+ return -EINVAL;
+ if (fsp->h_u.usr_ip4_spec.proto == IPPROTO_UDP)
+ goto udp4_flow;
+ if (fsp->h_u.usr_ip4_spec.proto == IPPROTO_TCP)
+ goto tcp4_flow;
+ return -EINVAL;
+ip4_flow:
+ addr4 = (struct in_addr *)&fsp->h_u.usr_ip4_spec.ip4src;
+ mask4 = (struct in_addr *)&fsp->m_u.usr_ip4_spec.ip4src;
+ if (mask4->s_addr) {
+ ip_src = __fbnic_ip4_sync(fbd, fbd->ip_src,
+ addr4, mask4);
+ if (!ip_src)
+ return -ENOSPC;
+
+ set_bit(idx, ip_src->act_tcam);
+ ip_value |= FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
+ ip_src - fbd->ip_src);
+ ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
+ FBNIC_RPC_TCAM_ACT0_IPSRC_IDX);
+ }
+
+ addr4 = (struct in_addr *)&fsp->h_u.usr_ip4_spec.ip4dst;
+ mask4 = (struct in_addr *)&fsp->m_u.usr_ip4_spec.ip4dst;
+ if (mask4->s_addr) {
+ ip_dst = __fbnic_ip4_sync(fbd, fbd->ip_dst,
+ addr4, mask4);
+ if (!ip_dst) {
+ if (ip_src && ip_src->state == FBNIC_TCAM_S_ADD)
+ memset(ip_src, 0, sizeof(*ip_src));
+ return -ENOSPC;
+ }
+
+ set_bit(idx, ip_dst->act_tcam);
+ ip_value |= FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
+ ip_dst - fbd->ip_dst);
+ ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
+ FBNIC_RPC_TCAM_ACT0_IPDST_IDX);
+ }
+ flow_value |= FBNIC_RPC_TCAM_ACT1_IP_VALID |
+ FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+ flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_IP_IS_V6 |
+ FBNIC_RPC_TCAM_ACT1_IP_VALID |
+ FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID);
+ break;
+ case UDP_V6_FLOW:
+udp6_flow:
+ flow_value |= FBNIC_RPC_TCAM_ACT1_L4_IS_UDP;
+ fallthrough;
+ case TCP_V6_FLOW:
+tcp6_flow:
+ flow_value |= FBNIC_RPC_TCAM_ACT1_L4_VALID;
+ flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_L4_IS_UDP |
+ FBNIC_RPC_TCAM_ACT1_L4_VALID);
+
+ sport = be16_to_cpu(fsp->h_u.tcp_ip6_spec.psrc);
+ sport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip6_spec.psrc);
+ dport = be16_to_cpu(fsp->h_u.tcp_ip6_spec.pdst);
+ dport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip6_spec.pdst);
+ goto ipv6_flow;
+ case IPV6_USER_FLOW:
+ if (!fsp->m_u.usr_ip6_spec.l4_proto)
+ goto ipv6_flow;
+
+ if (fsp->m_u.usr_ip6_spec.l4_proto != 0xff)
+ return -EINVAL;
+ if (fsp->h_u.usr_ip6_spec.l4_proto == IPPROTO_UDP)
+ goto udp6_flow;
+ if (fsp->h_u.usr_ip6_spec.l4_proto == IPPROTO_TCP)
+ goto tcp6_flow;
+ if (fsp->h_u.usr_ip6_spec.l4_proto != IPPROTO_IPV6)
+ return -EINVAL;
+
+ addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6src;
+ mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6src;
+ if (!ipv6_addr_any(mask6)) {
+ ip_src = __fbnic_ip6_sync(fbd, fbd->ipo_src,
+ addr6, mask6);
+ if (!ip_src)
+ return -ENOSPC;
+
+ set_bit(idx, ip_src->act_tcam);
+ ip_value |=
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID |
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX,
+ ip_src - fbd->ipo_src);
+ ip_mask &=
+ ~(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID |
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX);
+ }
+
+ addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6dst;
+ mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6dst;
+ if (!ipv6_addr_any(mask6)) {
+ ip_dst = __fbnic_ip6_sync(fbd, fbd->ipo_dst,
+ addr6, mask6);
+ if (!ip_dst) {
+ if (ip_src && ip_src->state == FBNIC_TCAM_S_ADD)
+ memset(ip_src, 0, sizeof(*ip_src));
+ return -ENOSPC;
+ }
+
+ set_bit(idx, ip_dst->act_tcam);
+ ip_value |=
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID |
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX,
+ ip_dst - fbd->ipo_dst);
+ ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID |
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX);
+ }
+
+ flow_value |= FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID;
+ flow_mask &= ~FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID;
+ipv6_flow:
+ addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6src;
+ mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6src;
+ if (!ip_src && !ipv6_addr_any(mask6)) {
+ ip_src = __fbnic_ip6_sync(fbd, fbd->ip_src,
+ addr6, mask6);
+ if (!ip_src)
+ return -ENOSPC;
+
+ set_bit(idx, ip_src->act_tcam);
+ ip_value |= FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
+ ip_src - fbd->ip_src);
+ ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
+ FBNIC_RPC_TCAM_ACT0_IPSRC_IDX);
+ }
+
+ addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6dst;
+ mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6dst;
+ if (!ip_dst && !ipv6_addr_any(mask6)) {
+ ip_dst = __fbnic_ip6_sync(fbd, fbd->ip_dst,
+ addr6, mask6);
+ if (!ip_dst) {
+ if (ip_src && ip_src->state == FBNIC_TCAM_S_ADD)
+ memset(ip_src, 0, sizeof(*ip_src));
+ return -ENOSPC;
+ }
+
+ set_bit(idx, ip_dst->act_tcam);
+ ip_value |= FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
+ ip_dst - fbd->ip_dst);
+ ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
+ FBNIC_RPC_TCAM_ACT0_IPDST_IDX);
+ }
+
+ flow_value |= FBNIC_RPC_TCAM_ACT1_IP_IS_V6 |
+ FBNIC_RPC_TCAM_ACT1_IP_VALID |
+ FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+ flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_IP_IS_V6 |
+ FBNIC_RPC_TCAM_ACT1_IP_VALID |
+ FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID);
+ break;
+ case ETHER_FLOW:
+ if (!is_zero_ether_addr(fsp->m_u.ether_spec.h_dest)) {
+ u8 *addr = fsp->h_u.ether_spec.h_dest;
+ u8 *mask = fsp->m_u.ether_spec.h_dest;
+
+ /* Do not allow MAC addr of 0 */
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ /* Only support full MAC address to avoid
+ * conflicts with other MAC addresses.
+ */
+ if (!is_broadcast_ether_addr(mask))
+ return -EINVAL;
+
+ if (is_multicast_ether_addr(addr))
+ mac_addr = __fbnic_mc_sync(fbd, addr);
+ else
+ mac_addr = __fbnic_uc_sync(fbd, addr);
+
+ if (!mac_addr)
+ return -ENOSPC;
+
+ set_bit(idx, mac_addr->act_tcam);
+ flow_value |=
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
+ mac_addr - fbd->mac_addr);
+ flow_mask &= ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX;
+ }
+
+ flow_value |= FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+ flow_mask &= ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Write action table values */
+ act_tcam->dest = dest;
+ act_tcam->rss_en_mask = fbnic_flow_hash_2_rss_en_mask(fbn, hash_idx);
+
+ /* Write IP Match value/mask to action_tcam[0] */
+ act_tcam->value.tcam[0] = ip_value;
+ act_tcam->mask.tcam[0] = ip_mask;
+
+ /* Write flow type value/mask to action_tcam[1] */
+ act_tcam->value.tcam[1] = flow_value;
+ act_tcam->mask.tcam[1] = flow_mask;
+
+ /* Write error, DSCP, extra L4 matches to action_tcam[2] */
+ act_tcam->value.tcam[2] = misc;
+ act_tcam->mask.tcam[2] = misc_mask;
+
+ /* Write source/destination port values */
+ act_tcam->value.tcam[3] = sport;
+ act_tcam->mask.tcam[3] = sport_mask;
+ act_tcam->value.tcam[4] = dport;
+ act_tcam->mask.tcam[4] = dport_mask;
+
+ for (j = 5; j < FBNIC_RPC_TCAM_ACT_WORD_LEN; j++)
+ act_tcam->mask.tcam[j] = 0xffff;
+
+ act_tcam->state = FBNIC_TCAM_S_UPDATE;
+ fsp->location = location;
+
+ if (netif_running(fbn->netdev)) {
+ fbnic_write_rules(fbd);
+ if (ip_src || ip_dst)
+ fbnic_write_ip_addr(fbd);
+ if (mac_addr)
+ fbnic_write_macda(fbd);
+ }
+
+ return 0;
+}
+
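Because insertion only accepts RX_CLS_LOC_ANY, rules should be added without an explicit loc argument (ethtool then defaults the location to "any"). Hypothetical examples for an interface eth0:

  ethtool -N eth0 flow-type tcp4 dst-port 443 action 2
  ethtool -N eth0 flow-type ether dst 01:00:5e:00:00:01 action -1

Here action N steers matches to Rx queue N, while action -1 is RX_CLS_FLOW_DISC and lands on the FBNIC_RPC_ACT_TBL0_DROP path.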
+static void fbnic_clear_nfc_macda(struct fbnic_net *fbn,
+ unsigned int tcam_idx)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+ int idx;
+
+ for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;)
+ __fbnic_xc_unsync(&fbd->mac_addr[idx], tcam_idx);
+
+ /* Write updates to hardware */
+ if (netif_running(fbn->netdev))
+ fbnic_write_macda(fbd);
+}
+
+static void fbnic_clear_nfc_ip_addr(struct fbnic_net *fbn,
+ unsigned int tcam_idx)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+ int idx;
+
+ for (idx = ARRAY_SIZE(fbd->ip_src); idx--;)
+ __fbnic_ip_unsync(&fbd->ip_src[idx], tcam_idx);
+ for (idx = ARRAY_SIZE(fbd->ip_dst); idx--;)
+ __fbnic_ip_unsync(&fbd->ip_dst[idx], tcam_idx);
+ for (idx = ARRAY_SIZE(fbd->ipo_src); idx--;)
+ __fbnic_ip_unsync(&fbd->ipo_src[idx], tcam_idx);
+ for (idx = ARRAY_SIZE(fbd->ipo_dst); idx--;)
+ __fbnic_ip_unsync(&fbd->ipo_dst[idx], tcam_idx);
+
+ /* Write updates to hardware */
+ if (netif_running(fbn->netdev))
+ fbnic_write_ip_addr(fbd);
+}
+
+static int fbnic_set_cls_rule_del(struct fbnic_net *fbn,
+ const struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp;
+ struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_act_tcam *act_tcam;
+ int idx;
+
+ fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ if (fsp->location >= FBNIC_RPC_ACT_TBL_NFC_ENTRIES)
+ return -EINVAL;
+
+ idx = fsp->location + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
+ act_tcam = &fbd->act_tcam[idx];
+
+ if (act_tcam->state != FBNIC_TCAM_S_VALID)
+ return -EINVAL;
+
+ act_tcam->state = FBNIC_TCAM_S_DELETE;
+
+ if ((act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID) &&
+ (~act_tcam->mask.tcam[1] & FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX))
+ fbnic_clear_nfc_macda(fbn, idx);
+
+ if ((act_tcam->value.tcam[0] &
+ (FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
+ FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID |
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID)) &&
+ (~act_tcam->mask.tcam[0] &
+ (FBNIC_RPC_TCAM_ACT0_IPSRC_IDX |
+ FBNIC_RPC_TCAM_ACT0_IPDST_IDX |
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX |
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX)))
+ fbnic_clear_nfc_ip_addr(fbn, idx);
+
+ if (netif_running(fbn->netdev))
+ fbnic_write_rules(fbd);
+
+ return 0;
+}
+
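Deletion takes the slot index that insertion reported back through fsp->location, e.g. (hypothetical eth0):

  ethtool -n eth0
  ethtool -N eth0 delete 0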
static int fbnic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
struct fbnic_net *fbn = netdev_priv(netdev);
@@ -281,6 +1083,12 @@ static int fbnic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
case ETHTOOL_SRXFH:
ret = fbnic_set_rss_hash_opts(fbn, cmd);
break;
+ case ETHTOOL_SRXCLSRLINS:
+ ret = fbnic_set_cls_rule_ins(fbn, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = fbnic_set_cls_rule_del(fbn, cmd);
+ break;
}
return ret;
@@ -374,6 +1182,61 @@ fbnic_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
return 0;
}
+static int
+fbnic_modify_rxfh_context(struct net_device *netdev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ const u32 *indir = rxfh->indir;
+ unsigned int changes;
+
+ if (!indir)
+ indir = ethtool_rxfh_context_indir(ctx);
+
+ changes = fbnic_set_indir(fbn, rxfh->rss_context, indir);
+ if (changes && netif_running(netdev))
+ fbnic_rss_reinit_hw(fbn->fbd, fbn);
+
+ return 0;
+}
+
+static int
+fbnic_create_rxfh_context(struct net_device *netdev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP) {
+ NL_SET_ERR_MSG_MOD(extack, "RSS hash function not supported");
+ return -EOPNOTSUPP;
+ }
+ ctx->hfunc = ETH_RSS_HASH_TOP;
+
+ if (!rxfh->indir) {
+ u32 *indir = ethtool_rxfh_context_indir(ctx);
+ unsigned int num_rx = fbn->num_rx_queues;
+ unsigned int i;
+
+ for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++)
+ indir[i] = ethtool_rxfh_indir_default(i, num_rx);
+ }
+
+ return fbnic_modify_rxfh_context(netdev, ctx, rxfh, extack);
+}
+
+static int
+fbnic_remove_rxfh_context(struct net_device *netdev,
+ struct ethtool_rxfh_context *ctx, u32 rss_context,
+ struct netlink_ext_ack *extack)
+{
+ /* Nothing to do, contexts are allocated statically */
+ return 0;
+}
+
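The contexts tie into the n-tuple path above: the insert handler only sets FBNIC_RPC_ACT_TBL0_RSS_CTXT_ID for rss_context == 1, so a plausible sequence on a hypothetical eth0 is:

  ethtool -X eth0 context new equal 4
  ethtool -N eth0 flow-type udp6 dst-port 4789 context 1

Since the contexts live in statically allocated RSS tables, remove_rxfh_context genuinely has nothing to tear down.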
static void fbnic_get_channels(struct net_device *netdev,
struct ethtool_channels *ch)
{
@@ -523,14 +1386,14 @@ static void fbnic_get_ts_stats(struct net_device *netdev,
unsigned int start;
int i;
- ts_stats->pkts = fbn->tx_stats.ts_packets;
- ts_stats->lost = fbn->tx_stats.ts_lost;
+ ts_stats->pkts = fbn->tx_stats.twq.ts_packets;
+ ts_stats->lost = fbn->tx_stats.twq.ts_lost;
for (i = 0; i < fbn->num_tx_queues; i++) {
ring = fbn->tx[i];
do {
start = u64_stats_fetch_begin(&ring->stats.syncp);
- ts_packets = ring->stats.ts_packets;
- ts_lost = ring->stats.ts_lost;
+ ts_packets = ring->stats.twq.ts_packets;
+ ts_lost = ring->stats.twq.ts_lost;
} while (u64_stats_fetch_retry(&ring->stats.syncp, start));
ts_stats->pkts += ts_packets;
ts_stats->lost += ts_lost;
@@ -586,9 +1449,17 @@ fbnic_get_eth_mac_stats(struct net_device *netdev,
}
static const struct ethtool_ops fbnic_ethtool_ops = {
+ .supported_coalesce_params =
+ ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_RX_MAX_FRAMES,
+ .rxfh_max_num_contexts = FBNIC_RPC_RSS_TBL_COUNT,
.get_drvinfo = fbnic_get_drvinfo,
.get_regs_len = fbnic_get_regs_len,
.get_regs = fbnic_get_regs,
+ .get_coalesce = fbnic_get_coalesce,
+ .set_coalesce = fbnic_set_coalesce,
+ .get_ringparam = fbnic_get_ringparam,
+ .set_ringparam = fbnic_set_ringparam,
.get_strings = fbnic_get_strings,
.get_ethtool_stats = fbnic_get_ethtool_stats,
.get_sset_count = fbnic_get_sset_count,
@@ -598,6 +1469,9 @@ static const struct ethtool_ops fbnic_ethtool_ops = {
.get_rxfh_indir_size = fbnic_get_rxfh_indir_size,
.get_rxfh = fbnic_get_rxfh,
.set_rxfh = fbnic_set_rxfh,
+ .create_rxfh_context = fbnic_create_rxfh_context,
+ .modify_rxfh_context = fbnic_modify_rxfh_context,
+ .remove_rxfh_context = fbnic_remove_rxfh_context,
.get_channels = fbnic_get_channels,
.set_channels = fbnic_set_channels,
.get_ts_info = fbnic_get_ts_info,
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
index bbc7c1c0c37e..88db3dacb940 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
@@ -494,16 +494,13 @@ static int fbnic_fw_parse_bmc_addrs(u8 bmc_mac_addr[][ETH_ALEN],
static int fbnic_fw_parse_cap_resp(void *opaque, struct fbnic_tlv_msg **results)
{
- u32 active_slot = 0, all_multi = 0;
+ u32 all_multi = 0, version = 0;
struct fbnic_dev *fbd = opaque;
- u32 speed = 0, fec = 0;
- size_t commit_size = 0;
bool bmc_present;
int err;
- get_unsigned_result(FBNIC_FW_CAP_RESP_VERSION,
- fbd->fw_cap.running.mgmt.version);
-
+ version = fta_get_uint(results, FBNIC_FW_CAP_RESP_VERSION);
+ fbd->fw_cap.running.mgmt.version = version;
if (!fbd->fw_cap.running.mgmt.version)
return -EINVAL;
@@ -524,43 +521,41 @@ static int fbnic_fw_parse_cap_resp(void *opaque, struct fbnic_tlv_msg **results)
return -EINVAL;
}
- get_string_result(FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR, commit_size,
- fbd->fw_cap.running.mgmt.commit,
- FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
- if (!commit_size)
+ if (fta_get_str(results, FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR,
+ fbd->fw_cap.running.mgmt.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE) <= 0)
dev_warn(fbd->dev, "Firmware did not send mgmt commit!\n");
- get_unsigned_result(FBNIC_FW_CAP_RESP_STORED_VERSION,
- fbd->fw_cap.stored.mgmt.version);
- get_string_result(FBNIC_FW_CAP_RESP_STORED_COMMIT_STR, commit_size,
- fbd->fw_cap.stored.mgmt.commit,
- FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
-
- get_unsigned_result(FBNIC_FW_CAP_RESP_CMRT_VERSION,
- fbd->fw_cap.running.bootloader.version);
- get_string_result(FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR, commit_size,
- fbd->fw_cap.running.bootloader.commit,
- FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
-
- get_unsigned_result(FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION,
- fbd->fw_cap.stored.bootloader.version);
- get_string_result(FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR, commit_size,
- fbd->fw_cap.stored.bootloader.commit,
- FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
-
- get_unsigned_result(FBNIC_FW_CAP_RESP_UEFI_VERSION,
- fbd->fw_cap.stored.undi.version);
- get_string_result(FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR, commit_size,
- fbd->fw_cap.stored.undi.commit,
- FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
-
- get_unsigned_result(FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT, active_slot);
- fbd->fw_cap.active_slot = active_slot;
-
- get_unsigned_result(FBNIC_FW_CAP_RESP_FW_LINK_SPEED, speed);
- get_unsigned_result(FBNIC_FW_CAP_RESP_FW_LINK_FEC, fec);
- fbd->fw_cap.link_speed = speed;
- fbd->fw_cap.link_fec = fec;
+ version = fta_get_uint(results, FBNIC_FW_CAP_RESP_STORED_VERSION);
+ fbd->fw_cap.stored.mgmt.version = version;
+ fta_get_str(results, FBNIC_FW_CAP_RESP_STORED_COMMIT_STR,
+ fbd->fw_cap.stored.mgmt.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
+
+ version = fta_get_uint(results, FBNIC_FW_CAP_RESP_CMRT_VERSION);
+ fbd->fw_cap.running.bootloader.version = version;
+ fta_get_str(results, FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR,
+ fbd->fw_cap.running.bootloader.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
+
+ version = fta_get_uint(results, FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION);
+ fbd->fw_cap.stored.bootloader.version = version;
+ fta_get_str(results, FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR,
+ fbd->fw_cap.stored.bootloader.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
+
+ version = fta_get_uint(results, FBNIC_FW_CAP_RESP_UEFI_VERSION);
+ fbd->fw_cap.stored.undi.version = version;
+ fta_get_str(results, FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR,
+ fbd->fw_cap.stored.undi.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
+
+ fbd->fw_cap.active_slot =
+ fta_get_uint(results, FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT);
+ fbd->fw_cap.link_speed =
+ fta_get_uint(results, FBNIC_FW_CAP_RESP_FW_LINK_SPEED);
+ fbd->fw_cap.link_fec =
+ fta_get_uint(results, FBNIC_FW_CAP_RESP_FW_LINK_FEC);
bmc_present = !!results[FBNIC_FW_CAP_RESP_BMC_PRESENT];
if (bmc_present) {
@@ -575,7 +570,8 @@ static int fbnic_fw_parse_cap_resp(void *opaque, struct fbnic_tlv_msg **results)
if (err)
return err;
- get_unsigned_result(FBNIC_FW_CAP_RESP_BMC_ALL_MULTI, all_multi);
+ all_multi =
+ fta_get_uint(results, FBNIC_FW_CAP_RESP_BMC_ALL_MULTI);
} else {
memset(fbd->fw_cap.bmc_mac_addr, 0,
sizeof(fbd->fw_cap.bmc_mac_addr));
@@ -743,9 +739,9 @@ free_message:
}
static const struct fbnic_tlv_index fbnic_tsene_read_resp_index[] = {
- FBNIC_TLV_ATTR_S32(FBNIC_TSENE_THERM),
- FBNIC_TLV_ATTR_S32(FBNIC_TSENE_VOLT),
- FBNIC_TLV_ATTR_S32(FBNIC_TSENE_ERROR),
+ FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_THERM),
+ FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_VOLT),
+ FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_ERROR),
FBNIC_TLV_ATTR_LAST
};
@@ -754,32 +750,31 @@ static int fbnic_fw_parse_tsene_read_resp(void *opaque,
{
struct fbnic_fw_completion *cmpl_data;
struct fbnic_dev *fbd = opaque;
+ s32 err_resp;
int err = 0;
/* Verify we have a completion pointer to provide with data */
cmpl_data = fbnic_fw_get_cmpl_by_type(fbd,
FBNIC_TLV_MSG_ID_TSENE_READ_RESP);
if (!cmpl_data)
- return -EINVAL;
+ return -ENOSPC;
- if (results[FBNIC_TSENE_ERROR]) {
- err = fbnic_tlv_attr_get_unsigned(results[FBNIC_TSENE_ERROR]);
- if (err)
- goto exit_complete;
- }
+ err_resp = fta_get_sint(results, FBNIC_FW_TSENE_ERROR);
+ if (err_resp)
+ goto msg_err;
- if (!results[FBNIC_TSENE_THERM] || !results[FBNIC_TSENE_VOLT]) {
+ if (!results[FBNIC_FW_TSENE_THERM] || !results[FBNIC_FW_TSENE_VOLT]) {
err = -EINVAL;
- goto exit_complete;
+ goto msg_err;
}
cmpl_data->u.tsene.millidegrees =
- fbnic_tlv_attr_get_signed(results[FBNIC_TSENE_THERM]);
+ fta_get_sint(results, FBNIC_FW_TSENE_THERM);
cmpl_data->u.tsene.millivolts =
- fbnic_tlv_attr_get_signed(results[FBNIC_TSENE_VOLT]);
+ fta_get_sint(results, FBNIC_FW_TSENE_VOLT);
-exit_complete:
- cmpl_data->result = err;
+msg_err:
+ cmpl_data->result = err_resp ? : err;
complete(&cmpl_data->done);
fbnic_fw_put_cmpl(cmpl_data);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
index fe68333d51b1..a3618e7826c2 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
@@ -139,10 +139,10 @@ enum {
};
enum {
- FBNIC_TSENE_THERM = 0x0,
- FBNIC_TSENE_VOLT = 0x1,
- FBNIC_TSENE_ERROR = 0x2,
- FBNIC_TSENE_MSG_MAX
+ FBNIC_FW_TSENE_THERM = 0x0,
+ FBNIC_FW_TSENE_VOLT = 0x1,
+ FBNIC_FW_TSENE_ERROR = 0x2,
+ FBNIC_FW_TSENE_MSG_MAX
};
enum {
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
index 7a96b6ee773f..79a01fdd1dd1 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
@@ -487,8 +487,9 @@ static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx,
struct fbnic_net *fbn = netdev_priv(dev);
struct fbnic_ring *rxr = fbn->rx[idx];
struct fbnic_queue_stats *stats;
+ u64 bytes, packets, alloc_fail;
+ u64 csum_complete, csum_none;
unsigned int start;
- u64 bytes, packets;
if (!rxr)
return;
@@ -498,10 +499,16 @@ static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx,
start = u64_stats_fetch_begin(&stats->syncp);
bytes = stats->bytes;
packets = stats->packets;
+ alloc_fail = stats->rx.alloc_failed;
+ csum_complete = stats->rx.csum_complete;
+ csum_none = stats->rx.csum_none;
} while (u64_stats_fetch_retry(&stats->syncp, start));
rx->bytes = bytes;
rx->packets = packets;
+ rx->alloc_fail = alloc_fail;
+ rx->csum_complete = csum_complete;
+ rx->csum_none = csum_none;
}
static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx,
@@ -510,6 +517,7 @@ static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx,
struct fbnic_net *fbn = netdev_priv(dev);
struct fbnic_ring *txr = fbn->tx[idx];
struct fbnic_queue_stats *stats;
+ u64 stop, wake, csum, lso;
unsigned int start;
u64 bytes, packets;
@@ -521,10 +529,18 @@ static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx,
start = u64_stats_fetch_begin(&stats->syncp);
bytes = stats->bytes;
packets = stats->packets;
+ csum = stats->twq.csum_partial;
+ lso = stats->twq.lso;
+ stop = stats->twq.stop;
+ wake = stats->twq.wake;
} while (u64_stats_fetch_retry(&stats->syncp, start));
tx->bytes = bytes;
tx->packets = packets;
+ tx->needs_csum = csum + lso;
+ tx->hw_gso_wire_packets = lso;
+ tx->stop = stop;
+ tx->wake = wake;
}
static void fbnic_get_base_stats(struct net_device *dev,
@@ -535,9 +551,16 @@ static void fbnic_get_base_stats(struct net_device *dev,
tx->bytes = fbn->tx_stats.bytes;
tx->packets = fbn->tx_stats.packets;
+ tx->needs_csum = fbn->tx_stats.twq.csum_partial + fbn->tx_stats.twq.lso;
+ tx->hw_gso_wire_packets = fbn->tx_stats.twq.lso;
+ tx->stop = fbn->tx_stats.twq.stop;
+ tx->wake = fbn->tx_stats.twq.wake;
rx->bytes = fbn->rx_stats.bytes;
rx->packets = fbn->rx_stats.packets;
+ rx->alloc_fail = fbn->rx_stats.rx.alloc_failed;
+ rx->csum_complete = fbn->rx_stats.rx.csum_complete;
+ rx->csum_none = fbn->rx_stats.rx.csum_none;
}
static const struct netdev_stat_ops fbnic_stat_ops = {
@@ -588,7 +611,7 @@ void fbnic_netdev_free(struct fbnic_dev *fbd)
* Allocate and initialize the netdev and netdev private structure. Bind
* together the hardware, netdev, and pci data structures.
*
- * Return: 0 on success, negative on failure
+ * Return: Pointer to net_device on success, NULL on failure
**/
struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
{
@@ -618,6 +641,10 @@ struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
fbn->ppq_size = FBNIC_PPQ_SIZE_DEFAULT;
fbn->rcq_size = FBNIC_RCQ_SIZE_DEFAULT;
+ fbn->tx_usecs = FBNIC_TX_USECS_DEFAULT;
+ fbn->rx_usecs = FBNIC_RX_USECS_DEFAULT;
+ fbn->rx_max_frames = FBNIC_RX_FRAMES_DEFAULT;
+
default_queues = netif_get_num_default_rss_queues();
if (default_queues > fbd->max_num_queues)
default_queues = fbd->max_num_queues;
@@ -628,15 +655,32 @@ struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
fbnic_rss_key_fill(fbn->rss_key);
fbnic_rss_init_en_mask(fbn);
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ netdev->gso_partial_features =
+ NETIF_F_GSO_GRE |
+ NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
netdev->features |=
+ netdev->gso_partial_features |
+ FBNIC_TUN_GSO_FEATURES |
NETIF_F_RXHASH |
NETIF_F_SG |
NETIF_F_HW_CSUM |
- NETIF_F_RXCSUM;
+ NETIF_F_RXCSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO_ECN |
+ NETIF_F_TSO6 |
+ NETIF_F_GSO_PARTIAL |
+ NETIF_F_GSO_UDP_L4;
netdev->hw_features |= netdev->features;
netdev->vlan_features |= netdev->features;
netdev->hw_enc_features |= netdev->features;
+ netdev->features |= NETIF_F_NTUPLE;
netdev->min_mtu = IPV6_MIN_MTU;
netdev->max_mtu = FBNIC_MAX_JUMBO_FRAME_SIZE - ETH_HLEN;
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
index a392ac1cc4f2..561837e80ec8 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
@@ -12,6 +12,10 @@
#include "fbnic_txrx.h"
#define FBNIC_MAX_NAPI_VECTORS 128u
+#define FBNIC_MIN_RXD_PER_FRAME 2
+
+/* Natively supported tunnel GSO features (not thru GSO_PARTIAL) */
+#define FBNIC_TUN_GSO_FEATURES NETIF_F_GSO_IPXIP6
struct fbnic_net {
struct fbnic_ring *tx[FBNIC_MAX_TXQS];
@@ -27,6 +31,11 @@ struct fbnic_net {
u32 ppq_size;
u32 rcq_size;
+ u16 rx_usecs;
+ u16 tx_usecs;
+
+ u32 rx_max_frames;
+
u16 num_napi;
struct phylink *phylink;
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c b/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c
index bb11fc83367d..860b02b22c15 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c
@@ -133,7 +133,6 @@ int fbnic_phylink_init(struct net_device *netdev)
struct fbnic_net *fbn = netdev_priv(netdev);
struct phylink *phylink;
- fbn->phylink_pcs.neg_mode = true;
fbn->phylink_pcs.ops = &fbnic_phylink_pcs_ops;
fbn->phylink_config.dev = &netdev->dev;
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
index c25bd300b902..8ff07b5562e3 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
@@ -3,6 +3,7 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
+#include <net/ipv6.h>
#include "fbnic.h"
#include "fbnic_netdev.h"
@@ -60,7 +61,7 @@ void fbnic_rss_disable_hw(struct fbnic_dev *fbd)
#define FBNIC_FH_2_RSSEM_BIT(_fh, _rssem, _val) \
FIELD_PREP(FBNIC_RPC_ACT_TBL1_RSS_ENA_##_rssem, \
FIELD_GET(RXH_##_fh, _val))
-static u16 fbnic_flow_hash_2_rss_en_mask(struct fbnic_net *fbn, int flow_type)
+u16 fbnic_flow_hash_2_rss_en_mask(struct fbnic_net *fbn, int flow_type)
{
u32 flow_hash = fbn->rss_flow_hash[flow_type];
u32 rss_en_mask = 0;
@@ -698,6 +699,359 @@ void fbnic_write_tce_tcam(struct fbnic_dev *fbd)
__fbnic_write_tce_tcam(fbd);
}
+struct fbnic_ip_addr *__fbnic_ip4_sync(struct fbnic_dev *fbd,
+ struct fbnic_ip_addr *ip_addr,
+ const struct in_addr *addr,
+ const struct in_addr *mask)
+{
+ struct fbnic_ip_addr *avail_addr = NULL;
+ unsigned int i;
+
+ /* Scan from top of list to bottom, filling bottom up. */
+ for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES; i++, ip_addr++) {
+ struct in6_addr *m = &ip_addr->mask;
+
+ if (ip_addr->state == FBNIC_TCAM_S_DISABLED) {
+ avail_addr = ip_addr;
+ continue;
+ }
+
+ if (ip_addr->version != 4)
+ continue;
+
+ /* Drop avail_addr if mask is a subset of our current mask.
+ * This prevents us from inserting a longer prefix behind a
+ * shorter one.
+ *
+ * The mask is stored as an inverted value, so as an example:
+ * m ffff ffff ffff ffff ffff ffff 0000 0000
+ * mask 0000 0000 0000 0000 0000 0000 ffff ffff
+ *
+ * "m" and "mask" represent a typical IPv4 mask as stored in
+ * the TCAM and as provided by the stack. The test below
+ * returns a non-zero result if there is a 0 stored
+ * anywhere in "m" where "mask" also has a 0.
+ */
+ if (~m->s6_addr32[3] & ~mask->s_addr) {
+ avail_addr = NULL;
+ continue;
+ }
+
+ /* Check to see if the mask actually contains fewer bits than
+ * our new mask "m". The XOR below should only result in 0 if
+ * "m" is masking a bit that we are looking for in our new
+ * "mask", we eliminated the 0^0 case with the check above.
+ *
+ * If it contains fewer bits we need to stop here, otherwise
+ * we might be adding an unreachable rule.
+ */
+ if (~(m->s6_addr32[3] ^ mask->s_addr))
+ break;
+
+ if (ip_addr->value.s6_addr32[3] == addr->s_addr) {
+ avail_addr = ip_addr;
+ break;
+ }
+ }
+
+ if (avail_addr && avail_addr->state == FBNIC_TCAM_S_DISABLED) {
+ ipv6_addr_set(&avail_addr->value, 0, 0, 0, addr->s_addr);
+ ipv6_addr_set(&avail_addr->mask, htonl(~0), htonl(~0),
+ htonl(~0), ~mask->s_addr);
+ avail_addr->version = 4;
+
+ avail_addr->state = FBNIC_TCAM_S_ADD;
+ }
+
+ return avail_addr;
+}
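As a standalone illustration of the two screening tests (plain userspace C, not driver code; the /24 and /16 masks are made up for the example):

#include <stdint.h>
#include <stdio.h>

static void check(uint32_t stored_mask, uint32_t new_mask)
{
    uint32_t m = ~stored_mask;  /* the TCAM keeps masks inverted */

    /* Non-zero when the new rule ignores bits the stored entry
     * matches on (the new prefix is shorter), so any cached free
     * slot must be dropped.
     */
    printf("subset: %#x\n", (unsigned int)(~m & ~new_mask));

    /* Zero only when the two masks are identical; a non-zero value
     * stops the scan so a longer prefix is never inserted behind a
     * shorter one.
     */
    printf("gap:    %#x\n", (unsigned int)~(m ^ new_mask));
}

int main(void)
{
    check(0xffffff00u, 0xffff0000u);    /* stored /24, new /16 */
    check(0xffff0000u, 0xffffff00u);    /* stored /16, new /24 */
    return 0;
}

Byte order is glossed over here; the kernel code operates on __be32 words.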
+
+struct fbnic_ip_addr *__fbnic_ip6_sync(struct fbnic_dev *fbd,
+ struct fbnic_ip_addr *ip_addr,
+ const struct in6_addr *addr,
+ const struct in6_addr *mask)
+{
+ struct fbnic_ip_addr *avail_addr = NULL;
+ unsigned int i;
+
+ ip_addr = &ip_addr[FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES - 1];
+
+ /* Scan from bottom of list to top, filling top down. */
+ for (i = FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES; i--; ip_addr--) {
+ struct in6_addr *m = &ip_addr->mask;
+
+ if (ip_addr->state == FBNIC_TCAM_S_DISABLED) {
+ avail_addr = ip_addr;
+ continue;
+ }
+
+ if (ip_addr->version != 6)
+ continue;
+
+ /* Drop avail_addr if mask is a superset of our current mask.
+ * This prevents us from inserting a longer prefix behind a
+ * shorter one.
+ *
+ * The mask is stored as an inverted value, so as an example:
+ * m 0000 0000 0000 0000 0000 0000 0000 0000
+ * mask ffff ffff ffff ffff ffff ffff ffff ffff
+ *
+ * "m" and "mask" represent a typical IPv6 mask as stored in
+ * the TCAM and as provided by the stack. The test below
+ * returns a non-zero result in that case, which causes us
+ * to drop any cached avail_addr so that we do not insert
+ * a v6 address behind a shorter prefix.
+ */
+ if ((m->s6_addr32[0] & mask->s6_addr32[0]) |
+ (m->s6_addr32[1] & mask->s6_addr32[1]) |
+ (m->s6_addr32[2] & mask->s6_addr32[2]) |
+ (m->s6_addr32[3] & mask->s6_addr32[3])) {
+ avail_addr = NULL;
+ continue;
+ }
+
+ /* The previous test eliminated any overlap between the
+ * two values so now we need to check for gaps.
+ *
+ * If the mask is equal to our current mask, then m ^ mask
+ * should be all ffff. If, however, the value stored in "m"
+ * is bigger, we should see a 0 appear somewhere in the
+ * result.
+ */
+ if (~(m->s6_addr32[0] ^ mask->s6_addr32[0]) |
+ ~(m->s6_addr32[1] ^ mask->s6_addr32[1]) |
+ ~(m->s6_addr32[2] ^ mask->s6_addr32[2]) |
+ ~(m->s6_addr32[3] ^ mask->s6_addr32[3]))
+ break;
+
+ if (ipv6_addr_cmp(&ip_addr->value, addr))
+ continue;
+
+ avail_addr = ip_addr;
+ break;
+ }
+
+ if (avail_addr && avail_addr->state == FBNIC_TCAM_S_DISABLED) {
+ memcpy(&avail_addr->value, addr, sizeof(*addr));
+ ipv6_addr_set(&avail_addr->mask,
+ ~mask->s6_addr32[0], ~mask->s6_addr32[1],
+ ~mask->s6_addr32[2], ~mask->s6_addr32[3]);
+ avail_addr->version = 6;
+
+ avail_addr->state = FBNIC_TCAM_S_ADD;
+ }
+
+ return avail_addr;
+}
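
For reference, the inversion at insert time means a /64 rule from the stack is stored with its low 64 bits as don't-care. A small illustrative fragment (values invented, declarations elided to the minimum):

	uint32_t stack_mask[4] = { 0xffffffff, 0xffffffff, 0x00000000, 0x00000000 };
	uint32_t stored[4];
	int i;

	for (i = 0; i < 4; i++)
		stored[i] = ~stack_mask[i]; /* 0x0, 0x0, 0xffffffff, 0xffffffff */
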
+
+int __fbnic_ip_unsync(struct fbnic_ip_addr *ip_addr, unsigned int tcam_idx)
+{
+ if (!test_and_clear_bit(tcam_idx, ip_addr->act_tcam))
+ return -ENOENT;
+
+ if (bitmap_empty(ip_addr->act_tcam, FBNIC_RPC_TCAM_ACT_NUM_ENTRIES))
+ ip_addr->state = FBNIC_TCAM_S_DELETE;
+
+ return 0;
+}
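
Callers effectively treat the act_tcam bitmap as a reference count: each action-TCAM rule that points at an IP entry sets its own index bit when syncing and clears it here. A hypothetical caller flow (the rule index, variables, and the flush call placement are illustrative, not lifted from this patch):

	struct fbnic_ip_addr *entry;

	entry = __fbnic_ip4_sync(fbd, fbd->ip_src, &addr, &mask);
	if (entry)
		set_bit(rule_idx, entry->act_tcam);    /* take a reference */

	/* ... later, on rule teardown ... */
	if (!__fbnic_ip_unsync(entry, rule_idx))
		fbnic_write_ip_addr(fbd); /* push any resulting DELETE to hw */
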
+
+static void fbnic_clear_ip_src_entry(struct fbnic_dev *fbd, unsigned int idx)
+{
+ int i;
+
+ /* Invalidate entry and clear addr state info */
+ for (i = 0; i <= FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_IPSRC(idx, i), 0);
+}
+
+static void fbnic_clear_ip_dst_entry(struct fbnic_dev *fbd, unsigned int idx)
+{
+ int i;
+
+ /* Invalidate entry and clear addr state info */
+ for (i = 0; i <= FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_IPDST(idx, i), 0);
+}
+
+static void fbnic_clear_ip_outer_src_entry(struct fbnic_dev *fbd,
+ unsigned int idx)
+{
+ int i;
+
+ /* Invalidate entry and clear addr state info */
+ for (i = 0; i <= FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPSRC(idx, i), 0);
+}
+
+static void fbnic_clear_ip_outer_dst_entry(struct fbnic_dev *fbd,
+ unsigned int idx)
+{
+ int i;
+
+ /* Invalidate entry and clear addr state info */
+ for (i = 0; i <= FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPDST(idx, i), 0);
+}
+
+static void fbnic_write_ip_src_entry(struct fbnic_dev *fbd, unsigned int idx,
+ struct fbnic_ip_addr *ip_addr)
+{
+ __be16 *mask, *value;
+ int i;
+
+ mask = &ip_addr->mask.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+ value = &ip_addr->value.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+
+ for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_IPSRC(idx, i),
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_MASK, ntohs(*mask--)) |
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_VALUE, ntohs(*value--)));
+ wrfl(fbd);
+
+ /* Bit 129 is used to flag for v4/v6 */
+ wr32(fbd, FBNIC_RPC_TCAM_IPSRC(idx, i),
+ (ip_addr->version == 6) | FBNIC_RPC_TCAM_VALIDATE);
+}
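
FIELD_PREP() packs each 16-bit mask/value pair into a single register word. Purely for illustration (the actual field placement comes from fbnic_csr.h, which is not part of this hunk; the GENMASK positions below are assumptions):

	/* Assuming MASK occupied bits 31:16 and VALUE bits 15:0 */
	u32 word = FIELD_PREP(GENMASK(31, 16), 0x0000) | /* inverted mask: exact match */
		   FIELD_PREP(GENMASK(15, 0), 0x0a00);   /* address word 0x0a00 */
	/* word == 0x00000a00 */
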
+
+static void fbnic_write_ip_dst_entry(struct fbnic_dev *fbd, unsigned int idx,
+ struct fbnic_ip_addr *ip_addr)
+{
+ __be16 *mask, *value;
+ int i;
+
+ mask = &ip_addr->mask.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+ value = &ip_addr->value.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+
+ for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_IPDST(idx, i),
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_MASK, ntohs(*mask--)) |
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_VALUE, ntohs(*value--)));
+ wrfl(fbd);
+
+ /* Bit 129 is used to flag for v4/v6 */
+ wr32(fbd, FBNIC_RPC_TCAM_IPDST(idx, i),
+ (ip_addr->version == 6) | FBNIC_RPC_TCAM_VALIDATE);
+}
+
+static void fbnic_write_ip_outer_src_entry(struct fbnic_dev *fbd,
+ unsigned int idx,
+ struct fbnic_ip_addr *ip_addr)
+{
+ __be16 *mask, *value;
+ int i;
+
+ mask = &ip_addr->mask.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+ value = &ip_addr->value.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+
+ for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPSRC(idx, i),
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_MASK, ntohs(*mask--)) |
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_VALUE, ntohs(*value--)));
+ wrfl(fbd);
+
+ wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPSRC(idx, i), FBNIC_RPC_TCAM_VALIDATE);
+}
+
+static void fbnic_write_ip_outer_dst_entry(struct fbnic_dev *fbd,
+ unsigned int idx,
+ struct fbnic_ip_addr *ip_addr)
+{
+ __be16 *mask, *value;
+ int i;
+
+ mask = &ip_addr->mask.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+ value = &ip_addr->value.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+
+ for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPDST(idx, i),
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_MASK, ntohs(*mask--)) |
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_VALUE, ntohs(*value--)));
+ wrfl(fbd);
+
+ wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPDST(idx, i), FBNIC_RPC_TCAM_VALIDATE);
+}
+
+void fbnic_write_ip_addr(struct fbnic_dev *fbd)
+{
+ int idx;
+
+ for (idx = ARRAY_SIZE(fbd->ip_src); idx--;) {
+ struct fbnic_ip_addr *ip_addr = &fbd->ip_src[idx];
+
+ /* Check if update flag is set, else skip. */
+ if (!(ip_addr->state & FBNIC_TCAM_S_UPDATE))
+ continue;
+
+ /* Clear by writing 0s. */
+ if (ip_addr->state == FBNIC_TCAM_S_DELETE) {
+ /* Invalidate entry and clear addr state info */
+ fbnic_clear_ip_src_entry(fbd, idx);
+ memset(ip_addr, 0, sizeof(*ip_addr));
+
+ continue;
+ }
+
+ fbnic_write_ip_src_entry(fbd, idx, ip_addr);
+
+ ip_addr->state = FBNIC_TCAM_S_VALID;
+ }
+
+ /* Repeat process for other IP TCAMs */
+ for (idx = ARRAY_SIZE(fbd->ip_dst); idx--;) {
+ struct fbnic_ip_addr *ip_addr = &fbd->ip_dst[idx];
+
+ if (!(ip_addr->state & FBNIC_TCAM_S_UPDATE))
+ continue;
+
+ if (ip_addr->state == FBNIC_TCAM_S_DELETE) {
+ fbnic_clear_ip_dst_entry(fbd, idx);
+ memset(ip_addr, 0, sizeof(*ip_addr));
+
+ continue;
+ }
+
+ fbnic_write_ip_dst_entry(fbd, idx, ip_addr);
+
+ ip_addr->state = FBNIC_TCAM_S_VALID;
+ }
+
+ for (idx = ARRAY_SIZE(fbd->ipo_src); idx--;) {
+ struct fbnic_ip_addr *ip_addr = &fbd->ipo_src[idx];
+
+ if (!(ip_addr->state & FBNIC_TCAM_S_UPDATE))
+ continue;
+
+ if (ip_addr->state == FBNIC_TCAM_S_DELETE) {
+ fbnic_clear_ip_outer_src_entry(fbd, idx);
+ memset(ip_addr, 0, sizeof(*ip_addr));
+
+ continue;
+ }
+
+ fbnic_write_ip_outer_src_entry(fbd, idx, ip_addr);
+
+ ip_addr->state = FBNIC_TCAM_S_VALID;
+ }
+
+ for (idx = ARRAY_SIZE(fbd->ipo_dst); idx--;) {
+ struct fbnic_ip_addr *ip_addr = &fbd->ipo_dst[idx];
+
+ if (!(ip_addr->state & FBNIC_TCAM_S_UPDATE))
+ continue;
+
+ if (ip_addr->state == FBNIC_TCAM_S_DELETE) {
+ fbnic_clear_ip_outer_dst_entry(fbd, idx);
+ memset(ip_addr, 0, sizeof(*ip_addr));
+
+ continue;
+ }
+
+ fbnic_write_ip_outer_dst_entry(fbd, idx, ip_addr);
+
+ ip_addr->state = FBNIC_TCAM_S_VALID;
+ }
+}
+
void fbnic_clear_rules(struct fbnic_dev *fbd)
{
u32 dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h
index 0d8285fa5b45..6892414195c3 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h
@@ -7,6 +7,8 @@
#include <uapi/linux/in6.h>
#include <linux/bitfield.h>
+struct in_addr;
+
/* The TCAM state definitions follow an expected ordering.
* They start out disabled, then move through the following states:
* Disabled 0 -> Add 2
@@ -32,6 +34,12 @@ enum {
#define FBNIC_RPC_TCAM_MACDA_WORD_LEN 3
#define FBNIC_RPC_TCAM_MACDA_NUM_ENTRIES 32
+/* 8 IPSRC and 8 IPDST TCAM entries; each entry
+ * uses 8 address registers plus a validate register
+ */
+#define FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN 8
+#define FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES 8
+
#define FBNIC_RPC_TCAM_ACT_WORD_LEN 11
#define FBNIC_RPC_TCAM_ACT_NUM_ENTRIES 64
@@ -47,6 +55,13 @@ struct fbnic_mac_addr {
DECLARE_BITMAP(act_tcam, FBNIC_RPC_TCAM_ACT_NUM_ENTRIES);
};
+struct fbnic_ip_addr {
+ struct in6_addr mask, value;
+ unsigned char version;
+ unsigned char state;
+ DECLARE_BITMAP(act_tcam, FBNIC_RPC_TCAM_ACT_NUM_ENTRIES);
+};
+
struct fbnic_act_tcam {
struct {
u16 tcam[FBNIC_RPC_TCAM_ACT_WORD_LEN];
@@ -81,6 +96,11 @@ enum {
#define FBNIC_RPC_ACT_TBL_BMC_OFFSET 0
#define FBNIC_RPC_ACT_TBL_BMC_ALL_MULTI_OFFSET 1
+/* This should leave us with 48 total entries in the TCAM that can be used
+ * for NFC after also deducting the 14 needed for RSS table programming.
+ */
+#define FBNIC_RPC_ACT_TBL_NFC_OFFSET 2
+
/* We reserve the last 14 entries for RSS rules on the host. The BMC
* unicast rule will need to be populated above these and is expected to
* use MACDA TCAM entry 23 to store the BMC MAC address.
@@ -88,6 +108,9 @@ enum {
#define FBNIC_RPC_ACT_TBL_RSS_OFFSET \
(FBNIC_RPC_ACT_TBL_NUM_ENTRIES - FBNIC_RSS_EN_NUM_ENTRIES)
+#define FBNIC_RPC_ACT_TBL_NFC_ENTRIES \
+ (FBNIC_RPC_ACT_TBL_RSS_OFFSET - FBNIC_RPC_ACT_TBL_NFC_OFFSET)
+
/* Flags used to identify the owner for this MAC filter. Note that any
* flags set for Broadcast thru Promisc indicate that the rule belongs
* to the RSS filters for the host.
@@ -168,6 +191,7 @@ void fbnic_rss_init_en_mask(struct fbnic_net *fbn);
void fbnic_rss_disable_hw(struct fbnic_dev *fbd);
void fbnic_rss_reinit_hw(struct fbnic_dev *fbd, struct fbnic_net *fbn);
void fbnic_rss_reinit(struct fbnic_dev *fbd, struct fbnic_net *fbn);
+u16 fbnic_flow_hash_2_rss_en_mask(struct fbnic_net *fbn, int flow_type);
int __fbnic_xc_unsync(struct fbnic_mac_addr *mac_addr, unsigned int tcam_idx);
struct fbnic_mac_addr *__fbnic_uc_sync(struct fbnic_dev *fbd,
@@ -177,6 +201,17 @@ struct fbnic_mac_addr *__fbnic_mc_sync(struct fbnic_dev *fbd,
void fbnic_sift_macda(struct fbnic_dev *fbd);
void fbnic_write_macda(struct fbnic_dev *fbd);
+struct fbnic_ip_addr *__fbnic_ip4_sync(struct fbnic_dev *fbd,
+ struct fbnic_ip_addr *ip_addr,
+ const struct in_addr *addr,
+ const struct in_addr *mask);
+struct fbnic_ip_addr *__fbnic_ip6_sync(struct fbnic_dev *fbd,
+ struct fbnic_ip_addr *ip_addr,
+ const struct in6_addr *addr,
+ const struct in6_addr *mask);
+int __fbnic_ip_unsync(struct fbnic_ip_addr *ip_addr, unsigned int tcam_idx);
+void fbnic_write_ip_addr(struct fbnic_dev *fbd);
+
static inline int __fbnic_uc_unsync(struct fbnic_mac_addr *mac_addr)
{
return __fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_UNICAST);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_tlv.c b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.c
index 2a174ab062a3..517ed8b2f1cb 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_tlv.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.c
@@ -196,13 +196,17 @@ int fbnic_tlv_attr_put_string(struct fbnic_tlv_msg *msg, u16 attr_id,
/**
* fbnic_tlv_attr_get_unsigned - Retrieve unsigned value from result
* @attr: Attribute to retrieve data from
+ * @def: The default value if attr is NULL
*
* Return: unsigned 64b value containing integer value
**/
-u64 fbnic_tlv_attr_get_unsigned(struct fbnic_tlv_msg *attr)
+u64 fbnic_tlv_attr_get_unsigned(struct fbnic_tlv_msg *attr, u64 def)
{
__le64 le64_value = 0;
+ if (!attr)
+ return def;
+
memcpy(&le64_value, &attr->value[0],
le16_to_cpu(attr->hdr.len) - sizeof(*attr));
@@ -212,15 +216,21 @@ u64 fbnic_tlv_attr_get_unsigned(struct fbnic_tlv_msg *attr)
/**
* fbnic_tlv_attr_get_signed - Retrieve signed value from result
* @attr: Attribute to retrieve data from
+ * @def: The default value if attr is NULL
*
* Return: signed 64b value containing integer value
**/
-s64 fbnic_tlv_attr_get_signed(struct fbnic_tlv_msg *attr)
+s64 fbnic_tlv_attr_get_signed(struct fbnic_tlv_msg *attr, s64 def)
{
- int shift = (8 + sizeof(*attr) - le16_to_cpu(attr->hdr.len)) * 8;
__le64 le64_value = 0;
+ int shift;
s64 value;
+ if (!attr)
+ return def;
+
+ shift = (8 + sizeof(*attr) - le16_to_cpu(attr->hdr.len)) * 8;
+
/* Copy the value and adjust for byte ordering */
memcpy(&le64_value, &attr->value[0],
le16_to_cpu(attr->hdr.len) - sizeof(*attr));
@@ -233,19 +243,40 @@ s64 fbnic_tlv_attr_get_signed(struct fbnic_tlv_msg *attr)
/**
* fbnic_tlv_attr_get_string - Retrieve string value from result
* @attr: Attribute to retrieve data from
- * @str: Pointer to an allocated string to store the data
- * @max_size: The maximum size which can be in str
+ * @dst: Pointer to an allocated string to store the data
+ * @dstsize: The maximum size which can be in dst
*
- * Return: the size of the string read from firmware
+ * Return: the size of the string read from firmware or negative error.
**/
-size_t fbnic_tlv_attr_get_string(struct fbnic_tlv_msg *attr, char *str,
- size_t max_size)
+ssize_t fbnic_tlv_attr_get_string(struct fbnic_tlv_msg *attr, char *dst,
+ size_t dstsize)
{
- max_size = min_t(size_t, max_size,
- (le16_to_cpu(attr->hdr.len) * 4) - sizeof(*attr));
- memcpy(str, &attr->value, max_size);
+ size_t srclen, len;
+ ssize_t ret;
+
+ if (!attr)
+ return -EINVAL;
+
+ if (dstsize == 0)
+ return -E2BIG;
+
+ srclen = le16_to_cpu(attr->hdr.len) - sizeof(*attr);
+ if (srclen > 0 && ((char *)attr->value)[srclen - 1] == '\0')
+ srclen--;
+
+ if (srclen >= dstsize) {
+ len = dstsize - 1;
+ ret = -E2BIG;
+ } else {
+ len = srclen;
+ ret = len;
+ }
+
+ memcpy(dst, &attr->value, len);
+ /* Zero pad end of dst. */
+ memset(dst + len, 0, dstsize - len);
- return max_size;
+ return ret;
}
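
The rewritten getter gives strlcpy-like truncation semantics: the destination is always NUL-terminated, and an oversized source reports -E2BIG instead of silently filling the buffer. A hypothetical caller in a parser handler (the attribute id is invented for illustration):

	char ver[16];
	ssize_t len;

	len = fbnic_tlv_attr_get_string(results[FBNIC_FW_VER_ATTR_ID],
					ver, sizeof(ver));
	if (len == -E2BIG) {
		/* ver holds the first 15 characters plus '\0' */
	} else if (len >= 0) {
		/* full string copied; len excludes the terminating '\0' */
	}
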
/**
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h
index 67300ab44353..c34bf87eeec9 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h
@@ -114,34 +114,10 @@ static inline bool fbnic_tlv_attr_get_bool(struct fbnic_tlv_msg *attr)
return !!attr;
}
-u64 fbnic_tlv_attr_get_unsigned(struct fbnic_tlv_msg *attr);
-s64 fbnic_tlv_attr_get_signed(struct fbnic_tlv_msg *attr);
-size_t fbnic_tlv_attr_get_string(struct fbnic_tlv_msg *attr, char *str,
- size_t max_size);
-
-#define get_unsigned_result(id, location) \
-do { \
- struct fbnic_tlv_msg *result = results[id]; \
- if (result) \
- location = fbnic_tlv_attr_get_unsigned(result); \
-} while (0)
-
-#define get_signed_result(id, location) \
-do { \
- struct fbnic_tlv_msg *result = results[id]; \
- if (result) \
- location = fbnic_tlv_attr_get_signed(result); \
-} while (0)
-
-#define get_string_result(id, size, str, max_size) \
-do { \
- struct fbnic_tlv_msg *result = results[id]; \
- if (result) \
- size = fbnic_tlv_attr_get_string(result, str, max_size); \
-} while (0)
-
-#define get_bool(id) (!!(results[id]))
-
+u64 fbnic_tlv_attr_get_unsigned(struct fbnic_tlv_msg *attr, u64 def);
+s64 fbnic_tlv_attr_get_signed(struct fbnic_tlv_msg *attr, s64 def);
+ssize_t fbnic_tlv_attr_get_string(struct fbnic_tlv_msg *attr, char *dst,
+ size_t dstsize);
struct fbnic_tlv_msg *fbnic_tlv_msg_alloc(u16 msg_id);
int fbnic_tlv_attr_put_flag(struct fbnic_tlv_msg *msg, const u16 attr_id);
int fbnic_tlv_attr_put_value(struct fbnic_tlv_msg *msg, const u16 attr_id,
@@ -170,6 +146,13 @@ int fbnic_tlv_msg_parse(void *opaque, struct fbnic_tlv_msg *msg,
const struct fbnic_tlv_parser *parser);
int fbnic_tlv_parser_error(void *opaque, struct fbnic_tlv_msg **results);
+#define fta_get_uint(_results, _id) \
+ fbnic_tlv_attr_get_unsigned(_results[_id], 0)
+#define fta_get_sint(_results, _id) \
+ fbnic_tlv_attr_get_signed(_results[_id], 0)
+#define fta_get_str(_results, _id, _dst, _dstsize) \
+ fbnic_tlv_attr_get_string(_results[_id], _dst, _dstsize)
+
#define FBNIC_TLV_MSG_ERROR \
FBNIC_TLV_PARSER(UNKNOWN, NULL, fbnic_tlv_parser_error)
#endif /* _FBNIC_TLV_H_ */
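
Folding the NULL check into the getters is what lets the old do/while result macros collapse into plain expressions. A sketch of the conversion inside a parser handler (attribute id invented for illustration):

	/* Before: 'speed' was only assigned if the attribute was present */
	get_unsigned_result(FBNIC_ATTR_LINK_SPEED, speed);

	/* After: an absent attribute yields the default (0 here) */
	speed = fta_get_uint(results, FBNIC_ATTR_LINK_SPEED);
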
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
index d4d7027df9a0..ac11389a764c 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
@@ -6,6 +6,7 @@
#include <linux/pci.h>
#include <net/netdev_queues.h>
#include <net/page_pool/helpers.h>
+#include <net/tcp.h>
#include "fbnic.h"
#include "fbnic_csr.h"
@@ -18,6 +19,7 @@ enum {
struct fbnic_xmit_cb {
u32 bytecount;
+ u16 gso_segs;
u8 desc_count;
u8 flags;
int hw_head;
@@ -113,6 +115,11 @@ static int fbnic_maybe_stop_tx(const struct net_device *dev,
res = netif_txq_maybe_stop(txq, fbnic_desc_unused(ring), size,
FBNIC_TX_DESC_WAKEUP);
+ if (!res) {
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.twq.stop++;
+ u64_stats_update_end(&ring->stats.syncp);
+ }
return !res;
}
@@ -174,8 +181,72 @@ static bool fbnic_tx_tstamp(struct sk_buff *skb)
}
static bool
+fbnic_tx_lso(struct fbnic_ring *ring, struct sk_buff *skb,
+ struct skb_shared_info *shinfo, __le64 *meta,
+ unsigned int *l2len, unsigned int *i3len)
+{
+ unsigned int l3_type, l4_type, l4len, hdrlen;
+ unsigned char *l4hdr;
+ __be16 payload_len;
+
+ if (unlikely(skb_cow_head(skb, 0)))
+ return true;
+
+ if (shinfo->gso_type & SKB_GSO_PARTIAL) {
+ l3_type = FBNIC_TWD_L3_TYPE_OTHER;
+ } else if (!skb->encapsulation) {
+ if (ip_hdr(skb)->version == 4)
+ l3_type = FBNIC_TWD_L3_TYPE_IPV4;
+ else
+ l3_type = FBNIC_TWD_L3_TYPE_IPV6;
+ } else {
+ unsigned int o3len;
+
+ o3len = skb_inner_network_header(skb) - skb_network_header(skb);
+ *i3len -= o3len;
+ *meta |= cpu_to_le64(FIELD_PREP(FBNIC_TWD_L3_OHLEN_MASK,
+ o3len / 2));
+ l3_type = FBNIC_TWD_L3_TYPE_V6V6;
+ }
+
+ l4hdr = skb_checksum_start(skb);
+ payload_len = cpu_to_be16(skb->len - (l4hdr - skb->data));
+
+ if (shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
+ struct tcphdr *tcph = (struct tcphdr *)l4hdr;
+
+ l4_type = FBNIC_TWD_L4_TYPE_TCP;
+ l4len = __tcp_hdrlen((struct tcphdr *)l4hdr);
+ csum_replace_by_diff(&tcph->check, (__force __wsum)payload_len);
+ } else {
+ struct udphdr *udph = (struct udphdr *)l4hdr;
+
+ l4_type = FBNIC_TWD_L4_TYPE_UDP;
+ l4len = sizeof(struct udphdr);
+ csum_replace_by_diff(&udph->check, (__force __wsum)payload_len);
+ }
+
+ hdrlen = (l4hdr - skb->data) + l4len;
+ *meta |= cpu_to_le64(FIELD_PREP(FBNIC_TWD_L3_TYPE_MASK, l3_type) |
+ FIELD_PREP(FBNIC_TWD_L4_TYPE_MASK, l4_type) |
+ FIELD_PREP(FBNIC_TWD_L4_HLEN_MASK, l4len / 4) |
+ FIELD_PREP(FBNIC_TWD_MSS_MASK, shinfo->gso_size) |
+ FBNIC_TWD_FLAG_REQ_LSO);
+
+ FBNIC_XMIT_CB(skb)->bytecount += (shinfo->gso_segs - 1) * hdrlen;
+ FBNIC_XMIT_CB(skb)->gso_segs = shinfo->gso_segs;
+
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.twq.lso += shinfo->gso_segs;
+ u64_stats_update_end(&ring->stats.syncp);
+
+ return false;
+}
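
The bytecount adjustment pre-accounts for the headers the hardware will replicate on every segment after the first, so BQL and the byte counters see wire bytes rather than skb bytes. A worked example under assumed sizes:

	/* 5 segments of 1448 bytes behind a 54-byte Ethernet+IPv4+TCP header:
	 * skb->len     = 54 + 5 * 1448          = 7294
	 * wire bytes   = 7294 + (5 - 1) * 54    = 7510
	 * so bytecount += (gso_segs - 1) * hdrlen = 216
	 */
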
+
+static bool
fbnic_tx_offloads(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
unsigned int l2len, i3len;
if (fbnic_tx_tstamp(skb))
@@ -190,7 +261,15 @@ fbnic_tx_offloads(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
*meta |= cpu_to_le64(FIELD_PREP(FBNIC_TWD_CSUM_OFFSET_MASK,
skb->csum_offset / 2));
- *meta |= cpu_to_le64(FBNIC_TWD_FLAG_REQ_CSO);
+ if (shinfo->gso_size) {
+ if (fbnic_tx_lso(ring, skb, shinfo, meta, &l2len, &i3len))
+ return true;
+ } else {
+ *meta |= cpu_to_le64(FBNIC_TWD_FLAG_REQ_CSO);
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.twq.csum_partial++;
+ u64_stats_update_end(&ring->stats.syncp);
+ }
*meta |= cpu_to_le64(FIELD_PREP(FBNIC_TWD_L2_HLEN_MASK, l2len / 2) |
FIELD_PREP(FBNIC_TWD_L3_IHLEN_MASK, i3len / 2));
@@ -198,12 +277,15 @@ fbnic_tx_offloads(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
}
static void
-fbnic_rx_csum(u64 rcd, struct sk_buff *skb, struct fbnic_ring *rcq)
+fbnic_rx_csum(u64 rcd, struct sk_buff *skb, struct fbnic_ring *rcq,
+ u64 *csum_cmpl, u64 *csum_none)
{
skb_checksum_none_assert(skb);
- if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))
+ if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
+ (*csum_none)++;
return;
+ }
if (FIELD_GET(FBNIC_RCD_META_L4_CSUM_UNNECESSARY, rcd)) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -212,6 +294,7 @@ fbnic_rx_csum(u64 rcd, struct sk_buff *skb, struct fbnic_ring *rcq)
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = (__force __wsum)csum;
+ (*csum_cmpl)++;
}
}
@@ -329,7 +412,9 @@ fbnic_xmit_frame_ring(struct sk_buff *skb, struct fbnic_ring *ring)
/* Write all members within DWORD to condense this into 2 4B writes */
FBNIC_XMIT_CB(skb)->bytecount = skb->len;
+ FBNIC_XMIT_CB(skb)->gso_segs = 1;
FBNIC_XMIT_CB(skb)->desc_count = 0;
+ FBNIC_XMIT_CB(skb)->flags = 0;
if (fbnic_tx_offloads(ring, skb, meta))
goto err_free;
@@ -356,6 +441,59 @@ netdev_tx_t fbnic_xmit_frame(struct sk_buff *skb, struct net_device *dev)
return fbnic_xmit_frame_ring(skb, fbn->tx[q_map]);
}
+static netdev_features_t
+fbnic_features_check_encap_gso(struct sk_buff *skb, struct net_device *dev,
+ netdev_features_t features, unsigned int l3len)
+{
+ netdev_features_t skb_gso_features;
+ struct ipv6hdr *ip6_hdr;
+ unsigned char l4_hdr;
+ unsigned int start;
+ __be16 frag_off;
+
+ /* Require MANGLEID for GSO_PARTIAL of IPv4.
+ * In theory we could support TSO with a single, innermost v4 header
+ * by pretending everything before it is L2, but that needs to be
+ * parsed case by case, so leave it for when the need arises.
+ */
+ if (!(features & NETIF_F_TSO_MANGLEID))
+ features &= ~NETIF_F_TSO;
+
+ skb_gso_features = skb_shinfo(skb)->gso_type;
+ skb_gso_features <<= NETIF_F_GSO_SHIFT;
+
+ /* We'd only clear the native GSO features, so don't bother validating
+ * if the match can only be on those supported through GSO_PARTIAL.
+ */
+ if (!(skb_gso_features & FBNIC_TUN_GSO_FEATURES))
+ return features;
+
+ /* We can only do IPv6-in-IPv6, not v4-in-v6. It'd be nice
+ * to fall back to partial for this, or any failure below.
+ * This is just an optimization; UDPv4 will be caught later on.
+ */
+ if (skb_gso_features & NETIF_F_TSO)
+ return features & ~FBNIC_TUN_GSO_FEATURES;
+
+ /* Offset to the inner headers must be a multiple of 2 */
+ if ((skb_inner_network_header(skb) - skb_network_header(skb)) % 2)
+ return features & ~FBNIC_TUN_GSO_FEATURES;
+
+ /* Encapsulated GSO packet, make 100% sure it's IPv6-in-IPv6. */
+ ip6_hdr = ipv6_hdr(skb);
+ if (ip6_hdr->version != 6)
+ return features & ~FBNIC_TUN_GSO_FEATURES;
+
+ l4_hdr = ip6_hdr->nexthdr;
+ start = (unsigned char *)ip6_hdr - skb->data + sizeof(struct ipv6hdr);
+ start = ipv6_skip_exthdr(skb, start, &l4_hdr, &frag_off);
+ if (frag_off || l4_hdr != IPPROTO_IPV6 ||
+ skb->data + start != skb_inner_network_header(skb))
+ return features & ~FBNIC_TUN_GSO_FEATURES;
+
+ return features;
+}
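
The translation from skb GSO type bits to netdev feature bits works because the two bit spaces are aligned by NETIF_F_GSO_SHIFT. A minimal sketch of the invariant this code relies on:

	/* SKB_GSO_TCPV4 is bit 0 of gso_type and NETIF_F_TSO is the
	 * first GSO feature bit, so shifting lines the spaces up:
	 */
	netdev_features_t f;

	f = (netdev_features_t)skb_shinfo(skb)->gso_type << NETIF_F_GSO_SHIFT;
	if (f & NETIF_F_TSO)
		; /* packet was SKB_GSO_TCPV4 */
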
+
netdev_features_t
fbnic_features_check(struct sk_buff *skb, struct net_device *dev,
netdev_features_t features)
@@ -376,9 +514,12 @@ fbnic_features_check(struct sk_buff *skb, struct net_device *dev,
!FIELD_FIT(FBNIC_TWD_L2_HLEN_MASK, l2len / 2) ||
!FIELD_FIT(FBNIC_TWD_L3_IHLEN_MASK, l3len / 2) ||
!FIELD_FIT(FBNIC_TWD_CSUM_OFFSET_MASK, skb->csum_offset / 2))
- return features & ~NETIF_F_CSUM_MASK;
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
- return features;
+ if (likely(!skb->encapsulation) || !skb_is_gso(skb))
+ return features;
+
+ return fbnic_features_check_encap_gso(skb, dev, features, l3len);
}
static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget,
@@ -429,7 +570,7 @@ static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget,
}
total_bytes += FBNIC_XMIT_CB(skb)->bytecount;
- total_packets += 1;
+ total_packets += FBNIC_XMIT_CB(skb)->gso_segs;
napi_consume_skb(skb, napi_budget);
}
@@ -444,7 +585,7 @@ static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget,
if (unlikely(discard)) {
u64_stats_update_begin(&ring->stats.syncp);
ring->stats.dropped += total_packets;
- ring->stats.ts_lost += ts_lost;
+ ring->stats.twq.ts_lost += ts_lost;
u64_stats_update_end(&ring->stats.syncp);
netdev_tx_completed_queue(txq, total_packets, total_bytes);
@@ -456,9 +597,13 @@ static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget,
ring->stats.packets += total_packets;
u64_stats_update_end(&ring->stats.syncp);
- netif_txq_completed_wake(txq, total_packets, total_bytes,
- fbnic_desc_unused(ring),
- FBNIC_TX_DESC_WAKEUP);
+ if (!netif_txq_completed_wake(txq, total_packets, total_bytes,
+ fbnic_desc_unused(ring),
+ FBNIC_TX_DESC_WAKEUP)) {
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.twq.wake++;
+ u64_stats_update_end(&ring->stats.syncp);
+ }
}
static void fbnic_clean_tsq(struct fbnic_napi_vector *nv,
@@ -507,7 +652,7 @@ static void fbnic_clean_tsq(struct fbnic_napi_vector *nv,
skb_tstamp_tx(skb, &hwtstamp);
u64_stats_update_begin(&ring->stats.syncp);
- ring->stats.ts_packets++;
+ ring->stats.twq.ts_packets++;
u64_stats_update_end(&ring->stats.syncp);
}
@@ -661,8 +806,13 @@ static void fbnic_fill_bdq(struct fbnic_napi_vector *nv, struct fbnic_ring *bdq)
struct page *page;
page = page_pool_dev_alloc_pages(nv->page_pool);
- if (!page)
+ if (!page) {
+ u64_stats_update_begin(&bdq->stats.syncp);
+ bdq->stats.rx.alloc_failed++;
+ u64_stats_update_end(&bdq->stats.syncp);
+
break;
+ }
fbnic_page_pool_init(bdq, i, page);
fbnic_bd_prep(bdq, i, page);
@@ -875,12 +1025,13 @@ static void fbnic_rx_tstamp(struct fbnic_napi_vector *nv, u64 rcd,
static void fbnic_populate_skb_fields(struct fbnic_napi_vector *nv,
u64 rcd, struct sk_buff *skb,
- struct fbnic_q_triad *qt)
+ struct fbnic_q_triad *qt,
+ u64 *csum_cmpl, u64 *csum_none)
{
struct net_device *netdev = nv->napi.dev;
struct fbnic_ring *rcq = &qt->cmpl;
- fbnic_rx_csum(rcd, skb, rcq);
+ fbnic_rx_csum(rcd, skb, rcq, csum_cmpl, csum_none);
if (netdev->features & NETIF_F_RXHASH)
skb_set_hash(skb,
@@ -898,7 +1049,8 @@ static bool fbnic_rcd_metadata_err(u64 rcd)
static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
struct fbnic_q_triad *qt, int budget)
{
- unsigned int packets = 0, bytes = 0, dropped = 0;
+ unsigned int packets = 0, bytes = 0, dropped = 0, alloc_failed = 0;
+ u64 csum_complete = 0, csum_none = 0;
struct fbnic_ring *rcq = &qt->cmpl;
struct fbnic_pkt_buff *pkt;
s32 head0 = -1, head1 = -1;
@@ -947,14 +1099,20 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
/* Populate skb and invalidate XDP */
if (!IS_ERR_OR_NULL(skb)) {
- fbnic_populate_skb_fields(nv, rcd, skb, qt);
+ fbnic_populate_skb_fields(nv, rcd, skb, qt,
+ &csum_complete,
+ &csum_none);
packets++;
bytes += skb->len;
napi_gro_receive(&nv->napi, skb);
} else {
- dropped++;
+ if (!skb)
+ alloc_failed++;
+
+ dropped++;
+
fbnic_put_pkt_buff(nv, pkt, 1);
}
@@ -977,6 +1137,9 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
/* Re-add ethernet header length (removed in fbnic_build_skb) */
rcq->stats.bytes += ETH_HLEN * packets;
rcq->stats.dropped += dropped;
+ rcq->stats.rx.alloc_failed += alloc_failed;
+ rcq->stats.rx.csum_complete += csum_complete;
+ rcq->stats.rx.csum_none += csum_none;
u64_stats_update_end(&rcq->stats.syncp);
/* Unmap and free processed buffers */
@@ -1054,6 +1217,11 @@ void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn,
fbn->rx_stats.bytes += stats->bytes;
fbn->rx_stats.packets += stats->packets;
fbn->rx_stats.dropped += stats->dropped;
+ fbn->rx_stats.rx.alloc_failed += stats->rx.alloc_failed;
+ fbn->rx_stats.rx.csum_complete += stats->rx.csum_complete;
+ fbn->rx_stats.rx.csum_none += stats->rx.csum_none;
+ /* Remember to add new stats here */
+ BUILD_BUG_ON(sizeof(fbn->rx_stats.rx) / 8 != 3);
}
void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
@@ -1065,8 +1233,14 @@ void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
fbn->tx_stats.bytes += stats->bytes;
fbn->tx_stats.packets += stats->packets;
fbn->tx_stats.dropped += stats->dropped;
- fbn->tx_stats.ts_lost += stats->ts_lost;
- fbn->tx_stats.ts_packets += stats->ts_packets;
+ fbn->tx_stats.twq.csum_partial += stats->twq.csum_partial;
+ fbn->tx_stats.twq.lso += stats->twq.lso;
+ fbn->tx_stats.twq.ts_lost += stats->twq.ts_lost;
+ fbn->tx_stats.twq.ts_packets += stats->twq.ts_packets;
+ fbn->tx_stats.twq.stop += stats->twq.stop;
+ fbn->tx_stats.twq.wake += stats->twq.wake;
+ /* Remember to add new stats here */
+ BUILD_BUG_ON(sizeof(fbn->tx_stats.twq) / 8 != 6);
}
static void fbnic_remove_tx_ring(struct fbnic_net *fbn,
@@ -1142,7 +1316,9 @@ static int fbnic_alloc_nv_page_pool(struct fbnic_net *fbn,
.dev = nv->dev,
.dma_dir = DMA_BIDIRECTIONAL,
.offset = 0,
- .max_len = PAGE_SIZE
+ .max_len = PAGE_SIZE,
+ .napi = &nv->napi,
+ .netdev = fbn->netdev,
};
struct page_pool *pp;
@@ -2010,9 +2186,51 @@ static void fbnic_config_drop_mode_rcq(struct fbnic_napi_vector *nv,
fbnic_ring_wr32(rcq, FBNIC_QUEUE_RDE_CTL0, rcq_ctl);
}
+static void fbnic_config_rim_threshold(struct fbnic_ring *rcq, u16 nv_idx, u32 rx_desc)
+{
+ u32 threshold;
+
+ /* Set the threshold to half the ring size if rx_frames
+ * is not configured
+ */
+ threshold = rx_desc ? : rcq->size_mask / 2;
+
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_CTL, nv_idx);
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_THRESHOLD, threshold);
+}
+
+void fbnic_config_txrx_usecs(struct fbnic_napi_vector *nv, u32 arm)
+{
+ struct fbnic_net *fbn = netdev_priv(nv->napi.dev);
+ struct fbnic_dev *fbd = nv->fbd;
+ u32 val = arm;
+
+ val |= FIELD_PREP(FBNIC_INTR_CQ_REARM_RCQ_TIMEOUT, fbn->rx_usecs) |
+ FBNIC_INTR_CQ_REARM_RCQ_TIMEOUT_UPD_EN;
+ val |= FIELD_PREP(FBNIC_INTR_CQ_REARM_TCQ_TIMEOUT, fbn->tx_usecs) |
+ FBNIC_INTR_CQ_REARM_TCQ_TIMEOUT_UPD_EN;
+
+ fbnic_wr32(fbd, FBNIC_INTR_CQ_REARM(nv->v_idx), val);
+}
+
+void fbnic_config_rx_frames(struct fbnic_napi_vector *nv)
+{
+ struct fbnic_net *fbn = netdev_priv(nv->napi.dev);
+ int i;
+
+ for (i = nv->txt_count; i < nv->rxt_count + nv->txt_count; i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ fbnic_config_rim_threshold(&qt->cmpl, nv->v_idx,
+ fbn->rx_max_frames *
+ FBNIC_MIN_RXD_PER_FRAME);
+ }
+}
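
With rx_max_frames left at its FBNIC_RX_FRAMES_DEFAULT of 0, rx_desc comes in as 0 and the GNU ?: fallback arms the interrupt at half ring occupancy. Worked numbers, assuming the default 1024-entry RCQ:

	/* rx_desc = 0 * FBNIC_MIN_RXD_PER_FRAME = 0, so:
	 * threshold = 0 ?: (1024 - 1) / 2 = 511 descriptors
	 */
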
+
static void fbnic_enable_rcq(struct fbnic_napi_vector *nv,
struct fbnic_ring *rcq)
{
+ struct fbnic_net *fbn = netdev_priv(nv->napi.dev);
u32 log_size = fls(rcq->size_mask);
u32 rcq_ctl;
@@ -2040,8 +2258,8 @@ static void fbnic_enable_rcq(struct fbnic_napi_vector *nv,
fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_SIZE, log_size & 0xf);
/* Store interrupt information for the completion queue */
- fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_CTL, nv->v_idx);
- fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_THRESHOLD, rcq->size_mask / 2);
+ fbnic_config_rim_threshold(rcq, nv->v_idx, fbn->rx_max_frames *
+ FBNIC_MIN_RXD_PER_FRAME);
fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_MASK, 0);
/* Enable queue */
@@ -2080,12 +2298,7 @@ void fbnic_enable(struct fbnic_net *fbn)
static void fbnic_nv_irq_enable(struct fbnic_napi_vector *nv)
{
- struct fbnic_dev *fbd = nv->fbd;
- u32 val;
-
- val = FBNIC_INTR_CQ_REARM_INTR_UNMASK;
-
- fbnic_wr32(fbd, FBNIC_INTR_CQ_REARM(nv->v_idx), val);
+ fbnic_config_txrx_usecs(nv, FBNIC_INTR_CQ_REARM_INTR_UNMASK);
}
void fbnic_napi_enable(struct fbnic_net *fbn)
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
index c2a94f31f71b..f46616af41ea 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
@@ -24,13 +24,29 @@ struct fbnic_net;
#define FBNIC_TX_DESC_WAKEUP (FBNIC_MAX_SKB_DESC * 2)
#define FBNIC_TX_DESC_MIN roundup_pow_of_two(FBNIC_TX_DESC_WAKEUP)
+/* To receive the worst case packet we need:
+ * 1 descriptor for primary metadata
+ * + 1 descriptor for optional metadata
+ * + 1 descriptor for headers
+ * + 4 descriptors for payload
+ */
+#define FBNIC_MAX_RX_PKT_DESC 7
+#define FBNIC_RX_DESC_MIN roundup_pow_of_two(FBNIC_MAX_RX_PKT_DESC * 2)
+
#define FBNIC_MAX_TXQS 128u
#define FBNIC_MAX_RXQS 128u
+/* These apply to TWQs, TCQ, RCQ */
+#define FBNIC_QUEUE_SIZE_MIN 16u
+#define FBNIC_QUEUE_SIZE_MAX SZ_64K
+
#define FBNIC_TXQ_SIZE_DEFAULT 1024
#define FBNIC_HPQ_SIZE_DEFAULT 256
#define FBNIC_PPQ_SIZE_DEFAULT 256
#define FBNIC_RCQ_SIZE_DEFAULT 1024
+#define FBNIC_TX_USECS_DEFAULT 35
+#define FBNIC_RX_USECS_DEFAULT 30
+#define FBNIC_RX_FRAMES_DEFAULT 0
#define FBNIC_RX_TROOM \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
@@ -56,9 +72,22 @@ struct fbnic_pkt_buff {
struct fbnic_queue_stats {
u64 packets;
u64 bytes;
+ union {
+ struct {
+ u64 csum_partial;
+ u64 lso;
+ u64 ts_packets;
+ u64 ts_lost;
+ u64 stop;
+ u64 wake;
+ } twq;
+ struct {
+ u64 alloc_failed;
+ u64 csum_complete;
+ u64 csum_none;
+ } rx;
+ };
u64 dropped;
- u64 ts_packets;
- u64 ts_lost;
struct u64_stats_sync syncp;
};
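
Since a ring is exclusively TX or RX, the per-type counters can alias in a union, and the BUILD_BUG_ON()s added to the aggregation helpers catch fields added without a matching copy. The same guard pattern in a self-contained sketch (names invented):

	struct example_stats {
		union {
			struct { u64 a, b, c, d, e, f; } twq; /* TX-only */
			struct { u64 x, y, z; } rx;           /* RX-only */
		};
	};

	/* Breaks the build if rx grows without updating the copy loop */
	static_assert(sizeof(((struct example_stats *)0)->rx) / 8 == 3,
		      "update the rx aggregation code");
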
diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
index 3062cc0f9199..c862b13b447a 100644
--- a/drivers/net/ethernet/micrel/ks8851_spi.c
+++ b/drivers/net/ethernet/micrel/ks8851_spi.c
@@ -20,8 +20,6 @@
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
#include <linux/of_net.h>
#include "ks8851.h"
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
index 4a777b449ecd..0be44dcb3393 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -942,6 +942,12 @@ static int lan743x_ptp_io_extts(struct lan743x_adapter *adapter, int on,
extts = &ptp->extts[index];
+ if (extts_request->flags & ~(PTP_ENABLE_FEATURE |
+ PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS))
+ return -EOPNOTSUPP;
+
if (on) {
extts_pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_EXTTS, index);
if (extts_pin < 0)
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 3234a960fcc3..0af143ec0f86 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -828,7 +828,6 @@ static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
port->phylink_config.type = PHYLINK_NETDEV;
port->phylink_pcs.poll = true;
port->phylink_pcs.ops = &lan966x_phylink_pcs_ops;
- port->phylink_pcs.neg_mode = true;
port->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000FD | MAC_2500FD;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
index 6a0e5b83ecd0..74ad1d73b465 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -338,7 +338,6 @@ static int sparx5_create_port(struct sparx5 *sparx5,
spx5_port->custom_etype = 0x8880; /* Vitesse */
spx5_port->phylink_pcs.poll = true;
spx5_port->phylink_pcs.ops = &sparx5_phylink_pcs_ops;
- spx5_port->phylink_pcs.neg_mode = true;
spx5_port->is_mrouter = false;
INIT_LIST_HEAD(&spx5_port->tc_templates);
sparx5->ports[config->portno] = spx5_port;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
index 138ac58fae51..f713656f1fae 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
@@ -375,6 +375,6 @@ irqreturn_t sparx5_xtr_handler(int irq, void *_sparx5)
void sparx5_port_inj_timer_setup(struct sparx5_port *port)
{
- hrtimer_init(&port->inj_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- port->inj_timer.function = sparx5_injection_timeout;
+ hrtimer_setup(&port->inj_timer, sparx5_injection_timeout, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
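
hrtimer_setup() folds initialization and callback assignment into a single call, which is the tree-wide replacement for the open-coded pair being removed here. The conversion is mechanical:

	/* Old pattern */
	hrtimer_init(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	t.function = my_timeout_fn;

	/* New pattern: the callback can never be left unset */
	hrtimer_setup(&t, my_timeout_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
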
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 11457b6296cc..4ffaf7588885 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -134,9 +134,10 @@ static int mana_gd_detect_devices(struct pci_dev *pdev)
struct gdma_list_devices_resp resp = {};
struct gdma_general_req req = {};
struct gdma_dev_id dev;
- u32 i, max_num_devs;
+ int found_dev = 0;
u16 dev_type;
int err;
+ u32 i;
mana_gd_init_req_hdr(&req.hdr, GDMA_LIST_DEVICES, sizeof(req),
sizeof(resp));
@@ -148,12 +149,17 @@ static int mana_gd_detect_devices(struct pci_dev *pdev)
return err ? err : -EPROTO;
}
- max_num_devs = min_t(u32, MAX_NUM_GDMA_DEVICES, resp.num_of_devs);
-
- for (i = 0; i < max_num_devs; i++) {
+ for (i = 0; i < GDMA_DEV_LIST_SIZE &&
+ found_dev < resp.num_of_devs; i++) {
dev = resp.devs[i];
dev_type = dev.type;
+ /* Skip empty devices */
+ if (dev.as_uint32 == 0)
+ continue;
+
+ found_dev++;
+
/* HWC is already detected in mana_hwc_create_channel(). */
if (dev_type == GDMA_DEVICE_HWC)
continue;
@@ -331,6 +337,7 @@ void mana_gd_wq_ring_doorbell(struct gdma_context *gc, struct gdma_queue *queue)
mana_gd_ring_doorbell(gc, queue->gdma_dev->doorbell, queue->type,
queue->id, queue->head * GDMA_WQE_BU_SIZE, 0);
}
+EXPORT_SYMBOL_NS(mana_gd_wq_ring_doorbell, "NET_MANA");
void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit)
{
@@ -343,6 +350,7 @@ void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit)
mana_gd_ring_doorbell(gc, cq->gdma_dev->doorbell, cq->type, cq->id,
head, arm_bit);
}
+EXPORT_SYMBOL_NS(mana_gd_ring_cq, "NET_MANA");
static void mana_gd_process_eqe(struct gdma_queue *eq)
{
@@ -666,8 +674,11 @@ int mana_gd_create_hwc_queue(struct gdma_dev *gd,
gmi = &queue->mem_info;
err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
- if (err)
+ if (err) {
+ dev_err(gc->dev, "GDMA queue type: %d, size: %u, gdma memory allocation err: %d\n",
+ spec->type, spec->queue_size, err);
goto free_q;
+ }
queue->head = 0;
queue->tail = 0;
@@ -688,6 +699,8 @@ int mana_gd_create_hwc_queue(struct gdma_dev *gd,
*queue_ptr = queue;
return 0;
out:
+ dev_err(gc->dev, "Failed to create queue type %d of size %u, err: %d\n",
+ spec->type, spec->queue_size, err);
mana_gd_free_memory(gmi);
free_q:
kfree(queue);
@@ -770,7 +783,13 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd,
}
gmi->dma_region_handle = resp.dma_region_handle;
+ dev_dbg(gc->dev, "Created DMA region handle 0x%llx\n",
+ gmi->dma_region_handle);
out:
+ if (err)
+ dev_dbg(gc->dev,
+ "Failed to create DMA region of length: %u, page_type: %d, status: 0x%x, err: %d\n",
+ length, req->gdma_page_type, resp.hdr.status, err);
kfree(req);
return err;
}
@@ -793,8 +812,11 @@ int mana_gd_create_mana_eq(struct gdma_dev *gd,
gmi = &queue->mem_info;
err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
- if (err)
+ if (err) {
+ dev_err(gc->dev, "GDMA queue type: %d, size: %u, gdma memory allocation err: %d\n",
+ spec->type, spec->queue_size, err);
goto free_q;
+ }
err = mana_gd_create_dma_region(gd, gmi);
if (err)
@@ -815,6 +837,8 @@ int mana_gd_create_mana_eq(struct gdma_dev *gd,
*queue_ptr = queue;
return 0;
out:
+ dev_err(gc->dev, "Failed to create queue type %d of size: %u, err: %d\n",
+ spec->type, spec->queue_size, err);
mana_gd_free_memory(gmi);
free_q:
kfree(queue);
@@ -841,8 +865,11 @@ int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
gmi = &queue->mem_info;
err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
- if (err)
+ if (err) {
+ dev_err(gc->dev, "GDMA queue type: %d, size: %u, memory allocation err: %d\n",
+ spec->type, spec->queue_size, err);
goto free_q;
+ }
err = mana_gd_create_dma_region(gd, gmi);
if (err)
@@ -862,11 +889,14 @@ int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
*queue_ptr = queue;
return 0;
out:
+ dev_err(gc->dev, "Failed to create queue type %d of size: %u, err: %d\n",
+ spec->type, spec->queue_size, err);
mana_gd_free_memory(gmi);
free_q:
kfree(queue);
return err;
}
+EXPORT_SYMBOL_NS(mana_gd_create_mana_wq_cq, "NET_MANA");
void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
{
@@ -1041,7 +1071,7 @@ static u32 mana_gd_write_client_oob(const struct gdma_wqe_request *wqe_req,
header->inline_oob_size_div4 = client_oob_size / sizeof(u32);
if (oob_in_sgl) {
- WARN_ON_ONCE(!pad_data || wqe_req->num_sge < 2);
+ WARN_ON_ONCE(wqe_req->num_sge < 2);
header->client_oob_in_sgl = 1;
@@ -1148,6 +1178,7 @@ int mana_gd_post_work_request(struct gdma_queue *wq,
return 0;
}
+EXPORT_SYMBOL_NS(mana_gd_post_work_request, "NET_MANA");
int mana_gd_post_and_ring(struct gdma_queue *queue,
const struct gdma_wqe_request *wqe_req,
@@ -1157,8 +1188,11 @@ int mana_gd_post_and_ring(struct gdma_queue *queue,
int err;
err = mana_gd_post_work_request(queue, wqe_req, wqe_info);
- if (err)
+ if (err) {
+ dev_err(gc->dev, "Failed to post work req from queue type %d of size %u (err=%d)\n",
+ queue->type, queue->queue_size, err);
return err;
+ }
mana_gd_wq_ring_doorbell(gc, queue);
@@ -1218,6 +1252,7 @@ int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe)
return cqe_idx;
}
+EXPORT_SYMBOL_NS(mana_gd_poll_cq, "NET_MANA");
static irqreturn_t mana_gd_intr(int irq, void *arg)
{
@@ -1435,8 +1470,10 @@ static int mana_gd_setup(struct pci_dev *pdev)
mana_smc_init(&gc->shm_channel, gc->dev, gc->shm_base);
err = mana_gd_setup_irqs(pdev);
- if (err)
+ if (err) {
+ dev_err(gc->dev, "Failed to setup IRQs: %d\n", err);
return err;
+ }
err = mana_hwc_create_channel(gc);
if (err)
@@ -1454,12 +1491,14 @@ static int mana_gd_setup(struct pci_dev *pdev)
if (err)
goto destroy_hwc;
+ dev_dbg(&pdev->dev, "mana gdma setup successful\n");
return 0;
destroy_hwc:
mana_hwc_destroy_channel(gc);
remove_irq:
mana_gd_remove_irqs(pdev);
+ dev_err(&pdev->dev, "%s failed (error %d)\n", __func__, err);
return err;
}
@@ -1470,6 +1509,7 @@ static void mana_gd_cleanup(struct pci_dev *pdev)
mana_hwc_destroy_channel(gc);
mana_gd_remove_irqs(pdev);
+ dev_dbg(&pdev->dev, "mana gdma cleanup successful\n");
}
static bool mana_is_pf(unsigned short dev_id)
@@ -1488,8 +1528,10 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
BUILD_BUG_ON(2 * MAX_PORTS_IN_MANA_DEV * GDMA_EQE_SIZE > EQ_SIZE);
err = pci_enable_device(pdev);
- if (err)
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable pci device (err=%d)\n", err);
return -ENXIO;
+ }
pci_set_master(pdev);
@@ -1498,9 +1540,10 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto disable_dev;
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (err)
+ if (err) {
+ dev_err(&pdev->dev, "DMA set mask failed: %d\n", err);
goto release_region;
-
+ }
dma_set_max_seg_size(&pdev->dev, UINT_MAX);
err = -ENOMEM;
@@ -1578,6 +1621,8 @@ static void mana_gd_remove(struct pci_dev *pdev)
pci_release_regions(pdev);
pci_disable_device(pdev);
+
+ dev_dbg(&pdev->dev, "mana gdma remove successful\n");
}
/* The 'state' parameter is not used. */
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index a00f915c5188..1ba49602089b 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -440,7 +440,8 @@ static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth,
gmi = &dma_buf->mem_info;
err = mana_gd_alloc_memory(gc, buf_size, gmi);
if (err) {
- dev_err(hwc->dev, "Failed to allocate DMA buffer: %d\n", err);
+ dev_err(hwc->dev, "Failed to allocate DMA buffer size: %u, err %d\n",
+ buf_size, err);
goto out;
}
@@ -529,6 +530,9 @@ static int mana_hwc_create_wq(struct hw_channel_context *hwc,
out:
- if (err)
+ if (err) {
 mana_hwc_destroy_wq(hwc, hwc_wq);
+ dev_err(hwc->dev, "Failed to create HWC queue size=%u type=%d err=%d\n",
+ queue_size, q_type, err);
+ }
return err;
}
diff --git a/drivers/net/ethernet/microsoft/mana/mana_bpf.c b/drivers/net/ethernet/microsoft/mana/mana_bpf.c
index 23b1521c0df9..d30721d4516f 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_bpf.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_bpf.c
@@ -91,7 +91,7 @@ u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
goto out;
xdp_init_buff(xdp, PAGE_SIZE, &rxq->xdp_rxq);
- xdp_prepare_buff(xdp, buf_va, XDP_PACKET_HEADROOM, pkt_len, false);
+ xdp_prepare_buff(xdp, buf_va, XDP_PACKET_HEADROOM, pkt_len, true);
act = bpf_prog_run_xdp(prog, xdp);
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index ae76ecc7a5d3..1423df8531f7 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -13,6 +13,7 @@
#include <net/checksum.h>
#include <net/ip6_checksum.h>
+#include <net/netdev_lock.h>
#include <net/page_pool/helpers.h>
#include <net/xdp.h>
@@ -52,10 +53,12 @@ static int mana_open(struct net_device *ndev)
{
struct mana_port_context *apc = netdev_priv(ndev);
int err;
-
err = mana_alloc_queues(ndev);
- if (err)
+
+ if (err) {
+ netdev_err(ndev, "%s failed to allocate queues: %d\n", __func__, err);
return err;
+ }
apc->port_is_up = true;
@@ -64,7 +67,7 @@ static int mana_open(struct net_device *ndev)
netif_carrier_on(ndev);
netif_tx_wake_all_queues(ndev);
-
+ netdev_dbg(ndev, "%s successful\n", __func__);
return 0;
}
@@ -176,6 +179,9 @@ static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc,
return 0;
frag_err:
+ if (net_ratelimit())
+ netdev_err(apc->ndev, "Failed to map skb of size %u to DMA\n",
+ skb->len);
for (i = sg_i - 1; i >= hsg; i--)
dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
DMA_TO_DEVICE);
@@ -256,6 +262,9 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (skb_cow_head(skb, MANA_HEADROOM))
goto tx_drop_count;
+ if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
+ goto tx_drop_count;
+
txq = &apc->tx_qp[txq_idx].txq;
gdma_sq = txq->gdma_sq;
cq = &apc->tx_qp[txq_idx].tx_cq;
@@ -687,6 +696,7 @@ int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu, int num_qu
return 0;
error:
+ netdev_err(mpc->ndev, "Failed to pre-allocate RX buffers for %d queues\n", num_queues);
mana_pre_dealloc_rxbufs(mpc);
return -ENOMEM;
}
@@ -1304,8 +1314,10 @@ static int mana_create_eq(struct mana_context *ac)
for (i = 0; i < gc->max_num_queues; i++) {
spec.eq.msix_index = (i + 1) % gc->num_msix_usable;
err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
- if (err)
+ if (err) {
+ dev_err(gc->dev, "Failed to create EQ %d : %d\n", i, err);
goto out;
+ }
mana_create_eq_debugfs(ac, i);
}
@@ -1547,8 +1559,12 @@ static struct sk_buff *mana_build_skb(struct mana_rxq *rxq, void *buf_va,
return NULL;
if (xdp->data_hard_start) {
+ u32 metasize = xdp->data - xdp->data_meta;
+
skb_reserve(skb, xdp->data - xdp->data_hard_start);
skb_put(skb, xdp->data_end - xdp->data);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
return skb;
}
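
Together with the xdp_prepare_buff() change in mana_bpf.c above (meta_valid now true), this lets an XDP program pass per-packet metadata through to the skb. A minimal, hypothetical program that would exercise the new path:

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	SEC("xdp")
	int xdp_meta_example(struct xdp_md *ctx)
	{
		__u32 *meta;
		void *data;

		/* Grow the metadata area by 4 bytes in front of the packet */
		if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta)))
			return XDP_PASS;

		meta = (void *)(long)ctx->data_meta;
		data = (void *)(long)ctx->data;
		if ((void *)(meta + 1) > data) /* verifier bounds check */
			return XDP_PASS;

		*meta = 0xdeadbeef; /* e.g. a classification tag for the stack */
		return XDP_PASS;
	}
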
@@ -2081,6 +2097,8 @@ static int mana_create_txq(struct mana_port_context *apc,
return 0;
out:
+ netdev_err(net, "Failed to create %d TX queues, %d\n",
+ apc->num_queues, err);
mana_destroy_txq(apc);
return err;
}
@@ -2417,6 +2435,7 @@ static int mana_add_rx_queues(struct mana_port_context *apc,
rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
if (!rxq) {
err = -ENOMEM;
+ netdev_err(ndev, "Failed to create rxq %d : %d\n", i, err);
goto out;
}
@@ -2663,12 +2682,18 @@ int mana_alloc_queues(struct net_device *ndev)
int err;
err = mana_create_vport(apc, ndev);
- if (err)
+ if (err) {
+ netdev_err(ndev, "Failed to create vPort %u : %d\n", apc->port_idx, err);
return err;
+ }
err = netif_set_real_num_tx_queues(ndev, apc->num_queues);
- if (err)
+ if (err) {
+ netdev_err(ndev,
+ "netif_set_real_num_tx_queues () failed for ndev with num_queues %u : %d\n",
+ apc->num_queues, err);
goto destroy_vport;
+ }
err = mana_add_rx_queues(apc, ndev);
if (err)
@@ -2677,14 +2702,20 @@ int mana_alloc_queues(struct net_device *ndev)
apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;
err = netif_set_real_num_rx_queues(ndev, apc->num_queues);
- if (err)
+ if (err) {
+ netdev_err(ndev,
+ "netif_set_real_num_rx_queues () failed for ndev with num_queues %u : %d\n",
+ apc->num_queues, err);
goto destroy_vport;
+ }
mana_rss_table_init(apc);
err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
- if (err)
+ if (err) {
+ netdev_err(ndev, "Failed to configure RSS table: %d\n", err);
goto destroy_vport;
+ }
if (gd->gdma_context->is_pf) {
err = mana_pf_register_filter(apc);
@@ -2825,8 +2856,10 @@ int mana_detach(struct net_device *ndev, bool from_close)
if (apc->port_st_save) {
err = mana_dealloc_queues(ndev);
- if (err)
+ if (err) {
+ netdev_err(ndev, "%s failed to deallocate queues: %d\n", __func__, err);
return err;
+ }
}
if (!from_close) {
@@ -2875,6 +2908,8 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
ndev->dev_port = port_idx;
SET_NETDEV_DEV(ndev, gc->dev);
+ netif_set_tso_max_size(ndev, GSO_MAX_SIZE);
+
netif_carrier_off(ndev);
netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);
@@ -2970,6 +3005,8 @@ static int add_adev(struct gdma_dev *gd)
goto add_fail;
gd->adev = adev;
+ dev_dbg(gd->gdma_context->dev,
+ "Auxiliary device added successfully\n");
return 0;
add_fail:
@@ -3011,8 +3048,10 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
}
err = mana_create_eq(ac);
- if (err)
+ if (err) {
+ dev_err(dev, "Failed to create EQs: %d\n", err);
goto out;
+ }
err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
MANA_MICRO_VERSION, &num_ports);
@@ -3068,8 +3107,14 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
err = add_adev(gd);
out:
- if (err)
+ if (err) {
mana_remove(gd, false);
+ } else {
+ dev_dbg(dev, "gd=%p, id=%u, num_ports=%d, type=%u, instance=%u\n",
+ gd, gd->dev_id.as_uint32, ac->num_ports,
+ gd->dev_id.type, gd->dev_id.instance);
+ dev_dbg(dev, "%s succeeded\n", __func__);
+ }
return err;
}
@@ -3131,23 +3176,30 @@ out:
gd->driver_data = NULL;
gd->gdma_context = NULL;
kfree(ac);
+ dev_dbg(dev, "%s succeeded\n", __func__);
}
-struct net_device *mana_get_primary_netdev_rcu(struct mana_context *ac, u32 port_index)
+struct net_device *mana_get_primary_netdev(struct mana_context *ac,
+ u32 port_index,
+ netdevice_tracker *tracker)
{
struct net_device *ndev;
- RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
- "Taking primary netdev without holding the RCU read lock");
if (port_index >= ac->num_ports)
return NULL;
- /* When mana is used in netvsc, the upper netdevice should be returned. */
- if (ac->ports[port_index]->flags & IFF_SLAVE)
- ndev = netdev_master_upper_dev_get_rcu(ac->ports[port_index]);
- else
+ rcu_read_lock();
+
+ /* If mana is used in netvsc, the upper netdevice should be returned. */
+ ndev = netdev_master_upper_dev_get_rcu(ac->ports[port_index]);
+
+ /* If there is no upper device, use the parent Ethernet device */
+ if (!ndev)
ndev = ac->ports[port_index];
+ netdev_hold(ndev, tracker, GFP_ATOMIC);
+ rcu_read_unlock();
+
return ndev;
}
-EXPORT_SYMBOL_NS(mana_get_primary_netdev_rcu, "NET_MANA");
+EXPORT_SYMBOL_NS(mana_get_primary_netdev, "NET_MANA");
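
The RCU-scoped variant is replaced by one that hands back a reference tracked by the caller, so the netvsc consumer no longer has to reason about RCU lifetimes. Expected usage shape (a sketch; error handling elided):

	netdevice_tracker tracker;
	struct net_device *ndev;

	ndev = mana_get_primary_netdev(ac, 0, &tracker);
	if (!ndev)
		return -ENODEV;

	/* ... safe to use ndev outside any RCU section ... */

	netdev_put(ndev, &tracker);
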
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
index 515069d5637b..671af5d4c5d2 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
+++ b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
@@ -565,20 +565,9 @@ static void nfp_net_xfrm_del_state(struct xfrm_state *x)
xa_erase(&nn->xa_ipsec, x->xso.offload_handle - 1);
}
-static bool nfp_net_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
-{
- if (x->props.family == AF_INET)
- /* Offload with IPv4 options is not supported yet */
- return ip_hdr(skb)->ihl == 5;
-
- /* Offload with IPv6 extension headers is not support yet */
- return !(ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr));
-}
-
static const struct xfrmdev_ops nfp_net_ipsec_xfrmdev_ops = {
.xdo_dev_state_add = nfp_net_xfrm_add_state,
.xdo_dev_state_delete = nfp_net_xfrm_del_state,
- .xdo_dev_offload_ok = nfp_net_ipsec_offload_ok,
};
void nfp_net_ipsec_init(struct nfp_net *nn)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c b/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c
index 0d6c59d6d4ae..ea6a288c0d5e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c
@@ -83,42 +83,12 @@ nfp_hwmon_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr,
return 0;
}
-static u32 nfp_chip_config[] = {
- HWMON_C_REGISTER_TZ,
- 0
-};
-
-static const struct hwmon_channel_info nfp_chip = {
- .type = hwmon_chip,
- .config = nfp_chip_config,
-};
-
-static u32 nfp_temp_config[] = {
- HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT,
- 0
-};
-
-static const struct hwmon_channel_info nfp_temp = {
- .type = hwmon_temp,
- .config = nfp_temp_config,
-};
-
-static u32 nfp_power_config[] = {
- HWMON_P_INPUT | HWMON_P_MAX,
- HWMON_P_INPUT,
- HWMON_P_INPUT,
- 0
-};
-
-static const struct hwmon_channel_info nfp_power = {
- .type = hwmon_power,
- .config = nfp_power_config,
-};
-
static const struct hwmon_channel_info * const nfp_hwmon_info[] = {
- &nfp_chip,
- &nfp_temp,
- &nfp_power,
+ HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT),
+ HWMON_CHANNEL_INFO(power, HWMON_P_INPUT | HWMON_P_MAX,
+ HWMON_P_INPUT,
+ HWMON_P_INPUT),
NULL
};
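
HWMON_CHANNEL_INFO() builds the same static data via compound literals, which is why the named arrays above become redundant. Roughly (simplified from include/linux/hwmon.h):

	#define HWMON_CHANNEL_INFO(stype, ...)                     \
		(&(const struct hwmon_channel_info){               \
			.type = hwmon_##stype,                     \
			.config = (const u32[]){ __VA_ARGS__, 0 }, \
		})
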
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index f915c423fe70..886061d7351a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -454,7 +454,7 @@ int qed_fill_dev_info(struct qed_dev *cdev,
static void qed_free_cdev(struct qed_dev *cdev)
{
- kfree((void *)cdev);
+ kfree(cdev);
}
static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index fa167b1aa019..5222a035fd19 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -3033,7 +3033,7 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
u16 length;
int rc;
- /* Valiate PF can send such a request */
+ /* Validate PF can send such a request */
if (!vf->vport_instance) {
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
@@ -3312,7 +3312,7 @@ static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
goto out;
}
- /* Determine if the unicast filtering is acceptible by PF */
+ /* Determine if the unicast filtering is acceptable by PF */
if ((p_bulletin->valid_bitmap & BIT(VLAN_ADDR_FORCED)) &&
(params.type == QED_FILTER_VLAN ||
params.type == QED_FILTER_MAC_VLAN)) {
@@ -3729,7 +3729,7 @@ qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
if (rc) {
- DP_ERR(p_hwfn, "Failed to re-enable VF[%d] acces\n",
+ DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
vfid);
return rc;
}
@@ -4480,7 +4480,7 @@ int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
/* Failure to acquire the ptt in 100g creates an odd error
- * where the first engine has already relased IOV.
+ * where the first engine has already released IOV.
*/
if (!ptt) {
DP_ERR(hwfn, "Failed to acquire ptt\n");
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index f3bea196a8f9..ba8763cac9d9 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -117,11 +117,14 @@ static void rmnet_unregister_bridge(struct rmnet_port *port)
rmnet_unregister_real_device(bridge_dev);
}
-static int rmnet_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int rmnet_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
u32 data_format = RMNET_FLAGS_INGRESS_DEAGGREGATION;
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
struct net_device *real_dev;
int mode = RMNET_EPMODE_VND;
struct rmnet_endpoint *ep;
@@ -134,7 +137,7 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
return -EINVAL;
}
- real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
+ real_dev = __dev_get_by_index(link_net, nla_get_u32(tb[IFLA_LINK]));
if (!real_dev) {
NL_SET_ERR_MSG_MOD(extack, "link does not exist");
return -ENODEV;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index a5e3d1a88305..8b4640c5d61e 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -686,8 +686,8 @@ void rmnet_map_update_ul_agg_config(struct rmnet_port *port, u32 size,
void rmnet_map_tx_aggregate_init(struct rmnet_port *port)
{
- hrtimer_init(&port->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- port->hrtimer.function = rmnet_map_flush_tx_packet_queue;
+ hrtimer_setup(&port->hrtimer, rmnet_map_flush_tx_packet_queue, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
spin_lock_init(&port->agg_lock);
rmnet_map_update_ul_agg_config(port, 4096, 1, 800);
INIT_WORK(&port->agg_wq, rmnet_map_flush_tx_packet_work);
diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig
index 8a8ea51c639e..fe136f61586f 100644
--- a/drivers/net/ethernet/realtek/Kconfig
+++ b/drivers/net/ethernet/realtek/Kconfig
@@ -114,7 +114,8 @@ config R8169
will be called r8169. This is recommended.
config R8169_LEDS
- def_bool R8169 && LEDS_TRIGGER_NETDEV
+ bool "Support for controlling the NIC LEDs"
+ depends on R8169 && LEDS_TRIGGER_NETDEV
depends on !(R8169=y && LEDS_CLASS=m)
help
Optional support for controlling the NIC LED's with the netdev
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 5a5eba49c651..4eebd9cb40a3 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -89,6 +89,7 @@
#define JUMBO_6K (6 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
#define JUMBO_7K (7 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
#define JUMBO_9K (9 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
+#define JUMBO_16K (SZ_16K - VLAN_ETH_HLEN - ETH_FCS_LEN)
static const struct {
const char *name;
@@ -169,6 +170,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
{ PCI_VDEVICE(REALTEK, 0x8125) },
{ PCI_VDEVICE(REALTEK, 0x8126) },
{ PCI_VDEVICE(REALTEK, 0x3000) },
+ { PCI_VDEVICE(REALTEK, 0x5000) },
{}
};
@@ -2850,6 +2852,32 @@ static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
RTL_R32(tp, CSIDR) : ~0;
}
+static void rtl_disable_zrxdc_timeout(struct rtl8169_private *tp)
+{
+ struct pci_dev *pdev = tp->pci_dev;
+ u32 csi;
+ int rc;
+ u8 val;
+
+#define RTL_GEN3_RELATED_OFF 0x0890
+#define RTL_GEN3_ZRXDC_NONCOMPL 0x1
+ if (pdev->cfg_size > RTL_GEN3_RELATED_OFF) {
+ rc = pci_read_config_byte(pdev, RTL_GEN3_RELATED_OFF, &val);
+ if (rc == PCIBIOS_SUCCESSFUL) {
+ val &= ~RTL_GEN3_ZRXDC_NONCOMPL;
+ rc = pci_write_config_byte(pdev, RTL_GEN3_RELATED_OFF,
+ val);
+ if (rc == PCIBIOS_SUCCESSFUL)
+ return;
+ }
+ }
+
+ netdev_notice_once(tp->dev,
+ "No native access to PCI extended config space, falling back to CSI\n");
+ csi = rtl_csi_read(tp, RTL_GEN3_RELATED_OFF);
+ rtl_csi_write(tp, RTL_GEN3_RELATED_OFF, csi & ~RTL_GEN3_ZRXDC_NONCOMPL);
+}
+
static void rtl_set_aspm_entry_latency(struct rtl8169_private *tp, u8 val)
{
struct pci_dev *pdev = tp->pci_dev;
@@ -3822,6 +3850,7 @@ static void rtl_hw_start_8125d(struct rtl8169_private *tp)
static void rtl_hw_start_8126a(struct rtl8169_private *tp)
{
+ rtl_disable_zrxdc_timeout(tp);
rtl_set_def_aspm_entry_latency(tp);
rtl_hw_start_8125_common(tp);
}
@@ -5199,6 +5228,33 @@ static int r8169_mdio_write_reg(struct mii_bus *mii_bus, int phyaddr,
return 0;
}
+static int r8169_mdio_read_reg_c45(struct mii_bus *mii_bus, int addr,
+ int devnum, int regnum)
+{
+ struct rtl8169_private *tp = mii_bus->priv;
+
+ if (addr > 0)
+ return -ENODEV;
+
+ if (devnum == MDIO_MMD_VEND2 && regnum > MDIO_STAT2)
+ return r8168_phy_ocp_read(tp, regnum);
+
+ return 0;
+}
+
+static int r8169_mdio_write_reg_c45(struct mii_bus *mii_bus, int addr,
+ int devnum, int regnum, u16 val)
+{
+ struct rtl8169_private *tp = mii_bus->priv;
+
+ if (addr > 0 || devnum != MDIO_MMD_VEND2 || regnum <= MDIO_STAT2)
+ return -ENODEV;
+
+ r8168_phy_ocp_write(tp, regnum, val);
+
+ return 0;
+}
+
static int r8169_mdio_register(struct rtl8169_private *tp)
{
struct pci_dev *pdev = tp->pci_dev;
@@ -5222,12 +5278,18 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
new_bus->priv = tp;
new_bus->parent = &pdev->dev;
new_bus->irq[0] = PHY_MAC_INTERRUPT;
+ new_bus->phy_mask = GENMASK(31, 1);
snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x-%x",
pci_domain_nr(pdev->bus), pci_dev_id(pdev));
new_bus->read = r8169_mdio_read_reg;
new_bus->write = r8169_mdio_write_reg;
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_40) {
+ new_bus->read_c45 = r8169_mdio_read_reg_c45;
+ new_bus->write_c45 = r8169_mdio_write_reg_c45;
+ }
+
ret = devm_mdiobus_register(&pdev->dev, new_bus);
if (ret)
return ret;
@@ -5251,9 +5313,9 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
/* mimic behavior of r8125/r8126 vendor drivers */
if (tp->mac_version == RTL_GIGA_MAC_VER_61)
- phy_set_eee_broken(tp->phydev,
- ETHTOOL_LINK_MODE_2500baseT_Full_BIT);
- phy_set_eee_broken(tp->phydev, ETHTOOL_LINK_MODE_5000baseT_Full_BIT);
+ phy_disable_eee_mode(tp->phydev,
+ ETHTOOL_LINK_MODE_2500baseT_Full_BIT);
+ phy_disable_eee_mode(tp->phydev, ETHTOOL_LINK_MODE_5000baseT_Full_BIT);
/* PHY will be woken up in rtl_open() */
phy_suspend(tp->phydev);
@@ -5326,6 +5388,9 @@ static int rtl_jumbo_max(struct rtl8169_private *tp)
/* RTL8168c */
case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24:
return JUMBO_6K;
+ /* RTL8125/8126 */
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_71:
+ return JUMBO_16K;
default:
return JUMBO_9K;
}
@@ -5360,7 +5425,7 @@ done:
/* register is set if system vendor successfully tested ASPM 1.2 */
static bool rtl_aspm_is_safe(struct rtl8169_private *tp)
{
- if (tp->mac_version >= RTL_GIGA_MAC_VER_61 &&
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_46 &&
r8168_mac_ocp_read(tp, 0xc0b2) & 0xf)
return true;
@@ -5409,11 +5474,10 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (region < 0)
return dev_err_probe(&pdev->dev, -ENODEV, "no MMIO resource found\n");
- rc = pcim_iomap_regions(pdev, BIT(region), KBUILD_MODNAME);
- if (rc < 0)
- return dev_err_probe(&pdev->dev, rc, "cannot remap MMIO, aborting\n");
-
- tp->mmio_addr = pcim_iomap_table(pdev)[region];
+ tp->mmio_addr = pcim_iomap_region(pdev, region, KBUILD_MODNAME);
+ if (IS_ERR(tp->mmio_addr))
+ return dev_err_probe(&pdev->dev, PTR_ERR(tp->mmio_addr),
+ "cannot remap MMIO, aborting\n");
txconfig = RTL_R32(tp, TxConfig);
if (txconfig == ~0U)
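
The probe hunk above replaces the pcim_iomap_regions()/pcim_iomap_table() pair with the single-BAR devres helper, which hands back the mapping (or an ERR_PTR) directly. Reduced to a sketch for a hypothetical probe:

        void __iomem *mmio;

        /* Request and ioremap one BAR; devres releases it automatically */
        mmio = pcim_iomap_region(pdev, bar, KBUILD_MODNAME);
        if (IS_ERR(mmio))
                return dev_err_probe(&pdev->dev, PTR_ERR(mmio),
                                     "cannot remap MMIO, aborting\n");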
diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c
index 6e4ef7af27bf..b4365906669f 100644
--- a/drivers/net/ethernet/renesas/ravb_ptp.c
+++ b/drivers/net/ethernet/renesas/ravb_ptp.c
@@ -179,8 +179,7 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp,
/* Reject requests with unsupported flags */
if (req->flags & ~(PTP_ENABLE_FEATURE |
PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
+ PTP_FALLING_EDGE))
return -EOPNOTSUPP;
if (req->index)
diff --git a/drivers/net/ethernet/renesas/rcar_gen4_ptp.c b/drivers/net/ethernet/renesas/rcar_gen4_ptp.c
index 72e7fcc56693..4c3e8cc5046f 100644
--- a/drivers/net/ethernet/renesas/rcar_gen4_ptp.c
+++ b/drivers/net/ethernet/renesas/rcar_gen4_ptp.c
@@ -29,8 +29,8 @@ static const struct rcar_gen4_ptp_reg_offset gen4_offs = {
static int rcar_gen4_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct rcar_gen4_ptp_private *ptp_priv = ptp_to_priv(ptp);
- bool neg_adj = scaled_ppm < 0 ? true : false;
s64 addend = ptp_priv->default_addend;
+ bool neg_adj = scaled_ppm < 0;
s64 diff;
if (neg_adj)
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
index 84d09a8973b7..aba772e14555 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -1287,17 +1287,14 @@ static struct device_node *rswitch_get_port_node(struct rswitch_device *rdev)
if (!ports)
return NULL;
- for_each_child_of_node(ports, port) {
+ for_each_available_child_of_node(ports, port) {
err = of_property_read_u32(port, "reg", &index);
if (err < 0) {
port = NULL;
goto out;
}
- if (index == rdev->etha->index) {
- if (!of_device_is_available(port))
- port = NULL;
+ if (index == rdev->etha->index)
break;
- }
}
out:
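
for_each_available_child_of_node() above only visits children whose status is "okay", which is what makes the explicit of_device_is_available() check removable. One subtlety worth noting (general OF-iterator behaviour, not shown in this hunk): breaking out of the loop leaves a reference held on the matched node, so a caller that consumes the returned port is expected to drop it with of_node_put() eventually.

        for_each_available_child_of_node(ports, port) {
                /* ... match on the "reg" property ... */
                if (index == rdev->etha->index)
                        break;  /* exits with a reference held on port */
        }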
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index fe0bf1d3217a..36af94a2e062 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -2576,7 +2576,7 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
rocker_carrier_init(rocker_port);
dev->features |= NETIF_F_SG;
- dev->netns_local = true;
+ dev->netns_immutable = true;
/* MTU range: 68 - 9000 */
dev->min_mtu = ROCKER_PORT_MIN_MTU;
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 3eb55dcfa8a6..c4c43434f314 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -38,8 +38,9 @@ config SFC_MTD
default y
help
This exposes the on-board flash and/or EEPROM as MTD devices
- (e.g. /dev/mtd1). This is required to update the firmware or
- the boot configuration under Linux.
+ (e.g. /dev/mtd1). This is required to update the boot
+	  configuration under Linux, or to use some older userland tools to
+ update the firmware.
config SFC_MCDI_MON
bool "Solarflare SFC9100-family hwmon support"
depends on SFC && HWMON && !(SFC=y && HWMON=m)
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index 8f446b9bd5ee..d99039ec468d 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -7,7 +7,7 @@ sfc-y += efx.o efx_common.o efx_channels.o nic.o \
mcdi_functions.o mcdi_filters.o mcdi_mon.o \
ef100.o ef100_nic.o ef100_netdev.o \
ef100_ethtool.o ef100_rx.o ef100_tx.o \
- efx_devlink.o
+ efx_devlink.o efx_reflash.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
sfc-$(CONFIG_SFC_SRIOV) += sriov.o ef10_sriov.o ef100_sriov.o ef100_rep.o \
mae.o tc.o tc_bindings.o tc_counters.o \
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 452009ed7a43..47349c148c0c 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -3501,7 +3501,7 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX);
const struct efx_ef10_nvram_type_info *info;
- size_t size, erase_size, outlen;
+ size_t size, erase_size, write_size, outlen;
int type_idx = 0;
bool protected;
int rc;
@@ -3516,7 +3516,8 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
if (info->port != efx_port_num(efx))
return -ENODEV;
- rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
+ rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &write_size,
+ &protected);
if (rc)
return rc;
if (protected &&
@@ -3561,6 +3562,8 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
if (!erase_size)
part->common.mtd.flags |= MTD_NO_ERASE;
+ part->common.mtd.writesize = write_size;
+
return 0;
}
@@ -4416,6 +4419,7 @@ const struct efx_nic_type efx_x4_nic_type = {
.can_rx_scatter = true,
.always_rx_scatter = true,
.option_descriptors = true,
+ .flash_auto_partition = true,
.min_interrupt_mode = EFX_INT_MODE_MSIX,
.timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
.offload_features = EF10_OFFLOAD_FEATURES,
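
The write_size plumbed through above has two consumers in this series: here it seeds part->common.mtd.writesize so MTD users see the partition's minimum write granularity, and in the mcdi.c hunk further down, firmware that does not implement NVRAM_INFO_V2 is assumed to take the 128-byte default (EFX_MCDI_NVRAM_DEFAULT_WRITE_LEN).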
diff --git a/drivers/net/ethernet/sfc/ef100_netdev.c b/drivers/net/ethernet/sfc/ef100_netdev.c
index 7f7d560cb2b4..d941f073f1eb 100644
--- a/drivers/net/ethernet/sfc/ef100_netdev.c
+++ b/drivers/net/ethernet/sfc/ef100_netdev.c
@@ -452,7 +452,6 @@ int ef100_probe_netdev(struct efx_probe_data *probe_data)
NETIF_F_HIGHDMA | NETIF_F_ALL_TSO;
netif_set_tso_max_segs(net_dev,
ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT);
- efx->mdio.dev = net_dev;
rc = efx_ef100_init_datapath_caps(efx);
if (rc < 0)
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 650136dfc642..112e55b98ed3 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -476,28 +476,6 @@ void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
/**************************************************************************
*
- * ioctls
- *
- *************************************************************************/
-
-/* Net device ioctl
- * Context: process, rtnl_lock() held.
- */
-static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
-{
- struct efx_nic *efx = efx_netdev_priv(net_dev);
- struct mii_ioctl_data *data = if_mii(ifr);
-
- /* Convert phy_id from older PRTAD/DEVAD format */
- if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
- (data->phy_id & 0xfc00) == 0x0400)
- data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;
-
- return mdio_mii_ioctl(&efx->mdio, data, cmd);
-}
-
-/**************************************************************************
- *
* Kernel net device interface
*
*************************************************************************/
@@ -593,7 +571,6 @@ static const struct net_device_ops efx_netdev_ops = {
.ndo_tx_timeout = efx_watchdog,
.ndo_start_xmit = efx_hard_start_xmit,
.ndo_validate_addr = eth_validate_addr,
- .ndo_eth_ioctl = efx_ioctl,
.ndo_change_mtu = efx_change_mtu,
.ndo_set_mac_address = efx_set_mac_address,
.ndo_set_rx_mode = efx_set_rx_mode,
@@ -1201,7 +1178,6 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
rc = efx_init_struct(efx, pci_dev);
if (rc)
goto fail1;
- efx->mdio.dev = net_dev;
pci_info(pci_dev, "Solarflare NIC detected\n");
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
index c88ec3e24836..5a14d94163b1 100644
--- a/drivers/net/ethernet/sfc/efx_common.c
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -1003,6 +1003,7 @@ int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev)
INIT_LIST_HEAD(&efx->vf_reps);
INIT_WORK(&efx->mac_work, efx_mac_work);
init_waitqueue_head(&efx->flush_wq);
+ mutex_init(&efx->reflash_mutex);
efx->tx_queues_per_channel = 1;
efx->rxq_entries = EFX_DEFAULT_DMAQ_SIZE;
diff --git a/drivers/net/ethernet/sfc/efx_devlink.c b/drivers/net/ethernet/sfc/efx_devlink.c
index 3cd750820fdd..d842c60dfc10 100644
--- a/drivers/net/ethernet/sfc/efx_devlink.c
+++ b/drivers/net/ethernet/sfc/efx_devlink.c
@@ -19,6 +19,7 @@
#include "mae.h"
#include "ef100_rep.h"
#endif
+#include "efx_reflash.h"
struct efx_devlink {
struct efx_nic *efx;
@@ -615,7 +616,19 @@ static int efx_devlink_info_get(struct devlink *devlink,
return 0;
}
+static int efx_devlink_flash_update(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct efx_devlink *devlink_private = devlink_priv(devlink);
+ struct efx_nic *efx = devlink_private->efx;
+
+ return efx_reflash_flash_firmware(efx, params->fw, extack);
+}
+
static const struct devlink_ops sfc_devlink_ops = {
+ .supported_flash_update_params = 0,
+ .flash_update = efx_devlink_flash_update,
.info_get = efx_devlink_info_get,
};
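
With .flash_update wired into sfc_devlink_ops, the reflash code becomes reachable through the standard devlink firmware-update interface, presumably invoked as something like "devlink dev flash pci/0000:01:00.0 file fw.bin" with a recent iproute2; the "Erasing"/"Writing" notifications issued by efx_reflash.c then surface as progress updates. supported_flash_update_params = 0 means requests carrying optional parameters (component name, overwrite mask) are rejected by the devlink core before reaching the driver.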
diff --git a/drivers/net/ethernet/sfc/efx_reflash.c b/drivers/net/ethernet/sfc/efx_reflash.c
new file mode 100644
index 000000000000..b12e95f1c80a
--- /dev/null
+++ b/drivers/net/ethernet/sfc/efx_reflash.c
@@ -0,0 +1,522 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for AMD network controllers and boards
+ * Copyright (C) 2025, Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/crc32.h>
+#include <net/devlink.h>
+#include "efx_reflash.h"
+#include "net_driver.h"
+#include "fw_formats.h"
+#include "mcdi_pcol.h"
+#include "mcdi.h"
+
+/* Try to parse a Reflash header at the specified offset */
+static bool efx_reflash_parse_reflash_header(const struct firmware *fw,
+ size_t header_offset, u32 *type,
+ u32 *subtype, const u8 **data,
+ size_t *data_size)
+{
+ size_t header_end, trailer_offset, trailer_end;
+ u32 magic, version, payload_size, header_len;
+ const u8 *header, *trailer;
+ u32 expected_crc, crc;
+
+ if (check_add_overflow(header_offset, EFX_REFLASH_HEADER_LENGTH_OFST +
+ EFX_REFLASH_HEADER_LENGTH_LEN,
+ &header_end))
+ return false;
+ if (fw->size < header_end)
+ return false;
+
+ header = fw->data + header_offset;
+ magic = get_unaligned_le32(header + EFX_REFLASH_HEADER_MAGIC_OFST);
+ if (magic != EFX_REFLASH_HEADER_MAGIC_VALUE)
+ return false;
+
+ version = get_unaligned_le32(header + EFX_REFLASH_HEADER_VERSION_OFST);
+ if (version != EFX_REFLASH_HEADER_VERSION_VALUE)
+ return false;
+
+ payload_size = get_unaligned_le32(header + EFX_REFLASH_HEADER_PAYLOAD_SIZE_OFST);
+ header_len = get_unaligned_le32(header + EFX_REFLASH_HEADER_LENGTH_OFST);
+ if (check_add_overflow(header_offset, header_len, &trailer_offset) ||
+ check_add_overflow(trailer_offset, payload_size, &trailer_offset) ||
+ check_add_overflow(trailer_offset, EFX_REFLASH_TRAILER_LEN,
+ &trailer_end))
+ return false;
+ if (fw->size < trailer_end)
+ return false;
+
+ trailer = fw->data + trailer_offset;
+ expected_crc = get_unaligned_le32(trailer + EFX_REFLASH_TRAILER_CRC_OFST);
+ /* Addition could overflow u32, but not size_t since we already
+ * checked trailer_offset didn't overflow. So cast to size_t first.
+ */
+ crc = crc32_le(0, header, (size_t)header_len + payload_size);
+ if (crc != expected_crc)
+ return false;
+
+ *type = get_unaligned_le32(header + EFX_REFLASH_HEADER_FIRMWARE_TYPE_OFST);
+ *subtype = get_unaligned_le32(header + EFX_REFLASH_HEADER_FIRMWARE_SUBTYPE_OFST);
+ if (*type == EFX_REFLASH_FIRMWARE_TYPE_BUNDLE) {
+ /* All the bundle data is written verbatim to NVRAM */
+ *data = fw->data;
+ *data_size = fw->size;
+ } else {
+ /* Other payload types strip the reflash header and trailer
+ * from the data written to NVRAM
+ */
+ *data = header + header_len;
+ *data_size = payload_size;
+ }
+
+ return true;
+}
+
+/* Map from FIRMWARE_TYPE to NVRAM_PARTITION_TYPE */
+static int efx_reflash_partition_type(u32 type, u32 subtype,
+ u32 *partition_type,
+ u32 *partition_subtype)
+{
+ int rc = 0;
+
+ switch (type) {
+ case EFX_REFLASH_FIRMWARE_TYPE_BOOTROM:
+ *partition_type = NVRAM_PARTITION_TYPE_EXPANSION_ROM;
+ *partition_subtype = subtype;
+ break;
+ case EFX_REFLASH_FIRMWARE_TYPE_BUNDLE:
+ *partition_type = NVRAM_PARTITION_TYPE_BUNDLE;
+ *partition_subtype = subtype;
+ break;
+ default:
+ /* Not supported */
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+/* Try to parse a SmartNIC image header at the specified offset */
+static bool efx_reflash_parse_snic_header(const struct firmware *fw,
+ size_t header_offset,
+ u32 *partition_type,
+ u32 *partition_subtype,
+ const u8 **data, size_t *data_size)
+{
+ u32 magic, version, payload_size, header_len, expected_crc, crc;
+ size_t header_end, payload_end;
+ const u8 *header;
+
+ if (check_add_overflow(header_offset, EFX_SNICIMAGE_HEADER_MINLEN,
+ &header_end) ||
+ fw->size < header_end)
+ return false;
+
+ header = fw->data + header_offset;
+ magic = get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_MAGIC_OFST);
+ if (magic != EFX_SNICIMAGE_HEADER_MAGIC_VALUE)
+ return false;
+
+ version = get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_VERSION_OFST);
+ if (version != EFX_SNICIMAGE_HEADER_VERSION_VALUE)
+ return false;
+
+ header_len = get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_LENGTH_OFST);
+ if (check_add_overflow(header_offset, header_len, &header_end))
+ return false;
+ payload_size = get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_PAYLOAD_SIZE_OFST);
+ if (check_add_overflow(header_end, payload_size, &payload_end) ||
+ fw->size < payload_end)
+ return false;
+
+ expected_crc = get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_CRC_OFST);
+
+ /* Calculate CRC omitting the expected CRC field itself */
+ crc = crc32_le(~0, header, EFX_SNICIMAGE_HEADER_CRC_OFST);
+ crc = ~crc32_le(crc,
+ header + EFX_SNICIMAGE_HEADER_CRC_OFST +
+ EFX_SNICIMAGE_HEADER_CRC_LEN,
+ header_len + payload_size - EFX_SNICIMAGE_HEADER_CRC_OFST -
+ EFX_SNICIMAGE_HEADER_CRC_LEN);
+ if (crc != expected_crc)
+ return false;
+
+ *partition_type =
+ get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_PARTITION_TYPE_OFST);
+ *partition_subtype =
+ get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_PARTITION_SUBTYPE_OFST);
+ *data = fw->data;
+ *data_size = fw->size;
+ return true;
+}
+
+/* Try to parse a SmartNIC bundle header at the specified offset */
+static bool efx_reflash_parse_snic_bundle_header(const struct firmware *fw,
+ size_t header_offset,
+ u32 *partition_type,
+ u32 *partition_subtype,
+ const u8 **data,
+ size_t *data_size)
+{
+ u32 magic, version, bundle_type, header_len, expected_crc, crc;
+ size_t header_end;
+ const u8 *header;
+
+ if (check_add_overflow(header_offset, EFX_SNICBUNDLE_HEADER_LEN,
+ &header_end))
+ return false;
+ if (fw->size < header_end)
+ return false;
+
+ header = fw->data + header_offset;
+ magic = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_MAGIC_OFST);
+ if (magic != EFX_SNICBUNDLE_HEADER_MAGIC_VALUE)
+ return false;
+
+ version = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_VERSION_OFST);
+ if (version != EFX_SNICBUNDLE_HEADER_VERSION_VALUE)
+ return false;
+
+ bundle_type = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_BUNDLE_TYPE_OFST);
+ if (bundle_type != NVRAM_PARTITION_TYPE_BUNDLE)
+ return false;
+
+ header_len = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_LENGTH_OFST);
+ if (header_len != EFX_SNICBUNDLE_HEADER_LEN)
+ return false;
+
+ expected_crc = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_CRC_OFST);
+ crc = ~crc32_le(~0, header, EFX_SNICBUNDLE_HEADER_CRC_OFST);
+ if (crc != expected_crc)
+ return false;
+
+ *partition_type = NVRAM_PARTITION_TYPE_BUNDLE;
+ *partition_subtype = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_BUNDLE_SUBTYPE_OFST);
+ *data = fw->data;
+ *data_size = fw->size;
+ return true;
+}
+
+/* Try to find a valid firmware payload in the firmware data.
+ * When we recognise a valid header, we parse it for the partition type
+ * (so we know where to ask the MC to write it to) and the location of
+ * the data blob to write.
+ */
+static int efx_reflash_parse_firmware_data(const struct firmware *fw,
+ u32 *partition_type,
+ u32 *partition_subtype,
+ const u8 **data, size_t *data_size)
+{
+ size_t header_offset;
+ u32 type, subtype;
+
+ /* Some packaging formats (such as CMS/PKCS#7 signed images)
+ * prepend a header for which finding the size is a non-trivial
+ * task, so step through the firmware data until we find a valid
+ * header.
+ *
+ * The checks are intended to reject firmware data that is clearly not
+ * in the expected format. They do not need to be exhaustive as the
+ * running firmware will perform its own comprehensive validity and
+ * compatibility checks during the update procedure.
+ *
+ * Firmware packages may contain multiple reflash images, e.g. a
+ * bundle containing one or more other images. Only check the
+ * outermost container by stopping after the first candidate image
+	 * found, even if it is for an unsupported partition type.
+ */
+ for (header_offset = 0; header_offset < fw->size; header_offset++) {
+ if (efx_reflash_parse_snic_bundle_header(fw, header_offset,
+ partition_type,
+ partition_subtype,
+ data, data_size))
+ return 0;
+
+ if (efx_reflash_parse_snic_header(fw, header_offset,
+ partition_type,
+ partition_subtype, data,
+ data_size))
+ return 0;
+
+ if (efx_reflash_parse_reflash_header(fw, header_offset, &type,
+ &subtype, data, data_size))
+ return efx_reflash_partition_type(type, subtype,
+ partition_type,
+ partition_subtype);
+ }
+
+ return -EINVAL;
+}
+
+/* Limit the number of status updates during the erase or write phases */
+#define EFX_DEVLINK_STATUS_UPDATE_COUNT 50
+
+/* Expected timeout (in seconds) for efx_mcdi_nvram_update_finish_polled() */
+#define EFX_DEVLINK_UPDATE_FINISH_TIMEOUT 900
+
+/* Ideal erase chunk size. This is a balance between minimising the number of
+ * MCDI requests to erase an entire partition whilst avoiding tripping the MCDI
+ * RPC timeout.
+ */
+#define EFX_NVRAM_ERASE_IDEAL_CHUNK_SIZE (64 * 1024)
+
+static int efx_reflash_erase_partition(struct efx_nic *efx,
+ struct netlink_ext_ack *extack,
+ struct devlink *devlink, u32 type,
+ size_t partition_size,
+ size_t align)
+{
+ size_t chunk, offset, next_update;
+ int rc;
+
+ /* Partitions that cannot be erased or do not require erase before
+	 * write are advertised with an erase alignment/sector size of zero.
+ */
+ if (align == 0)
+ /* Nothing to do */
+ return 0;
+
+ if (partition_size % align)
+ return -EINVAL;
+
+ /* Erase the entire NVRAM partition a chunk at a time to avoid
+ * potentially tripping the MCDI RPC timeout.
+ */
+ if (align >= EFX_NVRAM_ERASE_IDEAL_CHUNK_SIZE)
+ chunk = align;
+ else
+ chunk = rounddown(EFX_NVRAM_ERASE_IDEAL_CHUNK_SIZE, align);
+
+ for (offset = 0, next_update = 0; offset < partition_size; offset += chunk) {
+ if (offset >= next_update) {
+ devlink_flash_update_status_notify(devlink, "Erasing",
+ NULL, offset,
+ partition_size);
+ next_update += partition_size / EFX_DEVLINK_STATUS_UPDATE_COUNT;
+ }
+
+ chunk = min_t(size_t, partition_size - offset, chunk);
+ rc = efx_mcdi_nvram_erase(efx, type, offset, chunk);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Erase failed for NVRAM partition %#x at %#zx-%#zx",
+ type, offset, offset + chunk - 1);
+ return rc;
+ }
+ }
+
+ devlink_flash_update_status_notify(devlink, "Erasing", NULL,
+ partition_size, partition_size);
+
+ return 0;
+}
+
+static int efx_reflash_write_partition(struct efx_nic *efx,
+ struct netlink_ext_ack *extack,
+ struct devlink *devlink, u32 type,
+ const u8 *data, size_t data_size,
+ size_t align)
+{
+ size_t write_max, chunk, offset, next_update;
+ int rc;
+
+ if (align == 0)
+ return -EINVAL;
+
+ /* Write the NVRAM partition in chunks that are the largest multiple
+ * of the partition's required write alignment that will fit into the
+ * MCDI NVRAM_WRITE RPC payload.
+ */
+ if (efx->type->mcdi_max_ver < 2)
+ write_max = MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN *
+ MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM;
+ else
+ write_max = MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN *
+ MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM_MCDI2;
+ chunk = rounddown(write_max, align);
+
+ for (offset = 0, next_update = 0; offset + chunk <= data_size; offset += chunk) {
+ if (offset >= next_update) {
+ devlink_flash_update_status_notify(devlink, "Writing",
+ NULL, offset,
+ data_size);
+ next_update += data_size / EFX_DEVLINK_STATUS_UPDATE_COUNT;
+ }
+
+ rc = efx_mcdi_nvram_write(efx, type, offset, data + offset, chunk);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Write failed for NVRAM partition %#x at %#zx-%#zx",
+ type, offset, offset + chunk - 1);
+ return rc;
+ }
+ }
+
+	/* Round up leftover data to satisfy write alignment */
+ if (offset < data_size) {
+ size_t remaining = data_size - offset;
+ u8 *buf;
+
+ if (offset >= next_update)
+ devlink_flash_update_status_notify(devlink, "Writing",
+ NULL, offset,
+ data_size);
+
+ chunk = roundup(remaining, align);
+ buf = kmalloc(chunk, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ memcpy(buf, data + offset, remaining);
+ memset(buf + remaining, 0xFF, chunk - remaining);
+ rc = efx_mcdi_nvram_write(efx, type, offset, buf, chunk);
+ kfree(buf);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Write failed for NVRAM partition %#x at %#zx-%#zx",
+ type, offset, offset + chunk - 1);
+ return rc;
+ }
+ }
+
+ devlink_flash_update_status_notify(devlink, "Writing", NULL, data_size,
+ data_size);
+
+ return 0;
+}
+
+int efx_reflash_flash_firmware(struct efx_nic *efx, const struct firmware *fw,
+ struct netlink_ext_ack *extack)
+{
+ size_t data_size, size, erase_align, write_align;
+ struct devlink *devlink = efx->devlink;
+ u32 type, data_subtype, subtype;
+ const u8 *data;
+ bool protected;
+ int rc;
+
+ if (!efx_has_cap(efx, BUNDLE_UPDATE)) {
+ NL_SET_ERR_MSG_MOD(extack, "NVRAM bundle updates are not supported by the firmware");
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&efx->reflash_mutex);
+
+ devlink_flash_update_status_notify(devlink, "Checking update", NULL, 0, 0);
+
+ if (efx->type->flash_auto_partition) {
+ /* NIC wants entire FW file including headers;
+ * FW will validate 'subtype' if there is one
+ */
+ type = NVRAM_PARTITION_TYPE_AUTO;
+ data = fw->data;
+ data_size = fw->size;
+ } else {
+ rc = efx_reflash_parse_firmware_data(fw, &type, &data_subtype, &data,
+ &data_size);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Firmware image validation check failed");
+ goto out_unlock;
+ }
+
+ rc = efx_mcdi_nvram_metadata(efx, type, &subtype, NULL, NULL, 0);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Metadata query for NVRAM partition %#x failed",
+ type);
+ goto out_unlock;
+ }
+
+ if (subtype != data_subtype) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Firmware image is not appropriate for this adapter");
+ rc = -EINVAL;
+ goto out_unlock;
+ }
+ }
+
+ rc = efx_mcdi_nvram_info(efx, type, &size, &erase_align, &write_align,
+ &protected);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Info query for NVRAM partition %#x failed",
+ type);
+ goto out_unlock;
+ }
+
+ if (protected) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "NVRAM partition %#x is protected",
+ type);
+ rc = -EPERM;
+ goto out_unlock;
+ }
+
+ if (write_align == 0) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "NVRAM partition %#x is not writable",
+ type);
+ rc = -EACCES;
+ goto out_unlock;
+ }
+
+ if (erase_align != 0 && size % erase_align) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "NVRAM partition %#x has a bad partition table entry, can't erase it",
+ type);
+ rc = -EACCES;
+ goto out_unlock;
+ }
+
+ if (data_size > size) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Firmware image is too big for NVRAM partition %#x",
+ type);
+ rc = -EFBIG;
+ goto out_unlock;
+ }
+
+ devlink_flash_update_status_notify(devlink, "Starting update", NULL, 0, 0);
+
+ rc = efx_mcdi_nvram_update_start(efx, type);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Update start request for NVRAM partition %#x failed",
+ type);
+ goto out_unlock;
+ }
+
+ rc = efx_reflash_erase_partition(efx, extack, devlink, type, size,
+ erase_align);
+ if (rc)
+ goto out_update_finish;
+
+ rc = efx_reflash_write_partition(efx, extack, devlink, type, data,
+ data_size, write_align);
+ if (rc)
+ goto out_update_finish;
+
+ devlink_flash_update_timeout_notify(devlink, "Finishing update", NULL,
+ EFX_DEVLINK_UPDATE_FINISH_TIMEOUT);
+
+out_update_finish:
+ if (rc)
+ /* Don't obscure the return code from an earlier failure */
+ efx_mcdi_nvram_update_finish(efx, type, EFX_UPDATE_FINISH_ABORT);
+ else
+ rc = efx_mcdi_nvram_update_finish_polled(efx, type);
+out_unlock:
+ mutex_unlock(&efx->reflash_mutex);
+ devlink_flash_update_status_notify(devlink, rc ? "Update failed" :
+ "Update complete",
+ NULL, 0, 0);
+ return rc;
+}
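
A worked example of the erase chunking in efx_reflash_erase_partition() (geometry numbers are hypothetical): a 4 KiB erase sector gives chunk = rounddown(64 KiB, 4 KiB) = 64 KiB, so erasing a 1 MiB partition takes 16 MCDI RPCs; a 128 KiB sector exceeds the ideal chunk size, so chunk = 128 KiB and the same partition takes 8 RPCs. The selection logic, as a stand-alone sketch:

        #include <stdio.h>
        #include <stddef.h>

        #define IDEAL_CHUNK ((size_t)64 * 1024) /* EFX_NVRAM_ERASE_IDEAL_CHUNK_SIZE */

        /* align != 0 is assumed; the driver returns early in that case */
        static size_t erase_chunk(size_t align)
        {
                if (align >= IDEAL_CHUNK)
                        return align;
                return (IDEAL_CHUNK / align) * align;   /* rounddown() */
        }

        int main(void)
        {
                printf("%zu\n", erase_chunk(4096));     /* 65536 */
                printf("%zu\n", erase_chunk(131072));   /* 131072 */
                return 0;
        }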
diff --git a/drivers/net/ethernet/sfc/efx_reflash.h b/drivers/net/ethernet/sfc/efx_reflash.h
new file mode 100644
index 000000000000..3dffac565161
--- /dev/null
+++ b/drivers/net/ethernet/sfc/efx_reflash.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for AMD network controllers and boards
+ * Copyright (C) 2025, Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef _EFX_REFLASH_H
+#define _EFX_REFLASH_H
+
+#include "net_driver.h"
+#include <linux/firmware.h>
+
+int efx_reflash_flash_firmware(struct efx_nic *efx, const struct firmware *fw,
+ struct netlink_ext_ack *extack);
+
+#endif /* _EFX_REFLASH_H */
diff --git a/drivers/net/ethernet/sfc/fw_formats.h b/drivers/net/ethernet/sfc/fw_formats.h
new file mode 100644
index 000000000000..cbc350c96013
--- /dev/null
+++ b/drivers/net/ethernet/sfc/fw_formats.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for AMD network controllers and boards
+ * Copyright (C) 2025, Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef _EFX_FW_FORMATS_H
+#define _EFX_FW_FORMATS_H
+
+/* Header layouts of firmware update images recognised by Efx NICs.
+ * The sources-of-truth for these layouts are AMD internal documents
+ * and sfregistry headers, neither of which are available externally
+ * nor usable directly by the driver.
+ *
+ * While each format includes a 'magic number', these are at different
+ * offsets in the various formats, and a legal header for one format
+ * could have the right value in whichever field occupies that offset
+ * to match another format's magic.
+ * Besides, some packaging formats (such as CMS/PKCS#7 signed images)
+ * prepend a header for which finding the size is a non-trivial task;
+ * rather than trying to parse those headers, we search byte-by-byte
+ * through the provided firmware image looking for a valid header.
+ * Thus, format recognition has to include validation of the checksum
+ * field, even though the firmware will validate that itself before
+ * applying the image.
+ */
+
+/* EF10 (Medford2, X2) "reflash" header format. Defined in SF-121352-AN */
+#define EFX_REFLASH_HEADER_MAGIC_OFST 0
+#define EFX_REFLASH_HEADER_MAGIC_LEN 4
+#define EFX_REFLASH_HEADER_MAGIC_VALUE 0x106F1A5
+
+#define EFX_REFLASH_HEADER_VERSION_OFST 4
+#define EFX_REFLASH_HEADER_VERSION_LEN 4
+#define EFX_REFLASH_HEADER_VERSION_VALUE 4
+
+#define EFX_REFLASH_HEADER_FIRMWARE_TYPE_OFST 8
+#define EFX_REFLASH_HEADER_FIRMWARE_TYPE_LEN 4
+#define EFX_REFLASH_FIRMWARE_TYPE_BOOTROM 0x2
+#define EFX_REFLASH_FIRMWARE_TYPE_BUNDLE 0xd
+
+#define EFX_REFLASH_HEADER_FIRMWARE_SUBTYPE_OFST 12
+#define EFX_REFLASH_HEADER_FIRMWARE_SUBTYPE_LEN 4
+
+#define EFX_REFLASH_HEADER_PAYLOAD_SIZE_OFST 16
+#define EFX_REFLASH_HEADER_PAYLOAD_SIZE_LEN 4
+
+#define EFX_REFLASH_HEADER_LENGTH_OFST 20
+#define EFX_REFLASH_HEADER_LENGTH_LEN 4
+
+/* Reflash trailer */
+#define EFX_REFLASH_TRAILER_CRC_OFST 0
+#define EFX_REFLASH_TRAILER_CRC_LEN 4
+
+#define EFX_REFLASH_TRAILER_LEN \
+ (EFX_REFLASH_TRAILER_CRC_OFST + EFX_REFLASH_TRAILER_CRC_LEN)
+
+/* EF100 "SmartNIC image" header format.
+ * Defined in sfregistry "src/layout/snic_image_hdr.h".
+ */
+#define EFX_SNICIMAGE_HEADER_MAGIC_OFST 16
+#define EFX_SNICIMAGE_HEADER_MAGIC_LEN 4
+#define EFX_SNICIMAGE_HEADER_MAGIC_VALUE 0x541C057A
+
+#define EFX_SNICIMAGE_HEADER_VERSION_OFST 20
+#define EFX_SNICIMAGE_HEADER_VERSION_LEN 4
+#define EFX_SNICIMAGE_HEADER_VERSION_VALUE 1
+
+#define EFX_SNICIMAGE_HEADER_LENGTH_OFST 24
+#define EFX_SNICIMAGE_HEADER_LENGTH_LEN 4
+
+#define EFX_SNICIMAGE_HEADER_PARTITION_TYPE_OFST 36
+#define EFX_SNICIMAGE_HEADER_PARTITION_TYPE_LEN 4
+
+#define EFX_SNICIMAGE_HEADER_PARTITION_SUBTYPE_OFST 40
+#define EFX_SNICIMAGE_HEADER_PARTITION_SUBTYPE_LEN 4
+
+#define EFX_SNICIMAGE_HEADER_PAYLOAD_SIZE_OFST 60
+#define EFX_SNICIMAGE_HEADER_PAYLOAD_SIZE_LEN 4
+
+#define EFX_SNICIMAGE_HEADER_CRC_OFST 64
+#define EFX_SNICIMAGE_HEADER_CRC_LEN 4
+
+#define EFX_SNICIMAGE_HEADER_MINLEN 256
+
+/* EF100 "SmartNIC bundle" header format. Defined in SF-122606-TC */
+#define EFX_SNICBUNDLE_HEADER_MAGIC_OFST 0
+#define EFX_SNICBUNDLE_HEADER_MAGIC_LEN 4
+#define EFX_SNICBUNDLE_HEADER_MAGIC_VALUE 0xB1001001
+
+#define EFX_SNICBUNDLE_HEADER_VERSION_OFST 4
+#define EFX_SNICBUNDLE_HEADER_VERSION_LEN 4
+#define EFX_SNICBUNDLE_HEADER_VERSION_VALUE 1
+
+#define EFX_SNICBUNDLE_HEADER_BUNDLE_TYPE_OFST 8
+#define EFX_SNICBUNDLE_HEADER_BUNDLE_TYPE_LEN 4
+
+#define EFX_SNICBUNDLE_HEADER_BUNDLE_SUBTYPE_OFST 12
+#define EFX_SNICBUNDLE_HEADER_BUNDLE_SUBTYPE_LEN 4
+
+#define EFX_SNICBUNDLE_HEADER_LENGTH_OFST 20
+#define EFX_SNICBUNDLE_HEADER_LENGTH_LEN 4
+
+#define EFX_SNICBUNDLE_HEADER_CRC_OFST 224
+#define EFX_SNICBUNDLE_HEADER_CRC_LEN 4
+
+#define EFX_SNICBUNDLE_HEADER_LEN \
+ (EFX_SNICBUNDLE_HEADER_CRC_OFST + EFX_SNICBUNDLE_HEADER_CRC_LEN)
+
+#endif /* _EFX_FW_FORMATS_H */
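
Since the scan in efx_reflash.c probes every byte offset, a layout only counts as recognised once magic, version and CRC all line up. The first two checks for the reflash format, as a stand-alone user-space sketch (rd_le32() is a local stand-in for the kernel's get_unaligned_le32(); offsets and values are the ones defined above, and a real consumer must still validate the trailer CRC):

        #include <stdint.h>
        #include <stddef.h>

        static uint32_t rd_le32(const uint8_t *p)
        {
                return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
                       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
        }

        static int maybe_reflash_header(const uint8_t *img, size_t len, size_t off)
        {
                if (off > len || len - off < 24)        /* LENGTH field ends at byte 24 */
                        return 0;
                if (rd_le32(img + off + 0) != 0x106F1A5)        /* HEADER_MAGIC_VALUE */
                        return 0;
                return rd_le32(img + off + 4) == 4;     /* HEADER_VERSION_VALUE */
        }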
diff --git a/drivers/net/ethernet/sfc/mae.c b/drivers/net/ethernet/sfc/mae.c
index 50f097487b14..6fd0c1e9a7d5 100644
--- a/drivers/net/ethernet/sfc/mae.c
+++ b/drivers/net/ethernet/sfc/mae.c
@@ -755,7 +755,7 @@ int efx_mae_match_check_caps_lhs(struct efx_nic *efx,
rc = efx_mae_match_check_cap_typ(supported_fields[MAE_FIELD_INGRESS_PORT],
ingress_port_mask_type);
if (rc) {
- NL_SET_ERR_MSG_FMT_MOD(extack, "No support for %s mask in field %s\n",
+ NL_SET_ERR_MSG_FMT_MOD(extack, "No support for %s mask in field %s",
mask_type_name(ingress_port_mask_type),
"ingress_port");
return rc;
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index d461b1a6ce81..dbd2ee915838 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -1625,12 +1625,15 @@ fail:
return rc;
}
+#define EFX_MCDI_NVRAM_DEFAULT_WRITE_LEN 128
+
int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
size_t *size_out, size_t *erase_size_out,
- bool *protected_out)
+ size_t *write_size_out, bool *protected_out)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_V2_OUT_LEN);
+ size_t write_size = 0;
size_t outlen;
int rc;
@@ -1645,6 +1648,12 @@ int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
goto fail;
}
+ if (outlen >= MC_CMD_NVRAM_INFO_V2_OUT_LEN)
+ write_size = MCDI_DWORD(outbuf, NVRAM_INFO_V2_OUT_WRITESIZE);
+ else
+ write_size = EFX_MCDI_NVRAM_DEFAULT_WRITE_LEN;
+
+ *write_size_out = write_size;
*size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
*erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
*protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) &
@@ -2163,11 +2172,9 @@ out_free:
return rc;
}
-#ifdef CONFIG_SFC_MTD
-
#define EFX_MCDI_NVRAM_LEN_MAX 128
-static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
+int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN);
int rc;
@@ -2185,6 +2192,8 @@ static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
return rc;
}
+#ifdef CONFIG_SFC_MTD
+
static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
loff_t offset, u8 *buffer, size_t length)
{
@@ -2209,13 +2218,20 @@ static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
return 0;
}
-static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
- loff_t offset, const u8 *buffer, size_t length)
+#endif /* CONFIG_SFC_MTD */
+
+int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
+ loff_t offset, const u8 *buffer, size_t length)
{
- MCDI_DECLARE_BUF(inbuf,
- MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX));
+ efx_dword_t *inbuf;
+ size_t inlen;
int rc;
+ inlen = ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4);
+ inbuf = kzalloc(inlen, GFP_KERNEL);
+ if (!inbuf)
+ return -ENOMEM;
+
MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
@@ -2223,14 +2239,14 @@ static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
- rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
- ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
- NULL, 0, NULL);
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, inlen, NULL, 0, NULL);
+ kfree(inbuf);
+
return rc;
}
-static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
- loff_t offset, size_t length)
+int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, loff_t offset,
+ size_t length)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN);
int rc;
@@ -2246,7 +2262,8 @@ static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
return rc;
}
-static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
+int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type,
+ enum efx_update_finish_mode mode)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN);
@@ -2254,22 +2271,41 @@ static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
int rc, rc2;
MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);
- /* Always set this flag. Old firmware ignores it */
- MCDI_POPULATE_DWORD_1(inbuf, NVRAM_UPDATE_FINISH_V2_IN_FLAGS,
+
+ /* Old firmware doesn't support background update finish and abort
+	 * operations. Fall back to waiting if the requested mode is not
+ * supported.
+ */
+ if (!efx_has_cap(efx, NVRAM_UPDATE_POLL_VERIFY_RESULT) ||
+ (!efx_has_cap(efx, NVRAM_UPDATE_ABORT_SUPPORTED) &&
+ mode == EFX_UPDATE_FINISH_ABORT))
+ mode = EFX_UPDATE_FINISH_WAIT;
+
+ MCDI_POPULATE_DWORD_4(inbuf, NVRAM_UPDATE_FINISH_V2_IN_FLAGS,
NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT,
- 1);
+ (mode != EFX_UPDATE_FINISH_ABORT),
+ NVRAM_UPDATE_FINISH_V2_IN_FLAG_RUN_IN_BACKGROUND,
+ (mode == EFX_UPDATE_FINISH_BACKGROUND),
+ NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT,
+ (mode == EFX_UPDATE_FINISH_POLL),
+ NVRAM_UPDATE_FINISH_V2_IN_FLAG_ABORT,
+ (mode == EFX_UPDATE_FINISH_ABORT));
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (!rc && outlen >= MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN) {
rc2 = MCDI_DWORD(outbuf, NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE);
- if (rc2 != MC_CMD_NVRAM_VERIFY_RC_SUCCESS)
+ if (rc2 != MC_CMD_NVRAM_VERIFY_RC_SUCCESS &&
+ rc2 != MC_CMD_NVRAM_VERIFY_RC_PENDING)
netif_err(efx, drv, efx->net_dev,
"NVRAM update failed verification with code 0x%x\n",
rc2);
switch (rc2) {
case MC_CMD_NVRAM_VERIFY_RC_SUCCESS:
break;
+ case MC_CMD_NVRAM_VERIFY_RC_PENDING:
+ rc = -EAGAIN;
+ break;
case MC_CMD_NVRAM_VERIFY_RC_CMS_CHECK_FAILED:
case MC_CMD_NVRAM_VERIFY_RC_MESSAGE_DIGEST_CHECK_FAILED:
case MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHECK_FAILED:
@@ -2284,6 +2320,8 @@ static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
case MC_CMD_NVRAM_VERIFY_RC_NO_VALID_SIGNATURES:
case MC_CMD_NVRAM_VERIFY_RC_NO_TRUSTED_APPROVERS:
case MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH:
+ case MC_CMD_NVRAM_VERIFY_RC_REJECT_TEST_SIGNED:
+ case MC_CMD_NVRAM_VERIFY_RC_SECURITY_LEVEL_DOWNGRADE:
rc = -EPERM;
break;
default:
@@ -2296,6 +2334,42 @@ static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
return rc;
}
+#define EFX_MCDI_NVRAM_UPDATE_FINISH_INITIAL_POLL_DELAY_MS 5
+#define EFX_MCDI_NVRAM_UPDATE_FINISH_MAX_POLL_DELAY_MS 5000
+#define EFX_MCDI_NVRAM_UPDATE_FINISH_RETRIES 185
+
+int efx_mcdi_nvram_update_finish_polled(struct efx_nic *efx, unsigned int type)
+{
+ unsigned int delay = EFX_MCDI_NVRAM_UPDATE_FINISH_INITIAL_POLL_DELAY_MS;
+ unsigned int retry = 0;
+ int rc;
+
+ /* NVRAM updates can take a long time (e.g. up to 1 minute for bundle
+ * images). Polling for NVRAM update completion ensures that other MCDI
+ * commands can be issued before the background NVRAM update completes.
+ *
+ * The initial call either completes the update synchronously, or
+ * returns -EAGAIN to indicate processing is continuing. In the latter
+ * case, we poll for at least 900 seconds, at increasing intervals
+ * (5ms, 50ms, 500ms, 5s).
+ */
+ rc = efx_mcdi_nvram_update_finish(efx, type, EFX_UPDATE_FINISH_BACKGROUND);
+ while (rc == -EAGAIN) {
+ if (retry > EFX_MCDI_NVRAM_UPDATE_FINISH_RETRIES)
+ return -ETIMEDOUT;
+ retry++;
+
+ msleep(delay);
+ if (delay < EFX_MCDI_NVRAM_UPDATE_FINISH_MAX_POLL_DELAY_MS)
+ delay *= 10;
+
+ rc = efx_mcdi_nvram_update_finish(efx, type, EFX_UPDATE_FINISH_POLL);
+ }
+ return rc;
+}
+
+#ifdef CONFIG_SFC_MTD
+
int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
size_t len, size_t *retlen, u8 *buffer)
{
@@ -2389,7 +2463,8 @@ int efx_mcdi_mtd_sync(struct mtd_info *mtd)
if (part->updating) {
part->updating = false;
- rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type);
+ rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type,
+ EFX_UPDATE_FINISH_WAIT);
}
return rc;
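
By my count, the polled finish above sleeps 5 ms + 50 ms + 500 ms and then up to 183 further 5 s intervals before giving up with -ETIMEDOUT, a worst case of roughly 915 s of polling (more in practice, since msleep() can oversleep); that sits just above the 900 s EFX_DEVLINK_UPDATE_FINISH_TIMEOUT advertised to devlink before the finish phase starts.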
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index cdb17d7c147f..3755cd3fe1e6 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -392,7 +392,7 @@ int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq);
int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
size_t *size_out, size_t *erase_size_out,
- bool *protected_out);
+ size_t *write_size_out, bool *protected_out);
int efx_new_mcdi_nvram_test_all(struct efx_nic *efx);
int efx_mcdi_nvram_metadata(struct efx_nic *efx, unsigned int type,
u32 *subtype, u16 version[4], char *desc,
@@ -424,6 +424,26 @@ static inline int efx_mcdi_mon_probe(struct efx_nic *efx) { return 0; }
static inline void efx_mcdi_mon_remove(struct efx_nic *efx) {}
#endif
+int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type);
+int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
+ loff_t offset, const u8 *buffer, size_t length);
+int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
+ loff_t offset, size_t length);
+int efx_mcdi_nvram_metadata(struct efx_nic *efx, unsigned int type,
+ u32 *subtype, u16 version[4], char *desc,
+ size_t descsize);
+
+enum efx_update_finish_mode {
+ EFX_UPDATE_FINISH_WAIT,
+ EFX_UPDATE_FINISH_BACKGROUND,
+ EFX_UPDATE_FINISH_POLL,
+ EFX_UPDATE_FINISH_ABORT,
+};
+
+int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type,
+ enum efx_update_finish_mode mode);
+int efx_mcdi_nvram_update_finish_polled(struct efx_nic *efx, unsigned int type);
+
#ifdef CONFIG_SFC_MTD
int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, size_t len,
size_t *retlen, u8 *buffer);
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index cd297e19cddc..9cb339c461fb 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -72,19 +72,19 @@
* | \------- Error
* \------------------------------ Resync (always set)
*
- * The client writes it's request into MC shared memory, and rings the
- * doorbell. Each request is completed by either by the MC writing
+ * The client writes its request into MC shared memory, and rings the
+ * doorbell. Each request is completed either by the MC writing
* back into shared memory, or by writing out an event.
*
* All MCDI commands support completion by shared memory response. Each
* request may also contain additional data (accounted for by HEADER.LEN),
- * and some response's may also contain additional data (again, accounted
+ * and some responses may also contain additional data (again, accounted
* for by HEADER.LEN).
*
* Some MCDI commands support completion by event, in which any associated
* response data is included in the event.
*
- * The protocol requires one response to be delivered for every request, a
+ * The protocol requires one response to be delivered for every request; a
* request should not be sent unless the response for the previous request
* has been received (either by polling shared memory, or by receiving
* an event).
@@ -165,6 +165,7 @@
#define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc
+
#define MC_CMD_ERR_CODE_OFST 0
#define MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST 4
@@ -321,7 +322,7 @@
/* enum: The requesting client is not a function */
#define MC_CMD_ERR_CLIENT_NOT_FN 0x100c
/* enum: The requested operation might require the command to be passed between
- * MCs, and thetransport doesn't support that. Should only ever been seen over
+ * MCs, and the transport doesn't support that. Should only ever be seen over
* the UART.
*/
#define MC_CMD_ERR_TRANSPORT_NOPROXY 0x100d
@@ -358,7 +359,7 @@
* sub-variant switching.
*/
#define MC_CMD_ERR_FILTERS_PRESENT 0x1014
-/* enum: The clock whose frequency you've attempted to set set doesn't exist on
+/* enum: The clock whose frequency you've attempted to set doesn't exist on
* this NIC
*/
#define MC_CMD_ERR_NO_CLOCK 0x1015
@@ -387,25 +388,6 @@
*/
#define MC_CMD_ERR_PIOBUFS_PRESENT 0x101b
-/* MC_CMD_RESOURCE_SPECIFIER enum */
-/* enum: Any */
-#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff
-#define MC_CMD_RESOURCE_INSTANCE_NONE 0xfffffffe /* enum */
-
-/* MC_CMD_FPGA_FLASH_INDEX enum */
-#define MC_CMD_FPGA_FLASH_PRIMARY 0x0 /* enum */
-#define MC_CMD_FPGA_FLASH_SECONDARY 0x1 /* enum */
-
-/* MC_CMD_EXTERNAL_MAE_LINK_MODE enum */
-/* enum: Legacy mode as described in XN-200039-TC. */
-#define MC_CMD_EXTERNAL_MAE_LINK_MODE_LEGACY 0x0
-/* enum: Switchdev mode as described in XN-200039-TC. */
-#define MC_CMD_EXTERNAL_MAE_LINK_MODE_SWITCHDEV 0x1
-/* enum: Bootstrap mode as described in XN-200039-TC. */
-#define MC_CMD_EXTERNAL_MAE_LINK_MODE_BOOTSTRAP 0x2
-/* enum: Link-mode change is in-progress as described in XN-200039-TC. */
-#define MC_CMD_EXTERNAL_MAE_LINK_MODE_PENDING 0xf
-
/* PCIE_INTERFACE enum: From EF100 onwards, SFC products can have multiple PCIe
* interfaces. There is a need to refer to interfaces explicitly from drivers
* (for example, a management driver on one interface administering a function
@@ -424,6 +406,14 @@
* an on-NIC ARM module is expected to be connected.
*/
#define PCIE_INTERFACE_NIC_EMBEDDED 0x1
+/* enum: The PCIe logical interface 0. It is an alias for HOST_PRIMARY. */
+#define PCIE_INTERFACE_PCIE_HOST_INTF_0 0x0
+/* enum: The PCIe logical interface 1. */
+#define PCIE_INTERFACE_PCIE_HOST_INTF_1 0x2
+/* enum: The PCIe logical interface 2. */
+#define PCIE_INTERFACE_PCIE_HOST_INTF_2 0x3
+/* enum: The PCIe logical interface 3. */
+#define PCIE_INTERFACE_PCIE_HOST_INTF_3 0x4
/* enum: For MCDI commands issued over a PCIe interface, this value is
* translated into the interface over which the command was issued. Not
* meaningful for other MCDI transports.
@@ -640,7 +630,11 @@
* be allocated by different counter blocks, so e.g. AR counter 42 is different
* from CT counter 42. Generation counts are also type-specific. This value is
* also present in the header of streaming counter packets, in the IDENTIFIER
- * field (see packetiser packet format definitions).
+ * field (see packetiser packet format definitions). Also note that LACP
+ * counter IDs are not allocated individually; instead, the counter IDs are
+ * directly tied to the LACP balance table indices. These in turn are allocated
+ * in large contiguous blocks as a LAG config. Calling MAE_COUNTER_ALLOC/FREE
+ * with an LACP counter type will return EPERM.
*/
/* enum: Action Rule counters - can be referenced in AR response. */
#define MAE_COUNTER_TYPE_AR 0x0
@@ -648,6 +642,14 @@
#define MAE_COUNTER_TYPE_CT 0x1
/* enum: Outer Rule counters - can be referenced in OR response. */
#define MAE_COUNTER_TYPE_OR 0x2
+/* enum: LACP counters - linked to LACP balance table entries. */
+#define MAE_COUNTER_TYPE_LACP 0x3
+
+/* MAE_COUNTER_ID enum: ID of allocated counter or counter list. */
+/* enum: A counter ID that is guaranteed never to represent a real counter or
+ * counter list.
+ */
+#define MAE_COUNTER_ID_NULL 0xffffffff
/* TABLE_ID enum: Unique IDs for tables. The 32-bit ID values have been
* structured with bits [31:24] reserved (0), [23:16] indicating which major
@@ -656,7 +658,9 @@
* variations of the same table. (All of the tables currently defined within
* the streaming engines are listed here, but this does not imply that they are
* all supported - MC_CMD_TABLE_LIST returns the list of actually supported
- * tables.)
+ * tables.) The DPU offload engines' enumerators follow a deliberate pattern:
+ * 0x01010000 + is_dpu_net * 0x10000 + is_wr_or_tx * 0x8000 + is_lite_pipe *
+ * 0x1000 + oe_engine_type * 0x100 + oe_instance_within_pipe * 0x10
*/
/* enum: Outer_Rule_Table in the MAE - refer to SF-123102-TC. */
#define TABLE_ID_OUTER_RULE_TABLE 0x10000
@@ -694,45 +698,70 @@
#define TABLE_ID_RSS_CONTEXT_TABLE 0x20200
/* enum: Indirection_Table in VNIC Rx - refer to SF-123102-TC. */
#define TABLE_ID_INDIRECTION_TABLE 0x20300
-
-/* TABLE_COMPRESSED_VLAN enum: Compressed VLAN TPID as used by some field
- * types; can be calculated by (((ether_type_msb >> 2) & 0x4) ^ 0x4) |
- * (ether_type_msb & 0x3);
- */
-#define TABLE_COMPRESSED_VLAN_TPID_8100 0x5 /* enum */
-#define TABLE_COMPRESSED_VLAN_TPID_88A8 0x4 /* enum */
-#define TABLE_COMPRESSED_VLAN_TPID_9100 0x1 /* enum */
-#define TABLE_COMPRESSED_VLAN_TPID_9200 0x2 /* enum */
-#define TABLE_COMPRESSED_VLAN_TPID_9300 0x3 /* enum */
-
-/* TABLE_NAT_DIR enum: NAT direction. */
-#define TABLE_NAT_DIR_SOURCE 0x0 /* enum */
-#define TABLE_NAT_DIR_DEST 0x1 /* enum */
-
-/* TABLE_RSS_KEY_MODE enum: Defines how the value for Toeplitz hashing for RSS
- * is constructed as a concatenation (indicated here by "++") of packet header
- * fields.
- */
-/* enum: IP src addr ++ IP dst addr */
-#define TABLE_RSS_KEY_MODE_SA_DA 0x0
-/* enum: IP src addr ++ IP dst addr ++ TCP/UDP src port ++ TCP/UDP dst port */
-#define TABLE_RSS_KEY_MODE_SA_DA_SP_DP 0x1
-/* enum: IP src addr */
-#define TABLE_RSS_KEY_MODE_SA 0x2
-/* enum: IP dst addr */
-#define TABLE_RSS_KEY_MODE_DA 0x3
-/* enum: IP src addr ++ TCP/UDP src port */
-#define TABLE_RSS_KEY_MODE_SA_SP 0x4
-/* enum: IP dest addr ++ TCP dest port */
-#define TABLE_RSS_KEY_MODE_DA_DP 0x5
-/* enum: Nothing (produces input of 0, resulting in output hash of 0) */
-#define TABLE_RSS_KEY_MODE_NONE 0x7
-
-/* TABLE_RSS_SPREAD_MODE enum: RSS spreading mode. */
-/* enum: RSS uses Indirection_Table lookup. */
-#define TABLE_RSS_SPREAD_MODE_INDIRECTION 0x0
-/* enum: RSS uses even spreading calculation. */
-#define TABLE_RSS_SPREAD_MODE_EVEN 0x1
+/* enum: DPU.host read pipe first CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_HOST_RD_CRC0_OE_PROFILE 0x1010000
+/* enum: DPU.host read pipe second CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_HOST_RD_CRC1_OE_PROFILE 0x1010010
+/* enum: DPU.host write pipe first CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_HOST_WR_CRC0_OE_PROFILE 0x1018000
+/* enum: DPU.host write pipe second CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_HOST_WR_CRC1_OE_PROFILE 0x1018010
+/* enum: DPU.net 'full' receive pipe CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_RX_CRC0_OE_PROFILE 0x1020000
+/* enum: DPU.net 'full' receive pipe first checksum offload engine profiles -
+ * refer to XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_RX_CSUM0_OE_PROFILE 0x1020100
+/* enum: DPU.net 'full' receive pipe second checksum offload engine profiles -
+ * refer to XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_RX_CSUM1_OE_PROFILE 0x1020110
+/* enum: DPU.net 'full' receive pipe AES-GCM offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_RX_AES_GCM0_OE_PROFILE 0x1020200
+/* enum: DPU.net 'lite' receive pipe CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_RXLITE_CRC0_OE_PROFILE 0x1021000
+/* enum: DPU.net 'lite' receive pipe checksum offload engine profiles - refer
+ * to XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_RXLITE_CSUM0_OE_PROFILE 0x1021100
+/* enum: DPU.net 'full' transmit pipe CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_TX_CRC0_OE_PROFILE 0x1028000
+/* enum: DPU.net 'full' transmit pipe first checksum offload engine profiles -
+ * refer to XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_TX_CSUM0_OE_PROFILE 0x1028100
+/* enum: DPU.net 'full' transmit pipe second checksum offload engine profiles -
+ * refer to XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_TX_CSUM1_OE_PROFILE 0x1028110
+/* enum: DPU.net 'full' transmit pipe AES-GCM offload engine profiles - refer
+ * to XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_TX_AES_GCM0_OE_PROFILE 0x1028200
+/* enum: DPU.net 'lite' transmit pipe CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_TXLITE_CRC0_OE_PROFILE 0x1029000
+/* enum: DPU.net 'lite' transmit pipe checksum offload engine profiles - refer
+ * to XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_TXLITE_CSUM0_OE_PROFILE 0x1029100
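
As a spot check of the pattern described above: TABLE_ID_DPU_NET_TX_CSUM1_OE_PROFILE works out to 0x01010000 + 1*0x10000 (dpu_net) + 1*0x8000 (tx) + 0*0x1000 (full pipe) + 1*0x100 (checksum engine) + 1*0x10 (second instance) = 0x1028110, matching the value listed.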
/* TABLE_FIELD_ID enum: Unique IDs for fields. Related concepts have been
* loosely grouped together into blocks with gaps for expansion, but the values
@@ -1026,6 +1055,16 @@
#define TABLE_FIELD_ID_BAL_TBL_BASE_DIV64 0xde
/* enum: Length of balance table region: 0=>64, 1=>128, 2=>256. */
#define TABLE_FIELD_ID_BAL_TBL_LEN_ID 0xdf
+/* enum: LACP LAG ID (i.e. the low 3 bits of LACP LAG mport ID), indexing
+ * LACP_LAG_Config_Table. Refer to SF-123102-TC.
+ */
+#define TABLE_FIELD_ID_LACP_LAG_ID 0xe0
+/* enum: Address in LACP_Balance_Table. The balance table is partitioned
+ * between LAGs according to the settings in LACP_LAG_Config_Table and then
+ * indexed by the LACP hash, providing the mapping to destination mports. Refer
+ * to SF-123102-TC.
+ */
+#define TABLE_FIELD_ID_BAL_TBL_ADDR 0xe1
/* enum: UDP port to match for UDP-based encapsulations; required to be 0 for
* other encapsulation types.
*/
@@ -1082,6 +1121,55 @@
#define TABLE_FIELD_ID_INDIR_TBL_LEN_ID 0x105
/* enum: An offset to be applied to the base destination queue ID. */
#define TABLE_FIELD_ID_INDIR_OFFSET 0x106
+/* enum: DPU offload engine profile ID to address. */
+#define TABLE_FIELD_ID_OE_PROFILE 0x3e8
+/* enum: Width of the CRC to calculate - see CRC_VARIANT enum. */
+#define TABLE_FIELD_ID_CRC_VARIANT 0x3f2
+/* enum: If set, reflect the bits of each input byte, bit 7 is LSB, bit 0 is
+ * MSB. If clear, bit 7 is MSB, bit 0 is LSB.
+ */
+#define TABLE_FIELD_ID_CRC_REFIN 0x3f3
+/* enum: If set, reflect the bits of each output byte, bit 7 is LSB, bit 0 is
+ * MSB. If clear, bit 7 is MSB, bit 0 is LSB.
+ */
+#define TABLE_FIELD_ID_CRC_REFOUT 0x3f4
+/* enum: If set, invert every bit of the output value. */
+#define TABLE_FIELD_ID_CRC_INVOUT 0x3f5
+/* enum: The CRC polynomial to use for checksumming, in normal form. */
+#define TABLE_FIELD_ID_CRC_POLY 0x3f6
+/* enum: Operation for the checksum engine to perform - see DPU_CSUM_OP enum.
+ */
+#define TABLE_FIELD_ID_CSUM_OP 0x410
+/* enum: Byte offset of checksum relative to region_start (for VALIDATE_*
+ * operations only).
+ */
+#define TABLE_FIELD_ID_CSUM_OFFSET 0x411
+/* enum: Indicates that there is additional data on the OPR bus that needs to
+ * be incorporated into the payload checksum.
+ */
+#define TABLE_FIELD_ID_CSUM_OPR_ADDITIONAL_DATA 0x412
+/* enum: Log2 data size of additional data on OPR bus. */
+#define TABLE_FIELD_ID_CSUM_OPR_DATA_SIZE_LOG2 0x413
+/* enum: 4 byte offset of where to find the additional data on the OPR bus. */
+#define TABLE_FIELD_ID_CSUM_OPR_4B_OFF 0x414
+/* enum: Operation type for the AES-GCM core - see GCM_OP_CODE enum. */
+#define TABLE_FIELD_ID_GCM_OP_CODE 0x41a
+/* enum: Key length - see AES_KEY_LEN enum. */
+#define TABLE_FIELD_ID_GCM_KEY_LEN 0x41b
+/* enum: OPR 4 byte offset for ICV or GHASH output (only in BULK_* mode) or
+ * IPSEC decrypt output.
+ */
+#define TABLE_FIELD_ID_GCM_OPR_4B_OFFSET 0x41c
+/* enum: If OP_CODE is BULK_*, indicates Emit GHASH (Fragment mode). Else,
+ * indicates IPSEC-ESN mode.
+ */
+#define TABLE_FIELD_ID_GCM_EMIT_GHASH_ISESN 0x41d
+/* enum: Replay Protection Enable. */
+#define TABLE_FIELD_ID_GCM_REPLAY_PROTECT_EN 0x41e
+/* enum: IPSEC Encrypt ESP trailer NEXT_HEADER byte. */
+#define TABLE_FIELD_ID_GCM_NEXT_HDR 0x41f
+/* enum: Replay Window Size. */
+#define TABLE_FIELD_ID_GCM_REPLAY_WIN_SIZE 0x420
/* MCDI_EVENT structuredef: The structure of an MCDI_EVENT on Siena/EF10/EF100
* platforms
@@ -1138,6 +1226,24 @@
#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_OFST 0
#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24
#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_WIDTH 8
+#define MCDI_EVENT_PORT_LINKCHANGE_PORT_HANDLE_OFST 0
+#define MCDI_EVENT_PORT_LINKCHANGE_PORT_HANDLE_LBN 0
+#define MCDI_EVENT_PORT_LINKCHANGE_PORT_HANDLE_WIDTH 24
+#define MCDI_EVENT_PORT_LINKCHANGE_SEQ_NUM_OFST 0
+#define MCDI_EVENT_PORT_LINKCHANGE_SEQ_NUM_LBN 24
+#define MCDI_EVENT_PORT_LINKCHANGE_SEQ_NUM_WIDTH 7
+#define MCDI_EVENT_PORT_LINKCHANGE_LINK_UP_OFST 0
+#define MCDI_EVENT_PORT_LINKCHANGE_LINK_UP_LBN 31
+#define MCDI_EVENT_PORT_LINKCHANGE_LINK_UP_WIDTH 1
+#define MCDI_EVENT_PORT_MODULECHANGE_PORT_HANDLE_OFST 0
+#define MCDI_EVENT_PORT_MODULECHANGE_PORT_HANDLE_LBN 0
+#define MCDI_EVENT_PORT_MODULECHANGE_PORT_HANDLE_WIDTH 24
+#define MCDI_EVENT_PORT_MODULECHANGE_SEQ_NUM_OFST 0
+#define MCDI_EVENT_PORT_MODULECHANGE_SEQ_NUM_LBN 24
+#define MCDI_EVENT_PORT_MODULECHANGE_SEQ_NUM_WIDTH 7
+#define MCDI_EVENT_PORT_MODULECHANGE_MDI_CONNECTED_OFST 0
+#define MCDI_EVENT_PORT_MODULECHANGE_MDI_CONNECTED_LBN 31
+#define MCDI_EVENT_PORT_MODULECHANGE_MDI_CONNECTED_WIDTH 1
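The OFST/LBN/WIDTH triples above describe bitfields packed into the 32-bit event DATA word. As a sketch, a PORT_LINKCHANGE payload could be decoded with a generic mask-and-shift built from those pairs (the sfc driver has its own EFX_*/MCDI_EVENT field helpers for this; EV_FIELD() here is illustrative):

/* Decode the PORT_LINKCHANGE payload defined above: PORT_HANDLE in bits
 * 0-23, SEQ_NUM in bits 24-30, LINK_UP in bit 31.
 */
#define EV_FIELD(data, name)						\
	(((data) >> MCDI_EVENT_PORT_LINKCHANGE_ ## name ## _LBN) &	\
	 ((1u << MCDI_EVENT_PORT_LINKCHANGE_ ## name ## _WIDTH) - 1))

struct port_linkchange {
	unsigned int port_handle;
	unsigned int seq_num;	/* wraps at 7 bits; detects missed events */
	unsigned int link_up;	/* 1 = link is up */
};

static struct port_linkchange decode_port_linkchange(unsigned int data)
{
	struct port_linkchange ev = {
		.port_handle = EV_FIELD(data, PORT_HANDLE),
		.seq_num     = EV_FIELD(data, SEQ_NUM),
		.link_up     = EV_FIELD(data, LINK_UP),
	};
	return ev;
}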
#define MCDI_EVENT_SENSOREVT_MONITOR_OFST 0
#define MCDI_EVENT_SENSOREVT_MONITOR_LBN 0
#define MCDI_EVENT_SENSOREVT_MONITOR_WIDTH 8
@@ -1237,7 +1343,7 @@
#define MCDI_EVENT_AOE_FPGA_LOAD_FAILED 0xe
/* enum: Notify that invalid flash type detected */
#define MCDI_EVENT_AOE_INVALID_FPGA_FLASH_TYPE 0xf
-/* enum: Notify that the attempt to run FPGA Controller firmware timedout */
+/* enum: Notify that the attempt to run FPGA Controller firmware timed out */
#define MCDI_EVENT_AOE_FC_RUN_TIMEDOUT 0x10
/* enum: Failure to probe one or more FPGA boot flash chips */
#define MCDI_EVENT_AOE_FPGA_BOOT_FLASH_INVALID 0x11
@@ -1255,7 +1361,7 @@
#define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_WIDTH 8
/* enum: FC Assert happened, but the register information is not available */
#define MCDI_EVENT_AOE_ERR_FC_ASSERT_SEEN 0x0
-/* enum: The register information for FC Assert is ready for readinng by driver
+/* enum: The register information for FC Assert is ready for reading by driver
*/
#define MCDI_EVENT_AOE_ERR_FC_ASSERT_DATA_READY 0x1
#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_OFST 0
@@ -1364,6 +1470,12 @@
#define MCDI_EVENT_MODULECHANGE_SEQ_OFST 0
#define MCDI_EVENT_MODULECHANGE_SEQ_LBN 30
#define MCDI_EVENT_MODULECHANGE_SEQ_WIDTH 2
+#define MCDI_EVENT_DESC_PROXY_VIRTQ_VI_ID_OFST 0
+#define MCDI_EVENT_DESC_PROXY_VIRTQ_VI_ID_LBN 0
+#define MCDI_EVENT_DESC_PROXY_VIRTQ_VI_ID_WIDTH 16
+#define MCDI_EVENT_DESC_PROXY_VIRTQ_ID_OFST 0
+#define MCDI_EVENT_DESC_PROXY_VIRTQ_ID_LBN 16
+#define MCDI_EVENT_DESC_PROXY_VIRTQ_ID_WIDTH 16
#define MCDI_EVENT_DATA_LBN 0
#define MCDI_EVENT_DATA_WIDTH 32
/* Alias for PTP_DATA. */
@@ -1500,6 +1612,31 @@
* change to the journal.
*/
#define MCDI_EVENT_CODE_MPORT_JOURNAL_CHANGE 0x27
+/* enum: Notification that a source queue is enabled and attached to its proxy
+ * sink queue. SRC field contains the handle of the affected descriptor proxy
+ * function. DATA field contains the relative source queue number and absolute
+ * VI ID.
+ */
+#define MCDI_EVENT_CODE_DESC_PROXY_FUNC_QUEUE_START 0x28
+/* enum: Notification of a change in link state and/or link speed of a network
+ * port link. This event applies to a network port identified by a handle,
+ * PORT_HANDLE, which is discovered by the driver using the MC_CMD_ENUM_PORTS
+ * command.
+ */
+#define MCDI_EVENT_CODE_PORT_LINKCHANGE 0x29
+/* enum: Notification of a change in the state of an MDI (external connector)
+ * of a network port. This typically corresponds to module plug/unplug for
+ * modular interfaces (e.g., SFP/QSFP and similar) or cable connect/disconnect.
+ * This event applies to a network port identified by a handle, PORT_HANDLE,
+ * which is discovered by the driver using the MC_CMD_ENUM_PORTS command.
+ */
+#define MCDI_EVENT_CODE_PORT_MODULECHANGE 0x2a
+/* enum: Notification that the port enumeration journal has changed since it
+ * was last read and updates can be read using the MC_CMD_ENUM_PORTS command.
+ * The firmware may moderate the events so that an event is not sent for every
+ * change to the journal.
+ */
+#define MCDI_EVENT_CODE_ENUM_PORTS_CHANGE 0x2b
/* enum: Artificial event generated by host and posted via MC for test
* purposes.
*/
@@ -1512,6 +1649,14 @@
#define MCDI_EVENT_LINKCHANGE_DATA_LEN 4
#define MCDI_EVENT_LINKCHANGE_DATA_LBN 0
#define MCDI_EVENT_LINKCHANGE_DATA_WIDTH 32
+#define MCDI_EVENT_PORT_LINKCHANGE_DATA_OFST 0
+#define MCDI_EVENT_PORT_LINKCHANGE_DATA_LEN 4
+#define MCDI_EVENT_PORT_LINKCHANGE_DATA_LBN 0
+#define MCDI_EVENT_PORT_LINKCHANGE_DATA_WIDTH 32
+#define MCDI_EVENT_PORT_MODULECHANGE_DATA_OFST 0
+#define MCDI_EVENT_PORT_MODULECHANGE_DATA_LEN 4
+#define MCDI_EVENT_PORT_MODULECHANGE_DATA_LBN 0
+#define MCDI_EVENT_PORT_MODULECHANGE_DATA_WIDTH 32
#define MCDI_EVENT_SENSOREVT_DATA_OFST 0
#define MCDI_EVENT_SENSOREVT_DATA_LEN 4
#define MCDI_EVENT_SENSOREVT_DATA_LBN 0
@@ -1668,247 +1813,6 @@
#define MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_LBN 0
#define MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_WIDTH 32
-/* FCDI_EVENT structuredef */
-#define FCDI_EVENT_LEN 8
-#define FCDI_EVENT_CONT_LBN 32
-#define FCDI_EVENT_CONT_WIDTH 1
-#define FCDI_EVENT_LEVEL_LBN 33
-#define FCDI_EVENT_LEVEL_WIDTH 3
-/* enum: Info. */
-#define FCDI_EVENT_LEVEL_INFO 0x0
-/* enum: Warning. */
-#define FCDI_EVENT_LEVEL_WARN 0x1
-/* enum: Error. */
-#define FCDI_EVENT_LEVEL_ERR 0x2
-/* enum: Fatal. */
-#define FCDI_EVENT_LEVEL_FATAL 0x3
-#define FCDI_EVENT_DATA_OFST 0
-#define FCDI_EVENT_DATA_LEN 4
-#define FCDI_EVENT_LINK_STATE_STATUS_OFST 0
-#define FCDI_EVENT_LINK_STATE_STATUS_LBN 0
-#define FCDI_EVENT_LINK_STATE_STATUS_WIDTH 1
-#define FCDI_EVENT_LINK_DOWN 0x0 /* enum */
-#define FCDI_EVENT_LINK_UP 0x1 /* enum */
-#define FCDI_EVENT_DATA_LBN 0
-#define FCDI_EVENT_DATA_WIDTH 32
-#define FCDI_EVENT_SRC_LBN 36
-#define FCDI_EVENT_SRC_WIDTH 8
-#define FCDI_EVENT_EV_CODE_LBN 60
-#define FCDI_EVENT_EV_CODE_WIDTH 4
-#define FCDI_EVENT_CODE_LBN 44
-#define FCDI_EVENT_CODE_WIDTH 8
-/* enum: The FC was rebooted. */
-#define FCDI_EVENT_CODE_REBOOT 0x1
-/* enum: Bad assert. */
-#define FCDI_EVENT_CODE_ASSERT 0x2
-/* enum: DDR3 test result. */
-#define FCDI_EVENT_CODE_DDR_TEST_RESULT 0x3
-/* enum: Link status. */
-#define FCDI_EVENT_CODE_LINK_STATE 0x4
-/* enum: A timed read is ready to be serviced. */
-#define FCDI_EVENT_CODE_TIMED_READ 0x5
-/* enum: One or more PPS IN events */
-#define FCDI_EVENT_CODE_PPS_IN 0x6
-/* enum: Tick event from PTP clock */
-#define FCDI_EVENT_CODE_PTP_TICK 0x7
-/* enum: ECC error counters */
-#define FCDI_EVENT_CODE_DDR_ECC_STATUS 0x8
-/* enum: Current status of PTP */
-#define FCDI_EVENT_CODE_PTP_STATUS 0x9
-/* enum: Port id config to map MC-FC port idx */
-#define FCDI_EVENT_CODE_PORT_CONFIG 0xa
-/* enum: Boot result or error code */
-#define FCDI_EVENT_CODE_BOOT_RESULT 0xb
-#define FCDI_EVENT_REBOOT_SRC_LBN 36
-#define FCDI_EVENT_REBOOT_SRC_WIDTH 8
-#define FCDI_EVENT_REBOOT_FC_FW 0x0 /* enum */
-#define FCDI_EVENT_REBOOT_FC_BOOTLOADER 0x1 /* enum */
-#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
-#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LEN 4
-#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
-#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
-#define FCDI_EVENT_ASSERT_TYPE_LBN 36
-#define FCDI_EVENT_ASSERT_TYPE_WIDTH 8
-#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_LBN 36
-#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_WIDTH 8
-#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_OFST 0
-#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LEN 4
-#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LBN 0
-#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_WIDTH 32
-#define FCDI_EVENT_LINK_STATE_DATA_OFST 0
-#define FCDI_EVENT_LINK_STATE_DATA_LEN 4
-#define FCDI_EVENT_LINK_STATE_DATA_LBN 0
-#define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
-#define FCDI_EVENT_PTP_STATE_OFST 0
-#define FCDI_EVENT_PTP_STATE_LEN 4
-#define FCDI_EVENT_PTP_UNDEFINED 0x0 /* enum */
-#define FCDI_EVENT_PTP_SETUP_FAILED 0x1 /* enum */
-#define FCDI_EVENT_PTP_OPERATIONAL 0x2 /* enum */
-#define FCDI_EVENT_PTP_STATE_LBN 0
-#define FCDI_EVENT_PTP_STATE_WIDTH 32
-#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36
-#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8
-#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0
-#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LEN 4
-#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0
-#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32
-/* Index of MC port being referred to */
-#define FCDI_EVENT_PORT_CONFIG_SRC_LBN 36
-#define FCDI_EVENT_PORT_CONFIG_SRC_WIDTH 8
-/* FC Port index that matches the MC port index in SRC */
-#define FCDI_EVENT_PORT_CONFIG_DATA_OFST 0
-#define FCDI_EVENT_PORT_CONFIG_DATA_LEN 4
-#define FCDI_EVENT_PORT_CONFIG_DATA_LBN 0
-#define FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32
-#define FCDI_EVENT_BOOT_RESULT_OFST 0
-#define FCDI_EVENT_BOOT_RESULT_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_AOE/MC_CMD_AOE_OUT_INFO/FC_BOOT_RESULT */
-#define FCDI_EVENT_BOOT_RESULT_LBN 0
-#define FCDI_EVENT_BOOT_RESULT_WIDTH 32
-
-/* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events
- * to the MC. Note that this structure | is overlayed over a normal FCDI event
- * such that bits 32-63 containing | event code, level, source etc remain the
- * same. In this case the data | field of the header is defined to be the
- * number of timestamps
- */
-#define FCDI_EXTENDED_EVENT_PPS_LENMIN 16
-#define FCDI_EXTENDED_EVENT_PPS_LENMAX 248
-#define FCDI_EXTENDED_EVENT_PPS_LENMAX_MCDI2 1016
-#define FCDI_EXTENDED_EVENT_PPS_LEN(num) (8+8*(num))
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_NUM(len) (((len)-8)/8)
-/* Number of timestamps following */
-#define FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0
-#define FCDI_EXTENDED_EVENT_PPS_COUNT_LEN 4
-#define FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0
-#define FCDI_EXTENDED_EVENT_PPS_COUNT_WIDTH 32
-/* Seconds field of a timestamp record */
-#define FCDI_EXTENDED_EVENT_PPS_SECONDS_OFST 8
-#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LEN 4
-#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LBN 64
-#define FCDI_EXTENDED_EVENT_PPS_SECONDS_WIDTH 32
-/* Nanoseconds field of a timestamp record */
-#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_OFST 12
-#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LEN 4
-#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96
-#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32
-/* Timestamp records comprising the event */
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_OFST 8
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LEN 8
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_OFST 8
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_LEN 4
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_LBN 64
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_WIDTH 32
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_OFST 12
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_LEN 4
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_LBN 96
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_WIDTH 32
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MINNUM 1
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM 30
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM_MCDI2 126
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LBN 64
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_WIDTH 64
-
-/* MUM_EVENT structuredef */
-#define MUM_EVENT_LEN 8
-#define MUM_EVENT_CONT_LBN 32
-#define MUM_EVENT_CONT_WIDTH 1
-#define MUM_EVENT_LEVEL_LBN 33
-#define MUM_EVENT_LEVEL_WIDTH 3
-/* enum: Info. */
-#define MUM_EVENT_LEVEL_INFO 0x0
-/* enum: Warning. */
-#define MUM_EVENT_LEVEL_WARN 0x1
-/* enum: Error. */
-#define MUM_EVENT_LEVEL_ERR 0x2
-/* enum: Fatal. */
-#define MUM_EVENT_LEVEL_FATAL 0x3
-#define MUM_EVENT_DATA_OFST 0
-#define MUM_EVENT_DATA_LEN 4
-#define MUM_EVENT_SENSOR_ID_OFST 0
-#define MUM_EVENT_SENSOR_ID_LBN 0
-#define MUM_EVENT_SENSOR_ID_WIDTH 8
-/* Enum values, see field(s): */
-/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
-#define MUM_EVENT_SENSOR_STATE_OFST 0
-#define MUM_EVENT_SENSOR_STATE_LBN 8
-#define MUM_EVENT_SENSOR_STATE_WIDTH 8
-#define MUM_EVENT_PORT_PHY_READY_OFST 0
-#define MUM_EVENT_PORT_PHY_READY_LBN 0
-#define MUM_EVENT_PORT_PHY_READY_WIDTH 1
-#define MUM_EVENT_PORT_PHY_LINK_UP_OFST 0
-#define MUM_EVENT_PORT_PHY_LINK_UP_LBN 1
-#define MUM_EVENT_PORT_PHY_LINK_UP_WIDTH 1
-#define MUM_EVENT_PORT_PHY_TX_LOL_OFST 0
-#define MUM_EVENT_PORT_PHY_TX_LOL_LBN 2
-#define MUM_EVENT_PORT_PHY_TX_LOL_WIDTH 1
-#define MUM_EVENT_PORT_PHY_RX_LOL_OFST 0
-#define MUM_EVENT_PORT_PHY_RX_LOL_LBN 3
-#define MUM_EVENT_PORT_PHY_RX_LOL_WIDTH 1
-#define MUM_EVENT_PORT_PHY_TX_LOS_OFST 0
-#define MUM_EVENT_PORT_PHY_TX_LOS_LBN 4
-#define MUM_EVENT_PORT_PHY_TX_LOS_WIDTH 1
-#define MUM_EVENT_PORT_PHY_RX_LOS_OFST 0
-#define MUM_EVENT_PORT_PHY_RX_LOS_LBN 5
-#define MUM_EVENT_PORT_PHY_RX_LOS_WIDTH 1
-#define MUM_EVENT_PORT_PHY_TX_FAULT_OFST 0
-#define MUM_EVENT_PORT_PHY_TX_FAULT_LBN 6
-#define MUM_EVENT_PORT_PHY_TX_FAULT_WIDTH 1
-#define MUM_EVENT_DATA_LBN 0
-#define MUM_EVENT_DATA_WIDTH 32
-#define MUM_EVENT_SRC_LBN 36
-#define MUM_EVENT_SRC_WIDTH 8
-#define MUM_EVENT_EV_CODE_LBN 60
-#define MUM_EVENT_EV_CODE_WIDTH 4
-#define MUM_EVENT_CODE_LBN 44
-#define MUM_EVENT_CODE_WIDTH 8
-/* enum: The MUM was rebooted. */
-#define MUM_EVENT_CODE_REBOOT 0x1
-/* enum: Bad assert. */
-#define MUM_EVENT_CODE_ASSERT 0x2
-/* enum: Sensor failure. */
-#define MUM_EVENT_CODE_SENSOR 0x3
-/* enum: Link fault has been asserted, or has cleared. */
-#define MUM_EVENT_CODE_QSFP_LASI_INTERRUPT 0x4
-#define MUM_EVENT_SENSOR_DATA_OFST 0
-#define MUM_EVENT_SENSOR_DATA_LEN 4
-#define MUM_EVENT_SENSOR_DATA_LBN 0
-#define MUM_EVENT_SENSOR_DATA_WIDTH 32
-#define MUM_EVENT_PORT_PHY_FLAGS_OFST 0
-#define MUM_EVENT_PORT_PHY_FLAGS_LEN 4
-#define MUM_EVENT_PORT_PHY_FLAGS_LBN 0
-#define MUM_EVENT_PORT_PHY_FLAGS_WIDTH 32
-#define MUM_EVENT_PORT_PHY_COPPER_LEN_OFST 0
-#define MUM_EVENT_PORT_PHY_COPPER_LEN_LEN 4
-#define MUM_EVENT_PORT_PHY_COPPER_LEN_LBN 0
-#define MUM_EVENT_PORT_PHY_COPPER_LEN_WIDTH 32
-#define MUM_EVENT_PORT_PHY_CAPS_OFST 0
-#define MUM_EVENT_PORT_PHY_CAPS_LEN 4
-#define MUM_EVENT_PORT_PHY_CAPS_LBN 0
-#define MUM_EVENT_PORT_PHY_CAPS_WIDTH 32
-#define MUM_EVENT_PORT_PHY_TECH_OFST 0
-#define MUM_EVENT_PORT_PHY_TECH_LEN 4
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_UNKNOWN 0x0 /* enum */
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_OPTICAL 0x1 /* enum */
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE 0x2 /* enum */
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE_EQUALIZED 0x3 /* enum */
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LIMITING 0x4 /* enum */
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LINEAR 0x5 /* enum */
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_BASE_T 0x6 /* enum */
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_LOOPBACK_PASSIVE 0x7 /* enum */
-#define MUM_EVENT_PORT_PHY_TECH_LBN 0
-#define MUM_EVENT_PORT_PHY_TECH_WIDTH 32
-#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_LBN 36
-#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_WIDTH 4
-#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_FLAGS 0x0 /* enum */
-#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_COPPER_LEN 0x1 /* enum */
-#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_CAPS 0x2 /* enum */
-#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_TECH 0x3 /* enum */
-#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_MAX 0x4 /* enum */
-#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_LBN 40
-#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_WIDTH 4
-
/***********************************/
/* MC_CMD_READ32
@@ -1969,90 +1873,6 @@
/***********************************/
-/* MC_CMD_COPYCODE
- * Copy MC code between two locations and jump. Note - this command really
- * belongs to INSECURE category but is required by shmboot. The command handler
- * has additional checks to reject insecure calls.
- */
-#define MC_CMD_COPYCODE 0x3
-#undef MC_CMD_0x3_PRIVILEGE_CTG
-
-#define MC_CMD_0x3_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_COPYCODE_IN msgrequest */
-#define MC_CMD_COPYCODE_IN_LEN 16
-/* Source address
- *
- * The main image should be entered via a copy of a single word from and to a
- * magic address, which controls various aspects of the boot. The magic address
- * is a bitfield, with each bit as documented below.
- */
-#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
-#define MC_CMD_COPYCODE_IN_SRC_ADDR_LEN 4
-/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT (see below) */
-#define MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000
-/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT and
- * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED (see below)
- */
-#define MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR 0x1d0d0
-/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT,
- * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED and BOOT_MAGIC_IGNORE_CONFIG (see
- * below)
- */
-#define MC_CMD_COPYCODE_HUNT_IGNORE_CONFIG_MAGIC_ADDR 0x1badc
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_OFST 0
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_LBN 17
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_WIDTH 1
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_OFST 0
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_LBN 2
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_WIDTH 1
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_OFST 0
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_LBN 3
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_WIDTH 1
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_OFST 0
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_LBN 4
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_WIDTH 1
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_OFST 0
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_LBN 5
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_WIDTH 1
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_OFST 0
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_LBN 6
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_WIDTH 1
-/* Destination address */
-#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
-#define MC_CMD_COPYCODE_IN_DEST_ADDR_LEN 4
-#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
-#define MC_CMD_COPYCODE_IN_NUMWORDS_LEN 4
-/* Address of where to jump after copy. */
-#define MC_CMD_COPYCODE_IN_JUMP_OFST 12
-#define MC_CMD_COPYCODE_IN_JUMP_LEN 4
-/* enum: Control should return to the caller rather than jumping */
-#define MC_CMD_COPYCODE_JUMP_NONE 0x1
-
-/* MC_CMD_COPYCODE_OUT msgresponse */
-#define MC_CMD_COPYCODE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_SET_FUNC
- * Select function for function-specific commands.
- */
-#define MC_CMD_SET_FUNC 0x4
-#undef MC_CMD_0x4_PRIVILEGE_CTG
-
-#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_SET_FUNC_IN msgrequest */
-#define MC_CMD_SET_FUNC_IN_LEN 4
-/* Set function */
-#define MC_CMD_SET_FUNC_IN_FUNC_OFST 0
-#define MC_CMD_SET_FUNC_IN_FUNC_LEN 4
-
-/* MC_CMD_SET_FUNC_OUT msgresponse */
-#define MC_CMD_SET_FUNC_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_GET_BOOT_STATUS
* Get the instruction address from which the MC booted.
*/
@@ -2259,6 +2079,7 @@
/* Log destination */
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_LEN 4
+/* enum property: bitmask */
/* enum: UART. */
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1
/* enum: Event queue. */
@@ -2304,6 +2125,9 @@
/* MC_CMD_GET_VERSION_OUT msgresponse */
#define MC_CMD_GET_VERSION_OUT_LEN 32
+/* This is normally the UTC build time in seconds since epoch or one of the
+ * special values listed
+ */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
@@ -2326,6 +2150,9 @@
/* MC_CMD_GET_VERSION_EXT_OUT msgresponse */
#define MC_CMD_GET_VERSION_EXT_OUT_LEN 48
+/* This is normally the UTC build time in seconds since epoch or one of the
+ * special values listed
+ */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
@@ -2356,6 +2183,9 @@
* (depending on which components exist on a particular adapter)
*/
#define MC_CMD_GET_VERSION_V2_OUT_LEN 304
+/* This is normally the UTC build time in seconds since epoch or one of the
+ * special values listed
+ */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
@@ -2495,6 +2325,9 @@
* (depending on which components exist on a particular adapter)
*/
#define MC_CMD_GET_VERSION_V3_OUT_LEN 328
+/* This is normally the UTC build time in seconds since epoch or one of the
+ * special values listed
+ */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
@@ -2641,6 +2474,9 @@
* version information
*/
#define MC_CMD_GET_VERSION_V4_OUT_LEN 392
+/* This is normally the UTC build time in seconds since epoch or one of the
+ * special values listed
+ */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
@@ -2803,6 +2639,9 @@
* and board version information
*/
#define MC_CMD_GET_VERSION_V5_OUT_LEN 424
+/* This is normally the UTC build time in seconds since epoch or one of the
+ * special values listed
+ */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
@@ -3065,8 +2904,18 @@
* subscribers.
*/
#define MC_CMD_PTP_OP_SET_SYNC_STATUS 0x1b
-/* enum: Above this for future use. */
-#define MC_CMD_PTP_OP_MAX 0x1c
+/* enum: X4 and later adapters should use this instead of
+ * PTP_OP_TIME_EVENT_SUBSCRIBE. Subscribe to receive periodic time events
+ * indicating the current NIC time.
+ */
+#define MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE_V2 0x1c
+/* enum: For X4 and later NICs. Packet timestamps and time sync events carry
+ * IS_SET and IN_SYNC flags that indicate whether the time is updated and
+ * whether it is in sync with the host. Once set, the IN_SYNC flag is cleared
+ * by hardware after a software-configurable timeout. The host driver can use
+ * this MCDI to query the currently configured timeout and the maximum
+ * supported interval.
+ */
+#define MC_CMD_PTP_OP_GET_SYNC_TIMEOUT 0x1d
/* MC_CMD_PTP_IN_ENABLE msgrequest */
#define MC_CMD_PTP_IN_ENABLE_LEN 16
@@ -3507,6 +3356,22 @@
#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_OFST 12
#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_LEN 4
+/* MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2 msgrequest */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Event queue ID */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_QUEUE_ID_OFST 8
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_QUEUE_ID_LEN 4
+/* Space for flags. */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_FLAGS_OFST 12
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_FLAGS_LEN 4
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_REPORT_SYNC_STATUS_OFST 12
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_REPORT_SYNC_STATUS_LBN 31
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_REPORT_SYNC_STATUS_WIDTH 1
+
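As a sketch of how a driver might build this request with the MCDI buffer helpers used elsewhere in the sfc driver (MCDI_DECLARE_BUF/MCDI_SET_DWORD/MCDI_POPULATE_DWORD_1/efx_mcdi_rpc); the event-channel choice, the `efx`/`rc` locals and error handling are assumptions:

/* Illustrative only: subscribe to periodic time events with sync-status
 * reporting.  QUEUE_ID is the plain dword at offset 8; REPORT_SYNC_STATUS
 * is bit 31 of the FLAGS dword at offset 12.
 */
MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_LEN);

MCDI_SET_DWORD(inbuf, PTP_IN_CMD, MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE_V2);
MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_V2_QUEUE_ID,
	       ptp_channel);	/* assumption: the PTP event queue's ID */
MCDI_POPULATE_DWORD_1(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_V2_FLAGS,
		      PTP_IN_TIME_EVENT_SUBSCRIBE_V2_REPORT_SYNC_STATUS, 1);

rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), NULL, 0, NULL);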
/* MC_CMD_PTP_IN_MANFTEST_PPS msgrequest */
#define MC_CMD_PTP_IN_MANFTEST_PPS_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
@@ -3540,6 +3405,13 @@
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_OFST 20
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_LEN 4
+/* MC_CMD_PTP_IN_GET_SYNC_TIMEOUT msgrequest */
+#define MC_CMD_PTP_IN_GET_SYNC_TIMEOUT_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
/* MC_CMD_PTP_OUT msgresponse */
#define MC_CMD_PTP_OUT_LEN 0
@@ -3939,416 +3811,14 @@
/* MC_CMD_PTP_OUT_SET_SYNC_STATUS msgresponse */
#define MC_CMD_PTP_OUT_SET_SYNC_STATUS_LEN 0
-
-/***********************************/
-/* MC_CMD_CSR_READ32
- * Read 32bit words from the indirect memory map.
- */
-#define MC_CMD_CSR_READ32 0xc
-#undef MC_CMD_0xc_PRIVILEGE_CTG
-
-#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_CSR_READ32_IN msgrequest */
-#define MC_CMD_CSR_READ32_IN_LEN 12
-/* Address */
-#define MC_CMD_CSR_READ32_IN_ADDR_OFST 0
-#define MC_CMD_CSR_READ32_IN_ADDR_LEN 4
-#define MC_CMD_CSR_READ32_IN_STEP_OFST 4
-#define MC_CMD_CSR_READ32_IN_STEP_LEN 4
-#define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8
-#define MC_CMD_CSR_READ32_IN_NUMWORDS_LEN 4
-
-/* MC_CMD_CSR_READ32_OUT msgresponse */
-#define MC_CMD_CSR_READ32_OUT_LENMIN 4
-#define MC_CMD_CSR_READ32_OUT_LENMAX 252
-#define MC_CMD_CSR_READ32_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_CSR_READ32_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_CSR_READ32_OUT_BUFFER_NUM(len) (((len)-0)/4)
-/* The last dword is the status, not a value read */
-#define MC_CMD_CSR_READ32_OUT_BUFFER_OFST 0
-#define MC_CMD_CSR_READ32_OUT_BUFFER_LEN 4
-#define MC_CMD_CSR_READ32_OUT_BUFFER_MINNUM 1
-#define MC_CMD_CSR_READ32_OUT_BUFFER_MAXNUM 63
-#define MC_CMD_CSR_READ32_OUT_BUFFER_MAXNUM_MCDI2 255
-
-
-/***********************************/
-/* MC_CMD_CSR_WRITE32
- * Write 32bit dwords to the indirect memory map.
- */
-#define MC_CMD_CSR_WRITE32 0xd
-#undef MC_CMD_0xd_PRIVILEGE_CTG
-
-#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_CSR_WRITE32_IN msgrequest */
-#define MC_CMD_CSR_WRITE32_IN_LENMIN 12
-#define MC_CMD_CSR_WRITE32_IN_LENMAX 252
-#define MC_CMD_CSR_WRITE32_IN_LENMAX_MCDI2 1020
-#define MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num))
-#define MC_CMD_CSR_WRITE32_IN_BUFFER_NUM(len) (((len)-8)/4)
-/* Address */
-#define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0
-#define MC_CMD_CSR_WRITE32_IN_ADDR_LEN 4
-#define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4
-#define MC_CMD_CSR_WRITE32_IN_STEP_LEN 4
-#define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8
-#define MC_CMD_CSR_WRITE32_IN_BUFFER_LEN 4
-#define MC_CMD_CSR_WRITE32_IN_BUFFER_MINNUM 1
-#define MC_CMD_CSR_WRITE32_IN_BUFFER_MAXNUM 61
-#define MC_CMD_CSR_WRITE32_IN_BUFFER_MAXNUM_MCDI2 253
-
-/* MC_CMD_CSR_WRITE32_OUT msgresponse */
-#define MC_CMD_CSR_WRITE32_OUT_LEN 4
-#define MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0
-#define MC_CMD_CSR_WRITE32_OUT_STATUS_LEN 4
-
-
-/***********************************/
-/* MC_CMD_HP
- * These commands are used for HP related features. They are grouped under one
- * MCDI command to avoid creating too many MCDI commands.
- */
-#define MC_CMD_HP 0x54
-#undef MC_CMD_0x54_PRIVILEGE_CTG
-
-#define MC_CMD_0x54_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_HP_IN msgrequest */
-#define MC_CMD_HP_IN_LEN 16
-/* HP OCSD sub-command. When address is not NULL, request activation of OCSD at
- * the specified address with the specified interval.When address is NULL,
- * INTERVAL is interpreted as a command: 0: stop OCSD / 1: Report OCSD current
- * state / 2: (debug) Show temperature reported by one of the supported
- * sensors.
- */
-#define MC_CMD_HP_IN_SUBCMD_OFST 0
-#define MC_CMD_HP_IN_SUBCMD_LEN 4
-/* enum: OCSD (Option Card Sensor Data) sub-command. */
-#define MC_CMD_HP_IN_OCSD_SUBCMD 0x0
-/* enum: Last known valid HP sub-command. */
-#define MC_CMD_HP_IN_LAST_SUBCMD 0x0
-/* The address to the array of sensor fields. (Or NULL to use a sub-command.)
- */
-#define MC_CMD_HP_IN_OCSD_ADDR_OFST 4
-#define MC_CMD_HP_IN_OCSD_ADDR_LEN 8
-#define MC_CMD_HP_IN_OCSD_ADDR_LO_OFST 4
-#define MC_CMD_HP_IN_OCSD_ADDR_LO_LEN 4
-#define MC_CMD_HP_IN_OCSD_ADDR_LO_LBN 32
-#define MC_CMD_HP_IN_OCSD_ADDR_LO_WIDTH 32
-#define MC_CMD_HP_IN_OCSD_ADDR_HI_OFST 8
-#define MC_CMD_HP_IN_OCSD_ADDR_HI_LEN 4
-#define MC_CMD_HP_IN_OCSD_ADDR_HI_LBN 64
-#define MC_CMD_HP_IN_OCSD_ADDR_HI_WIDTH 32
-/* The requested update interval, in seconds. (Or the sub-command if ADDR is
- * NULL.)
- */
-#define MC_CMD_HP_IN_OCSD_INTERVAL_OFST 12
-#define MC_CMD_HP_IN_OCSD_INTERVAL_LEN 4
-
-/* MC_CMD_HP_OUT msgresponse */
-#define MC_CMD_HP_OUT_LEN 4
-#define MC_CMD_HP_OUT_OCSD_STATUS_OFST 0
-#define MC_CMD_HP_OUT_OCSD_STATUS_LEN 4
-/* enum: OCSD stopped for this card. */
-#define MC_CMD_HP_OUT_OCSD_STOPPED 0x1
-/* enum: OCSD was successfully started with the address provided. */
-#define MC_CMD_HP_OUT_OCSD_STARTED 0x2
-/* enum: OCSD was already started for this card. */
-#define MC_CMD_HP_OUT_OCSD_ALREADY_STARTED 0x3
-
-
-/***********************************/
-/* MC_CMD_STACKINFO
- * Get stack information.
- */
-#define MC_CMD_STACKINFO 0xf
-#undef MC_CMD_0xf_PRIVILEGE_CTG
-
-#define MC_CMD_0xf_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_STACKINFO_IN msgrequest */
-#define MC_CMD_STACKINFO_IN_LEN 0
-
-/* MC_CMD_STACKINFO_OUT msgresponse */
-#define MC_CMD_STACKINFO_OUT_LENMIN 12
-#define MC_CMD_STACKINFO_OUT_LENMAX 252
-#define MC_CMD_STACKINFO_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_STACKINFO_OUT_LEN(num) (0+12*(num))
-#define MC_CMD_STACKINFO_OUT_THREAD_INFO_NUM(len) (((len)-0)/12)
-/* (thread ptr, stack size, free space) for each thread in system */
-#define MC_CMD_STACKINFO_OUT_THREAD_INFO_OFST 0
-#define MC_CMD_STACKINFO_OUT_THREAD_INFO_LEN 12
-#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MINNUM 1
-#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MAXNUM 21
-#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MAXNUM_MCDI2 85
-
-
-/***********************************/
-/* MC_CMD_MDIO_READ
- * MDIO register read.
- */
-#define MC_CMD_MDIO_READ 0x10
-#undef MC_CMD_0x10_PRIVILEGE_CTG
-
-#define MC_CMD_0x10_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_MDIO_READ_IN msgrequest */
-#define MC_CMD_MDIO_READ_IN_LEN 16
-/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
- * external devices.
- */
-#define MC_CMD_MDIO_READ_IN_BUS_OFST 0
-#define MC_CMD_MDIO_READ_IN_BUS_LEN 4
-/* enum: Internal. */
-#define MC_CMD_MDIO_BUS_INTERNAL 0x0
-/* enum: External. */
-#define MC_CMD_MDIO_BUS_EXTERNAL 0x1
-/* Port address */
-#define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4
-#define MC_CMD_MDIO_READ_IN_PRTAD_LEN 4
-/* Device Address or clause 22. */
-#define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8
-#define MC_CMD_MDIO_READ_IN_DEVAD_LEN 4
-/* enum: By default all the MCDI MDIO operations perform clause45 mode. If you
- * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
- */
-#define MC_CMD_MDIO_CLAUSE22 0x20
-/* Address */
-#define MC_CMD_MDIO_READ_IN_ADDR_OFST 12
-#define MC_CMD_MDIO_READ_IN_ADDR_LEN 4
-
-/* MC_CMD_MDIO_READ_OUT msgresponse */
-#define MC_CMD_MDIO_READ_OUT_LEN 8
-/* Value */
-#define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0
-#define MC_CMD_MDIO_READ_OUT_VALUE_LEN 4
-/* Status the MDIO commands return the raw status bits from the MDIO block. A
- * "good" transaction should have the DONE bit set and all other bits clear.
- */
-#define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4
-#define MC_CMD_MDIO_READ_OUT_STATUS_LEN 4
-/* enum: Good. */
-#define MC_CMD_MDIO_STATUS_GOOD 0x8
-
-
-/***********************************/
-/* MC_CMD_MDIO_WRITE
- * MDIO register write.
- */
-#define MC_CMD_MDIO_WRITE 0x11
-#undef MC_CMD_0x11_PRIVILEGE_CTG
-
-#define MC_CMD_0x11_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_MDIO_WRITE_IN msgrequest */
-#define MC_CMD_MDIO_WRITE_IN_LEN 20
-/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
- * external devices.
- */
-#define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0
-#define MC_CMD_MDIO_WRITE_IN_BUS_LEN 4
-/* enum: Internal. */
-/* MC_CMD_MDIO_BUS_INTERNAL 0x0 */
-/* enum: External. */
-/* MC_CMD_MDIO_BUS_EXTERNAL 0x1 */
-/* Port address */
-#define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4
-#define MC_CMD_MDIO_WRITE_IN_PRTAD_LEN 4
-/* Device Address or clause 22. */
-#define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8
-#define MC_CMD_MDIO_WRITE_IN_DEVAD_LEN 4
-/* enum: By default all the MCDI MDIO operations perform clause45 mode. If you
- * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
- */
-/* MC_CMD_MDIO_CLAUSE22 0x20 */
-/* Address */
-#define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12
-#define MC_CMD_MDIO_WRITE_IN_ADDR_LEN 4
-/* Value */
-#define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16
-#define MC_CMD_MDIO_WRITE_IN_VALUE_LEN 4
-
-/* MC_CMD_MDIO_WRITE_OUT msgresponse */
-#define MC_CMD_MDIO_WRITE_OUT_LEN 4
-/* Status; the MDIO commands return the raw status bits from the MDIO block. A
- * "good" transaction should have the DONE bit set and all other bits clear.
- */
-#define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0
-#define MC_CMD_MDIO_WRITE_OUT_STATUS_LEN 4
-/* enum: Good. */
-/* MC_CMD_MDIO_STATUS_GOOD 0x8 */
-
-
-/***********************************/
-/* MC_CMD_DBI_WRITE
- * Write DBI register(s).
- */
-#define MC_CMD_DBI_WRITE 0x12
-#undef MC_CMD_0x12_PRIVILEGE_CTG
-
-#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_DBI_WRITE_IN msgrequest */
-#define MC_CMD_DBI_WRITE_IN_LENMIN 12
-#define MC_CMD_DBI_WRITE_IN_LENMAX 252
-#define MC_CMD_DBI_WRITE_IN_LENMAX_MCDI2 1020
-#define MC_CMD_DBI_WRITE_IN_LEN(num) (0+12*(num))
-#define MC_CMD_DBI_WRITE_IN_DBIWROP_NUM(len) (((len)-0)/12)
-/* Each write op consists of an address (offset 0), byte enable/VF/CS2 (offset
- * 32) and value (offset 64). See MC_CMD_DBIWROP_TYPEDEF.
- */
-#define MC_CMD_DBI_WRITE_IN_DBIWROP_OFST 0
-#define MC_CMD_DBI_WRITE_IN_DBIWROP_LEN 12
-#define MC_CMD_DBI_WRITE_IN_DBIWROP_MINNUM 1
-#define MC_CMD_DBI_WRITE_IN_DBIWROP_MAXNUM 21
-#define MC_CMD_DBI_WRITE_IN_DBIWROP_MAXNUM_MCDI2 85
-
-/* MC_CMD_DBI_WRITE_OUT msgresponse */
-#define MC_CMD_DBI_WRITE_OUT_LEN 0
-
-/* MC_CMD_DBIWROP_TYPEDEF structuredef */
-#define MC_CMD_DBIWROP_TYPEDEF_LEN 12
-#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0
-#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LEN 4
-#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0
-#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32
-#define MC_CMD_DBIWROP_TYPEDEF_PARMS_OFST 4
-#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LEN 4
-#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_OFST 4
-#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN 16
-#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_WIDTH 16
-#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_OFST 4
-#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN 15
-#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_WIDTH 1
-#define MC_CMD_DBIWROP_TYPEDEF_CS2_OFST 4
-#define MC_CMD_DBIWROP_TYPEDEF_CS2_LBN 14
-#define MC_CMD_DBIWROP_TYPEDEF_CS2_WIDTH 1
-#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LBN 32
-#define MC_CMD_DBIWROP_TYPEDEF_PARMS_WIDTH 32
-#define MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8
-#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LEN 4
-#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64
-#define MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32
-
-
-/***********************************/
-/* MC_CMD_PORT_READ32
- * Read a 32-bit register from the indirect port register map. The port to
- * access is implied by the Shared memory channel used.
- */
-#define MC_CMD_PORT_READ32 0x14
-
-/* MC_CMD_PORT_READ32_IN msgrequest */
-#define MC_CMD_PORT_READ32_IN_LEN 4
-/* Address */
-#define MC_CMD_PORT_READ32_IN_ADDR_OFST 0
-#define MC_CMD_PORT_READ32_IN_ADDR_LEN 4
-
-/* MC_CMD_PORT_READ32_OUT msgresponse */
-#define MC_CMD_PORT_READ32_OUT_LEN 8
-/* Value */
-#define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0
-#define MC_CMD_PORT_READ32_OUT_VALUE_LEN 4
-/* Status */
-#define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4
-#define MC_CMD_PORT_READ32_OUT_STATUS_LEN 4
-
-
-/***********************************/
-/* MC_CMD_PORT_WRITE32
- * Write a 32-bit register to the indirect port register map. The port to
- * access is implied by the Shared memory channel used.
- */
-#define MC_CMD_PORT_WRITE32 0x15
-
-/* MC_CMD_PORT_WRITE32_IN msgrequest */
-#define MC_CMD_PORT_WRITE32_IN_LEN 8
-/* Address */
-#define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0
-#define MC_CMD_PORT_WRITE32_IN_ADDR_LEN 4
-/* Value */
-#define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4
-#define MC_CMD_PORT_WRITE32_IN_VALUE_LEN 4
-
-/* MC_CMD_PORT_WRITE32_OUT msgresponse */
-#define MC_CMD_PORT_WRITE32_OUT_LEN 4
-/* Status */
-#define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0
-#define MC_CMD_PORT_WRITE32_OUT_STATUS_LEN 4
-
-
-/***********************************/
-/* MC_CMD_PORT_READ128
- * Read a 128-bit register from the indirect port register map. The port to
- * access is implied by the Shared memory channel used.
- */
-#define MC_CMD_PORT_READ128 0x16
-
-/* MC_CMD_PORT_READ128_IN msgrequest */
-#define MC_CMD_PORT_READ128_IN_LEN 4
-/* Address */
-#define MC_CMD_PORT_READ128_IN_ADDR_OFST 0
-#define MC_CMD_PORT_READ128_IN_ADDR_LEN 4
-
-/* MC_CMD_PORT_READ128_OUT msgresponse */
-#define MC_CMD_PORT_READ128_OUT_LEN 20
-/* Value */
-#define MC_CMD_PORT_READ128_OUT_VALUE_OFST 0
-#define MC_CMD_PORT_READ128_OUT_VALUE_LEN 16
-/* Status */
-#define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16
-#define MC_CMD_PORT_READ128_OUT_STATUS_LEN 4
-
-
-/***********************************/
-/* MC_CMD_PORT_WRITE128
- * Write a 128-bit register to the indirect port register map. The port to
- * access is implied by the Shared memory channel used.
- */
-#define MC_CMD_PORT_WRITE128 0x17
-
-/* MC_CMD_PORT_WRITE128_IN msgrequest */
-#define MC_CMD_PORT_WRITE128_IN_LEN 20
-/* Address */
-#define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0
-#define MC_CMD_PORT_WRITE128_IN_ADDR_LEN 4
-/* Value */
-#define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4
-#define MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16
-
-/* MC_CMD_PORT_WRITE128_OUT msgresponse */
-#define MC_CMD_PORT_WRITE128_OUT_LEN 4
-/* Status */
-#define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0
-#define MC_CMD_PORT_WRITE128_OUT_STATUS_LEN 4
-
-/* MC_CMD_CAPABILITIES structuredef */
-#define MC_CMD_CAPABILITIES_LEN 4
-/* Small buf table. */
-#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_LBN 0
-#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_WIDTH 1
-/* Turbo mode (for Maranello). */
-#define MC_CMD_CAPABILITIES_TURBO_LBN 1
-#define MC_CMD_CAPABILITIES_TURBO_WIDTH 1
-/* Turbo mode active (for Maranello). */
-#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN 2
-#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_WIDTH 1
-/* PTP offload. */
-#define MC_CMD_CAPABILITIES_PTP_LBN 3
-#define MC_CMD_CAPABILITIES_PTP_WIDTH 1
-/* AOE mode. */
-#define MC_CMD_CAPABILITIES_AOE_LBN 4
-#define MC_CMD_CAPABILITIES_AOE_WIDTH 1
-/* AOE mode active. */
-#define MC_CMD_CAPABILITIES_AOE_ACTIVE_LBN 5
-#define MC_CMD_CAPABILITIES_AOE_ACTIVE_WIDTH 1
-/* AOE mode active. */
-#define MC_CMD_CAPABILITIES_FC_ACTIVE_LBN 6
-#define MC_CMD_CAPABILITIES_FC_ACTIVE_WIDTH 1
-#define MC_CMD_CAPABILITIES_RESERVED_LBN 7
-#define MC_CMD_CAPABILITIES_RESERVED_WIDTH 25
+/* MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT msgresponse */
+#define MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_LEN 8
+/* Current value set in NIC, in seconds */
+#define MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_CURRENT_OFST 0
+#define MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_CURRENT_LEN 4
+/* Maximum supported by NIC, in seconds */
+#define MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_MAXIMUM_OFST 4
+#define MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_MAXIMUM_LEN 4
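A sketch of querying these values, under the same MCDI helper conventions as the subscribe example above (illustration, not the driver's actual code):

/* Illustrative only: read the current and maximum IN_SYNC timeout. */
MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_GET_SYNC_TIMEOUT_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_LEN);
size_t outlen;
int rc;

MCDI_SET_DWORD(inbuf, PTP_IN_CMD, MC_CMD_PTP_OP_GET_SYNC_TIMEOUT);
MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);

rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
		  outbuf, sizeof(outbuf), &outlen);
if (rc == 0 && outlen >= MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_LEN) {
	unsigned int cur = MCDI_DWORD(outbuf, PTP_OUT_GET_SYNC_TIMEOUT_CURRENT);
	unsigned int max = MCDI_DWORD(outbuf, PTP_OUT_GET_SYNC_TIMEOUT_MAXIMUM);
	/* both values are in seconds, per the field comments above */
}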
/***********************************/
@@ -4427,112 +3897,6 @@
/***********************************/
-/* MC_CMD_DBI_READX
- * Read DBI register(s) -- extended functionality
- */
-#define MC_CMD_DBI_READX 0x19
-#undef MC_CMD_0x19_PRIVILEGE_CTG
-
-#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_DBI_READX_IN msgrequest */
-#define MC_CMD_DBI_READX_IN_LENMIN 8
-#define MC_CMD_DBI_READX_IN_LENMAX 248
-#define MC_CMD_DBI_READX_IN_LENMAX_MCDI2 1016
-#define MC_CMD_DBI_READX_IN_LEN(num) (0+8*(num))
-#define MC_CMD_DBI_READX_IN_DBIRDOP_NUM(len) (((len)-0)/8)
-/* Each Read op consists of an address (offset 0), VF/CS2) */
-#define MC_CMD_DBI_READX_IN_DBIRDOP_OFST 0
-#define MC_CMD_DBI_READX_IN_DBIRDOP_LEN 8
-#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_OFST 0
-#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_LEN 4
-#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_LBN 0
-#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_WIDTH 32
-#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_OFST 4
-#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_LEN 4
-#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_LBN 32
-#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_WIDTH 32
-#define MC_CMD_DBI_READX_IN_DBIRDOP_MINNUM 1
-#define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM 31
-#define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM_MCDI2 127
-
-/* MC_CMD_DBI_READX_OUT msgresponse */
-#define MC_CMD_DBI_READX_OUT_LENMIN 4
-#define MC_CMD_DBI_READX_OUT_LENMAX 252
-#define MC_CMD_DBI_READX_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_DBI_READX_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_DBI_READX_OUT_VALUE_NUM(len) (((len)-0)/4)
-/* Value */
-#define MC_CMD_DBI_READX_OUT_VALUE_OFST 0
-#define MC_CMD_DBI_READX_OUT_VALUE_LEN 4
-#define MC_CMD_DBI_READX_OUT_VALUE_MINNUM 1
-#define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM 63
-#define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM_MCDI2 255
-
-/* MC_CMD_DBIRDOP_TYPEDEF structuredef */
-#define MC_CMD_DBIRDOP_TYPEDEF_LEN 8
-#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_OFST 0
-#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LEN 4
-#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LBN 0
-#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_WIDTH 32
-#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_OFST 4
-#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LEN 4
-#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_OFST 4
-#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_LBN 16
-#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_WIDTH 16
-#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_OFST 4
-#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_LBN 15
-#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_WIDTH 1
-#define MC_CMD_DBIRDOP_TYPEDEF_CS2_OFST 4
-#define MC_CMD_DBIRDOP_TYPEDEF_CS2_LBN 14
-#define MC_CMD_DBIRDOP_TYPEDEF_CS2_WIDTH 1
-#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LBN 32
-#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_WIDTH 32
-
-
-/***********************************/
-/* MC_CMD_SET_RAND_SEED
- * Set the 16byte seed for the MC pseudo-random generator.
- */
-#define MC_CMD_SET_RAND_SEED 0x1a
-#undef MC_CMD_0x1a_PRIVILEGE_CTG
-
-#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_SET_RAND_SEED_IN msgrequest */
-#define MC_CMD_SET_RAND_SEED_IN_LEN 16
-/* Seed value. */
-#define MC_CMD_SET_RAND_SEED_IN_SEED_OFST 0
-#define MC_CMD_SET_RAND_SEED_IN_SEED_LEN 16
-
-/* MC_CMD_SET_RAND_SEED_OUT msgresponse */
-#define MC_CMD_SET_RAND_SEED_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_LTSSM_HIST
- * Retrieve the history of the LTSSM, if the build supports it.
- */
-#define MC_CMD_LTSSM_HIST 0x1b
-
-/* MC_CMD_LTSSM_HIST_IN msgrequest */
-#define MC_CMD_LTSSM_HIST_IN_LEN 0
-
-/* MC_CMD_LTSSM_HIST_OUT msgresponse */
-#define MC_CMD_LTSSM_HIST_OUT_LENMIN 0
-#define MC_CMD_LTSSM_HIST_OUT_LENMAX 252
-#define MC_CMD_LTSSM_HIST_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_LTSSM_HIST_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_LTSSM_HIST_OUT_DATA_NUM(len) (((len)-0)/4)
-/* variable number of LTSSM values, as bytes. The history is read-to-clear. */
-#define MC_CMD_LTSSM_HIST_OUT_DATA_OFST 0
-#define MC_CMD_LTSSM_HIST_OUT_DATA_LEN 4
-#define MC_CMD_LTSSM_HIST_OUT_DATA_MINNUM 0
-#define MC_CMD_LTSSM_HIST_OUT_DATA_MAXNUM 63
-#define MC_CMD_LTSSM_HIST_OUT_DATA_MAXNUM_MCDI2 255
-
-
-/***********************************/
/* MC_CMD_DRV_ATTACH
* Inform MCPU that this port is managed on the host (i.e. driver active). For
* Huntington, also request the preferred datapath firmware to use if possible
@@ -4705,6 +4069,7 @@
/* Flags associated with this function */
#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4
#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_LEN 4
+/* enum property: bitshift */
/* enum: Labels the lowest-numbered function visible to the OS */
#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY 0x0
/* enum: The function can control the link state of the physical port it is
@@ -4732,22 +4097,6 @@
/***********************************/
-/* MC_CMD_SHMUART
- * Route UART output to circular buffer in shared memory instead.
- */
-#define MC_CMD_SHMUART 0x1f
-
-/* MC_CMD_SHMUART_IN msgrequest */
-#define MC_CMD_SHMUART_IN_LEN 4
-/* ??? */
-#define MC_CMD_SHMUART_IN_FLAG_OFST 0
-#define MC_CMD_SHMUART_IN_FLAG_LEN 4
-
-/* MC_CMD_SHMUART_OUT msgresponse */
-#define MC_CMD_SHMUART_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_PORT_RESET
* Generic per-port reset. There is no equivalent for per-board reset. Locks
* required: None; Return code: 0, ETIME. NOTE: This command is deprecated -
@@ -4790,100 +4139,6 @@
/***********************************/
-/* MC_CMD_PCIE_CREDITS
- * Read instantaneous and minimum flow control thresholds.
- */
-#define MC_CMD_PCIE_CREDITS 0x21
-
-/* MC_CMD_PCIE_CREDITS_IN msgrequest */
-#define MC_CMD_PCIE_CREDITS_IN_LEN 8
-/* poll period. 0 is disabled */
-#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0
-#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_LEN 4
-/* wipe statistics */
-#define MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4
-#define MC_CMD_PCIE_CREDITS_IN_WIPE_LEN 4
-
-/* MC_CMD_PCIE_CREDITS_OUT msgresponse */
-#define MC_CMD_PCIE_CREDITS_OUT_LEN 16
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_OFST 0
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_LEN 2
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_OFST 2
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_LEN 2
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_OFST 4
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_LEN 2
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_OFST 6
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_LEN 2
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_OFST 8
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_LEN 2
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_OFST 10
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_LEN 2
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_OFST 12
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_LEN 2
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_OFST 14
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_LEN 2
-
-
-/***********************************/
-/* MC_CMD_RXD_MONITOR
- * Get histogram of RX queue fill level.
- */
-#define MC_CMD_RXD_MONITOR 0x22
-
-/* MC_CMD_RXD_MONITOR_IN msgrequest */
-#define MC_CMD_RXD_MONITOR_IN_LEN 12
-#define MC_CMD_RXD_MONITOR_IN_QID_OFST 0
-#define MC_CMD_RXD_MONITOR_IN_QID_LEN 4
-#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_OFST 4
-#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_LEN 4
-#define MC_CMD_RXD_MONITOR_IN_WIPE_OFST 8
-#define MC_CMD_RXD_MONITOR_IN_WIPE_LEN 4
-
-/* MC_CMD_RXD_MONITOR_OUT msgresponse */
-#define MC_CMD_RXD_MONITOR_OUT_LEN 80
-#define MC_CMD_RXD_MONITOR_OUT_QID_OFST 0
-#define MC_CMD_RXD_MONITOR_OUT_QID_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_OFST 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_OFST 8
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_OFST 12
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_OFST 16
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_OFST 20
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_OFST 24
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_OFST 28
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_OFST 32
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_OFST 36
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_OFST 40
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_OFST 44
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_OFST 48
-#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_OFST 52
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_OFST 56
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_OFST 60
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_OFST 64
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_OFST 68
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_OFST 72
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_OFST 76
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_LEN 4
-
-
-/***********************************/
/* MC_CMD_PUTS
* Copy the given ASCII string out onto UART and/or out of the network port.
*/
@@ -4931,6 +4186,54 @@
/* MC_CMD_GET_PHY_CFG_IN msgrequest */
#define MC_CMD_GET_PHY_CFG_IN_LEN 0
+/* MC_CMD_GET_PHY_CFG_IN_V2 msgrequest */
+#define MC_CMD_GET_PHY_CFG_IN_V2_LEN 8
+/* Target port to request PHY state for. Uses MAE_LINK_ENDPOINT_SELECTOR which
+ * identifies a real or virtual network port by MAE port and link end. See the
+ * structure definition for more details.
+ */
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LEN 8
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LO_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LO_LEN 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LO_LBN 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LO_WIDTH 32
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_HI_OFST 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_HI_LEN 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_HI_LBN 32
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FLAT_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_TYPE_OFST 3
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 20
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 16
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 2
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LINK_END_OFST 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LINK_END_LEN 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_LEN 8
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_LO_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_LO_LBN 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_HI_OFST 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_HI_LBN 32
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_HI_WIDTH 32
+
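To illustrate the TARGET layout, a sketch of addressing a physical port: the low dword is the MAE m-port selector and the high dword selects the link end, with the TYPE byte and PPORT_ID nibble placed per the OFST/LBN definitions above. The concrete TYPE and LINK_END enum values are defined elsewhere, so placeholder variables stand in for them here:

/* Illustrative only: build the 64-bit TARGET for GET_PHY_CFG_IN_V2. */
MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PHY_CFG_IN_V2_LEN);
unsigned int selector;

selector = (selector_type_pport << 24) |	/* TYPE: byte 3 of the selector */
	   (pport_id & 0xf);			/* PPORT_ID: bits 0-3 */

MCDI_SET_DWORD(inbuf, GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR, selector);
MCDI_SET_DWORD(inbuf, GET_PHY_CFG_IN_V2_TARGET_LINK_END, link_end_mac);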
/* MC_CMD_GET_PHY_CFG_OUT msgresponse */
#define MC_CMD_GET_PHY_CFG_OUT_LEN 72
/* flags */
@@ -5026,6 +4329,9 @@
#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_OFST 8
#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN 21
#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_WIDTH 1
+#define MC_CMD_PHY_CAP_200000FDX_OFST 8
+#define MC_CMD_PHY_CAP_200000FDX_LBN 22
+#define MC_CMD_PHY_CAP_200000FDX_WIDTH 1
/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12
#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_LEN 4
@@ -5059,6 +4365,7 @@
#define MC_CMD_MEDIA_DSFP 0x8
#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_LEN 4
+/* enum property: bitshift */
/* enum: Native clause 22 */
#define MC_CMD_MMD_CLAUSE22 0x0
#define MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */
@@ -5084,7 +4391,7 @@
#define MC_CMD_START_BIST 0x25
#undef MC_CMD_0x25_PRIVILEGE_CTG
-#define MC_CMD_0x25_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x25_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_START_BIST_IN msgrequest */
#define MC_CMD_START_BIST_IN_LEN 4
@@ -5124,7 +4431,7 @@
#define MC_CMD_POLL_BIST 0x26
#undef MC_CMD_0x26_PRIVILEGE_CTG
-#define MC_CMD_0x26_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x26_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_POLL_BIST_IN msgrequest */
#define MC_CMD_POLL_BIST_IN_LEN 0
@@ -5282,33 +4589,6 @@
/***********************************/
-/* MC_CMD_FLUSH_RX_QUEUES
- * Flush receive queue(s). If SRIOV is enabled (via MC_CMD_SRIOV), then RXQ
- * flushes should be initiated via this MCDI operation, rather than via
- * directly writing FLUSH_CMD.
- *
- * The flush is completed (either done/fail) asynchronously (after this command
- * returns). The driver must still wait for flush done/failure events as usual.
- */
-#define MC_CMD_FLUSH_RX_QUEUES 0x27
-
-/* MC_CMD_FLUSH_RX_QUEUES_IN msgrequest */
-#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMIN 4
-#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX 252
-#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX_MCDI2 1020
-#define MC_CMD_FLUSH_RX_QUEUES_IN_LEN(num) (0+4*(num))
-#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_NUM(len) (((len)-0)/4)
-#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_OFST 0
-#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_LEN 4
-#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MINNUM 1
-#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM 63
-#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM_MCDI2 255
-
-/* MC_CMD_FLUSH_RX_QUEUES_OUT msgresponse */
-#define MC_CMD_FLUSH_RX_QUEUES_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_GET_LOOPBACK_MODES
* Returns a bitmask of loopback modes available at each speed.
*/
@@ -5320,6 +4600,54 @@
/* MC_CMD_GET_LOOPBACK_MODES_IN msgrequest */
#define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0
+/* MC_CMD_GET_LOOPBACK_MODES_IN_V2 msgrequest */
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_LEN 8
+/* Target port to request loopback modes for. Uses MAE_LINK_ENDPOINT_SELECTOR
+ * which identifies a real or virtual network port by MAE port and link end.
+ * See the structure definition for more details.
+ */
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LO_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LO_LBN 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_HI_OFST 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_HI_LBN 32
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FLAT_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_TYPE_OFST 3
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 20
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 16
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 2
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LINK_END_OFST 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LINK_END_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_LO_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_LO_LBN 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_HI_OFST 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_HI_LBN 32
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_HI_WIDTH 32
+
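/*
 * Editor's sketch (not part of this diff): how a driver might fill the
 * 8-byte TARGET selector defined above, assuming these MCDI definitions are
 * in scope and that MCDI payloads are little-endian byte buffers. The
 * mcdi_put_le32() helper is hypothetical; the selector and link-end values
 * come from the MAE_LINK_ENDPOINT_SELECTOR definitions elsewhere in this
 * file.
 */
#include <stdint.h>

static void mcdi_put_le32(uint8_t *p, uint32_t v)
{
	p[0] = v & 0xff;		/* little-endian dword encoding */
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = (v >> 24) & 0xff;
}

static void loopback_modes_in_v2(uint8_t buf[MC_CMD_GET_LOOPBACK_MODES_IN_V2_LEN],
				 uint32_t mport_selector, uint32_t link_end)
{
	/* TARGET = 4-byte m-port selector followed by 4-byte link end */
	mcdi_put_le32(buf + MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_OFST,
		      mport_selector);
	mcdi_put_le32(buf + MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LINK_END_OFST,
		      link_end);
}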
/* MC_CMD_GET_LOOPBACK_MODES_OUT msgresponse */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 40
/* Supported loopbacks. */
@@ -5333,6 +4661,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_LBN 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_WIDTH 32
+/* enum property: bitshift */
/* enum: None. */
#define MC_CMD_LOOPBACK_NONE 0x0
/* enum: Data. */
@@ -5422,6 +4751,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_LBN 96
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
@@ -5435,6 +4765,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_LBN 160
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
@@ -5448,6 +4779,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_LBN 224
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
@@ -5461,6 +4793,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_LBN 288
#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
@@ -5479,6 +4812,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_LBN 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_WIDTH 32
+/* enum property: bitshift */
/* enum: None. */
/* MC_CMD_LOOPBACK_NONE 0x0 */
/* enum: Data. */
@@ -5568,6 +4902,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_LBN 96
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
@@ -5581,6 +4916,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_LBN 160
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
@@ -5594,6 +4930,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_LBN 224
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
@@ -5607,6 +4944,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_LBN 288
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported 25G loopbacks. */
@@ -5620,6 +4958,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_LBN 352
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
+/* Supported 50G loopbacks. */
@@ -5633,6 +4972,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_LBN 416
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported 100G loopbacks. */
@@ -5646,6 +4986,214 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_LBN 480
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+
+/* MC_CMD_GET_LOOPBACK_MODES_OUT_V3 msgresponse: Supported loopback modes for
+ * newer NICs with 200G support
+ */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_LEN 72
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_LO_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_LO_LBN 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_HI_OFST 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_HI_LBN 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_HI_WIDTH 32
+/* enum property: bitshift */
+/* enum: None. */
+/* MC_CMD_LOOPBACK_NONE 0x0 */
+/* enum: Data. */
+/* MC_CMD_LOOPBACK_DATA 0x1 */
+/* enum: GMAC. */
+/* MC_CMD_LOOPBACK_GMAC 0x2 */
+/* enum: XGMII. */
+/* MC_CMD_LOOPBACK_XGMII 0x3 */
+/* enum: XGXS. */
+/* MC_CMD_LOOPBACK_XGXS 0x4 */
+/* enum: XAUI. */
+/* MC_CMD_LOOPBACK_XAUI 0x5 */
+/* enum: GMII. */
+/* MC_CMD_LOOPBACK_GMII 0x6 */
+/* enum: SGMII. */
+/* MC_CMD_LOOPBACK_SGMII 0x7 */
+/* enum: XGBR. */
+/* MC_CMD_LOOPBACK_XGBR 0x8 */
+/* enum: XFI. */
+/* MC_CMD_LOOPBACK_XFI 0x9 */
+/* enum: XAUI Far. */
+/* MC_CMD_LOOPBACK_XAUI_FAR 0xa */
+/* enum: GMII Far. */
+/* MC_CMD_LOOPBACK_GMII_FAR 0xb */
+/* enum: SGMII Far. */
+/* MC_CMD_LOOPBACK_SGMII_FAR 0xc */
+/* enum: XFI Far. */
+/* MC_CMD_LOOPBACK_XFI_FAR 0xd */
+/* enum: GPhy. */
+/* MC_CMD_LOOPBACK_GPHY 0xe */
+/* enum: PhyXS. */
+/* MC_CMD_LOOPBACK_PHYXS 0xf */
+/* enum: PCS. */
+/* MC_CMD_LOOPBACK_PCS 0x10 */
+/* enum: PMA-PMD. */
+/* MC_CMD_LOOPBACK_PMAPMD 0x11 */
+/* enum: Cross-Port. */
+/* MC_CMD_LOOPBACK_XPORT 0x12 */
+/* enum: XGMII-Wireside. */
+/* MC_CMD_LOOPBACK_XGMII_WS 0x13 */
+/* enum: XAUI Wireside. */
+/* MC_CMD_LOOPBACK_XAUI_WS 0x14 */
+/* enum: XAUI Wireside Far. */
+/* MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 */
+/* enum: XAUI Wireside near. */
+/* MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 */
+/* enum: GMII Wireside. */
+/* MC_CMD_LOOPBACK_GMII_WS 0x17 */
+/* enum: XFI Wireside. */
+/* MC_CMD_LOOPBACK_XFI_WS 0x18 */
+/* enum: XFI Wireside Far. */
+/* MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 */
+/* enum: PhyXS Wireside. */
+/* MC_CMD_LOOPBACK_PHYXS_WS 0x1a */
+/* enum: PMA lanes MAC-Serdes. */
+/* MC_CMD_LOOPBACK_PMA_INT 0x1b */
+/* enum: KR Serdes Parallel (Encoder). */
+/* MC_CMD_LOOPBACK_SD_NEAR 0x1c */
+/* enum: KR Serdes Serial. */
+/* MC_CMD_LOOPBACK_SD_FAR 0x1d */
+/* enum: PMA lanes MAC-Serdes Wireside. */
+/* MC_CMD_LOOPBACK_PMA_INT_WS 0x1e */
+/* enum: KR Serdes Parallel Wireside (Full PCS). */
+/* MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f */
+/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */
+/* MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20 */
+/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */
+/* MC_CMD_LOOPBACK_SD_FEP_WS 0x21 */
+/* enum: KR Serdes Serial Wireside. */
+/* MC_CMD_LOOPBACK_SD_FES_WS 0x22 */
+/* enum: Near side of AOE Siena side port */
+/* MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23 */
+/* enum: Medford Wireside datapath loopback */
+/* MC_CMD_LOOPBACK_DATA_WS 0x24 */
+/* enum: Force link up without setting up any physical loopback (snapper use
+ * only)
+ */
+/* MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25 */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_LO_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_LO_LBN 64
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_HI_OFST 12
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_HI_LBN 96
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_LO_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_LO_LBN 128
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_HI_OFST 20
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_HI_LBN 160
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_LO_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_LO_LBN 192
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_HI_OFST 28
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_HI_LBN 224
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_LO_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_LO_LBN 256
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_HI_OFST 36
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_HI_LBN 288
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 25G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_OFST 40
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_LO_OFST 40
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_LO_LBN 320
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_HI_OFST 44
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_HI_LBN 352
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 50G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_OFST 48
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_LO_OFST 48
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_LO_LBN 384
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_HI_OFST 52
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_HI_LBN 416
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 100G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_OFST 56
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_LO_OFST 56
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_LO_LBN 448
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_HI_OFST 60
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_HI_LBN 480
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 200G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_OFST 64
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_LO_OFST 64
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_LO_LBN 512
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_HI_OFST 68
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_HI_LBN 544
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
@@ -5673,13 +5221,835 @@
#define FEC_TYPE_TYPE_LEN 4
/* enum: No FEC */
#define MC_CMD_FEC_NONE 0x0
-/* enum: Clause 74 BASE-R FEC (a.k.a Firecode) */
+/* enum: IEEE 802.3, Clause 74 BASE-R FEC (a.k.a. Firecode) */
#define MC_CMD_FEC_BASER 0x1
-/* enum: Clause 91/Clause 108 Reed-Solomon FEC */
+/* enum: IEEE 802.3, Clause 91/Clause 108 Reed-Solomon FEC */
#define MC_CMD_FEC_RS 0x2
+/* enum: IEEE 802.3, Clause 161, interleaved RS-FEC sublayer for 100GBASE-R
+ * PHYs
+ */
+#define MC_CMD_FEC_IEEE_RS_INT 0x3
+/* enum: Ethernet Consortium, Low Latency RS-FEC. RS(272, 258). Replaces FEC
+ * specified in Clause 119 for 100/200G PHY. Replaces FEC specified in Clause
+ * 134 for 50G PHY.
+ */
+#define MC_CMD_FEC_ETCS_RS_LL 0x4
+/* enum: FEC mode selected automatically */
+#define MC_CMD_FEC_AUTO 0x5
#define FEC_TYPE_TYPE_LBN 0
#define FEC_TYPE_TYPE_WIDTH 32
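/*
 * Editor's sketch (not part of this diff): FEC_TYPE values are used two
 * ways in this file -- as plain values (e.g. the FEC_MODE byte in
 * MC_CMD_LINK_CTRL_IN below) and as bit indices in fields marked
 * "enum property: bitshift" (e.g. MC_CMD_ETH_AN_FIELDS/FEC_MASK). A mask
 * test therefore shifts by the enum value:
 */
#include <stdbool.h>
#include <stdint.h>

static bool fec_in_mask(uint32_t fec_mask, unsigned int fec_type)
{
	/* fec_type is an MC_CMD_FEC_* value used as a bit index */
	return (fec_mask >> fec_type) & 1u;
}
/* e.g. fec_in_mask(mask, MC_CMD_FEC_RS) tests for Reed-Solomon FEC. */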
+/* MC_CMD_ETH_TECH structuredef: Ethernet technology as defined by IEEE 802.3,
+ * the Ethernet Technology Consortium, or proprietary technologies. The driver
+ * must not use technologies labelled NONE and AUTO.
+ */
+#define MC_CMD_ETH_TECH_LEN 16
+/* The enums in this field can be used either as bitwise indices into a tech
+ * mask (e.g. MC_CMD_ETH_AN_FIELDS/TECH_MASK) or as regular enums (e.g.
+ * MC_CMD_LINK_CTRL_IN/ADVERTISED_TECH_ABILITIES_MASK). This structure must be
+ * updated to add new technologies when projects that need them arise. An
+ * incomplete list of possible future expansions includes: 100GBASE_KP4,
+ * 800GBASE_CR8, 800GBASE_KR8, 800GBASE_DR8, 800GBASE_SR8, 800GBASE_VR8.
+ */
+#define MC_CMD_ETH_TECH_TECH_OFST 0
+#define MC_CMD_ETH_TECH_TECH_LEN 16
+/* enum: 1000BASE-KX - 1000BASE-X PCS/PMA over an electrical backplane PMD. See
+ * IEEE 802.3 Clause 70
+ */
+#define MC_CMD_ETH_TECH_1000BASEKX 0x0
+/* enum: 10GBASE-R - PCS/PMA over an electrical backplane PMD. Refer to IEEE
+ * 802.3 Clause 72
+ */
+#define MC_CMD_ETH_TECH_10GBASE_KR 0x1
+/* enum: 40GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3
+ * Clause 84.
+ */
+#define MC_CMD_ETH_TECH_40GBASE_KR4 0x2
+/* enum: 40GBASE-R PCS/PMA over 4 lane shielded copper balanced cable PMD. See
+ * IEEE 802.3 Clause 85
+ */
+#define MC_CMD_ETH_TECH_40GBASE_CR4 0x3
+/* enum: 40GBASE-R PCS/PMA over 4 lane multimode fiber PMD as specified in
+ * Clause 86
+ */
+#define MC_CMD_ETH_TECH_40GBASE_SR4 0x4
+/* enum: 40GBASE-R PCS/PMA over 4 WDM lane single mode fiber PMD with long
+ * reach. See IEEE 802.3 Clause 87
+ */
+#define MC_CMD_ETH_TECH_40GBASE_LR4 0x5
+/* enum: 25GBASE-R PCS/PMA over shielded balanced copper cable PMD. See IEEE
+ * 802.3 Clause 110
+ */
+#define MC_CMD_ETH_TECH_25GBASE_CR 0x6
+/* enum: 25GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3
+ * Clause 111
+ */
+#define MC_CMD_ETH_TECH_25GBASE_KR 0x7
+/* enum: 25GBASE-R PCS/PMA over multimode fiber PMD. Refer to IEEE 802.3 Clause
+ * 112
+ */
+#define MC_CMD_ETH_TECH_25GBASE_SR 0x8
+/* enum: An Ethernet Physical layer operating at 50 Gb/s on twin-axial copper
+ * cable. Refer to Ethernet Technology Consortium 25/50G Ethernet Spec.
+ */
+#define MC_CMD_ETH_TECH_50GBASE_CR2 0x9
+/* enum: An Ethernet Physical layer operating at 50 Gb/s on copper backplane.
+ * Refer to Ethernet Technology Consortium 25/50G Ethernet Spec.
+ */
+#define MC_CMD_ETH_TECH_50GBASE_KR2 0xa
+/* enum: 100GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3
+ * Clause 93
+ */
+#define MC_CMD_ETH_TECH_100GBASE_KR4 0xb
+/* enum: 100GBASE-R PCS/PMA over 4 lane multimode fiber PMD. See IEEE 802.3
+ * Clause 95
+ */
+#define MC_CMD_ETH_TECH_100GBASE_SR4 0xc
+/* enum: 100GBASE-R PCS/PMA over 4 lane shielded copper balanced cable PMD. See
+ * IEEE 802.3 Clause 92
+ */
+#define MC_CMD_ETH_TECH_100GBASE_CR4 0xd
+/* enum: 100GBASE-R PCS/PMA over 4 WDM lane single mode fiber PMD, with
+ * long/extended reach. See IEEE 802.3 Clause 88
+ */
+#define MC_CMD_ETH_TECH_100GBASE_LR4_ER4 0xe
+/* enum: An Ethernet Physical layer operating at 50 Gb/s on short reach fiber.
+ * Refer to Ethernet Technology Consortium 25/50G Ethernet Spec.
+ */
+#define MC_CMD_ETH_TECH_50GBASE_SR2 0xf
+/* enum: 1000BASE-X PCS/PMA (see IEEE 802.3 Clause 36) over an undefined PMD;
+ * duplex mode unknown
+ */
+#define MC_CMD_ETH_TECH_1000BASEX 0x10
+/* enum: Non-standardised. 10G direct attach */
+#define MC_CMD_ETH_TECH_10GBASE_CR 0x11
+/* enum: 10GBASE-SR fiber over 850nm optics. See IEEE 802.3 Clause 52 */
+#define MC_CMD_ETH_TECH_10GBASE_SR 0x12
+/* enum: 10GBASE-LR fiber over 1310nm optics. See IEEE 802.3 Clause 52 */
+#define MC_CMD_ETH_TECH_10GBASE_LR 0x13
+/* enum: 10GBASE-LRM fiber over 1310 nm optics. See IEEE 802.3 Clause 68 */
+#define MC_CMD_ETH_TECH_10GBASE_LRM 0x14
+/* enum: 10GBASE-ER fiber over 1550nm optics. See IEEE 802.3 Clause 52 */
+#define MC_CMD_ETH_TECH_10GBASE_ER 0x15
+/* enum: 50GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3
+ * Clause 137
+ */
+#define MC_CMD_ETH_TECH_50GBASE_KR 0x16
+/* enum: 50GBASE-SR PCS/PMA over multimode fiber PMD as specified in Clause 138
+ */
+#define MC_CMD_ETH_TECH_50GBASE_SR 0x17
+/* enum: 50GBASE-CR PCS/PMA over shielded copper balanced cable PMD. See IEEE
+ * 802.3 Clause 136
+ */
+#define MC_CMD_ETH_TECH_50GBASE_CR 0x18
+/* enum: 50GBASE-R PCS/PMA over single mode fiber PMD as specified in Clause
+ * 139.
+ */
+#define MC_CMD_ETH_TECH_50GBASE_LR_ER_FR 0x19
+/* enum: 50 Gb/s PHY using 50GBASE-R encoding over single-mode fiber with
+ * reach up to at least 500 m (see IEEE 802.3 Clause 140)
+ */
+#define MC_CMD_ETH_TECH_50GBASE_DR 0x1a
+/* enum: 100GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3
+ * Clause 137
+ */
+#define MC_CMD_ETH_TECH_100GBASE_KR2 0x1b
+/* enum: 100GBASE-R PCS/PMA over 2 lane multimode fiber PMD. See IEEE 802.3
+ * Clause 138
+ */
+#define MC_CMD_ETH_TECH_100GBASE_SR2 0x1c
+/* enum: 100GBASE-R PCS/PMA over 2 lane shielded copper balanced cable PMD. See
+ * IEEE 802.3 Clause 136
+ */
+#define MC_CMD_ETH_TECH_100GBASE_CR2 0x1d
+/* enum: Unknown source */
+#define MC_CMD_ETH_TECH_100GBASE_LR2_ER2_FR2 0x1e
+/* enum: Unknown source */
+#define MC_CMD_ETH_TECH_100GBASE_DR2 0x1f
+/* enum: 200GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3
+ * Clause 137
+ */
+#define MC_CMD_ETH_TECH_200GBASE_KR4 0x20
+/* enum: 200GBASE-R PCS/PMA over 4 lane multimode fiber PMD. See IEEE 802.3
+ * Clause 138
+ */
+#define MC_CMD_ETH_TECH_200GBASE_SR4 0x21
+/* enum: 200GBASE-R PCS/PMA over 4 WDM lane single-mode fiber PMD as specified
+ * in Clause 122
+ */
+#define MC_CMD_ETH_TECH_200GBASE_LR4_ER4_FR4 0x22
+/* enum: 200GBASE-R PCS/PMA over 4-lane single-mode fiber PMD. See IEEE 802.3
+ * Clause 121
+ */
+#define MC_CMD_ETH_TECH_200GBASE_DR4 0x23
+/* enum: 200GBASE-R PCS/PMA over 4 lane shielded copper balanced cable PMD as
+ * specified in Clause 136
+ */
+#define MC_CMD_ETH_TECH_200GBASE_CR4 0x24
+/* enum: Ethernet Technology Consortium 400G AN Spec. 400GBASE-KR8 PMD uses
+ * 802.3 Clause 137, but the number of PMD lanes is 8.
+ */
+#define MC_CMD_ETH_TECH_400GBASE_KR8 0x25
+/* enum: 400GBASE-R PCS/PMA over 8-lane multimode fiber PMD. See IEEE 802.3
+ * Clause 138
+ */
+#define MC_CMD_ETH_TECH_400GBASE_SR8 0x26
+/* enum: 400GBASE-R PCS/PMA over 8 WDM lane single-mode fiber PMD. See IEEE
+ * 802.3 Clause 122
+ */
+#define MC_CMD_ETH_TECH_400GBASE_LR8_ER8_FR8 0x27
+/* enum: Unknown source */
+#define MC_CMD_ETH_TECH_400GBASE_DR8 0x28
+/* enum: Ethernet Technology Consortium 400G AN Spec. 400GBASE-CR8 PMD uses
+ * IEEE 802.3 Clause 136, but the number of PMD lanes is 8.
+ */
+#define MC_CMD_ETH_TECH_400GBASE_CR8 0x29
+/* enum: 100GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3ck
+ * Clause 163.
+ */
+#define MC_CMD_ETH_TECH_100GBASE_KR 0x2a
+/* enum: 100G PHY with PMD as specified in Clause 167 (IEEE 802.3ck) over
+ * short reach fiber
+ */
+#define MC_CMD_ETH_TECH_100GBASE_SR 0x2b
+/* enum: 100G PMD together with single-mode fiber medium. See IEEE 802.3 Clause
+ * 140
+ */
+#define MC_CMD_ETH_TECH_100GBASE_LR_ER_FR 0x2c
+/* enum: 100GBASE-R PCS/PMA over shielded balanced copper cable PMD. See
+ * Clause 162 of IEEE 802.3ck.
+ */
+#define MC_CMD_ETH_TECH_100GBASE_CR 0x2d
+/* enum: 100G PMD together with single-mode fiber medium. See IEEE 802.3 Clause
+ * 140
+ */
+#define MC_CMD_ETH_TECH_100GBASE_DR 0x2e
+/* enum: 200GBASE-R PCS/PMA over an electrical backplane PMD as specified in
+ * Clause 163 of IEEE 802.3ck
+ */
+#define MC_CMD_ETH_TECH_200GBASE_KR2 0x2f
+/* enum: 200G PHY with PMD as specified in Clause 167 (IEEE 802.3ck) over
+ * short reach fiber
+ */
+#define MC_CMD_ETH_TECH_200GBASE_SR2 0x30
+/* enum: Unknown source */
+#define MC_CMD_ETH_TECH_200GBASE_LR2_ER2_FR2 0x31
+/* enum: Unknown source */
+#define MC_CMD_ETH_TECH_200GBASE_DR2 0x32
+/* enum: 200GBASE-R PCS/PMA over 2 lane shielded balanced copper cable PMD as
+ * specified in Clause 162 of IEEE 802.3ck.
+ */
+#define MC_CMD_ETH_TECH_200GBASE_CR2 0x33
+/* enum: 400GBASE-R PCS/PMA over an electrical backplane PMD. See Clause 163
+ * of IEEE 802.3ck.
+ */
+#define MC_CMD_ETH_TECH_400GBASE_KR4 0x34
+/* enum: 400G PHY with PMD over short reach fiber. See Clause 167 of IEEE
+ * 802.3ck.
+ */
+#define MC_CMD_ETH_TECH_400GBASE_SR4 0x35
+/* enum: 400GBASE-R PCS/PMA over 4 WDM lane single-mode fiber PMD. See IEEE
+ * 802.3 Clause 151
+ */
+#define MC_CMD_ETH_TECH_400GBASE_LR4_ER4_FR4 0x36
+/* enum: 400GBASE-R PCS/PMA over 4-lane single-mode fiber PMD as specified in
+ * Clause 124
+ */
+#define MC_CMD_ETH_TECH_400GBASE_DR4 0x37
+/* enum: 400GBASE-R PCS/PMA over 4 lane shielded balanced copper cable PMD as
+ * specified in Clause 162 of IEEE 802.3ck.
+ */
+#define MC_CMD_ETH_TECH_400GBASE_CR4 0x38
+/* enum: Automatic tech mode. The driver must not use this. */
+#define MC_CMD_ETH_TECH_AUTO 0x39
+/* enum: See IEEE 802.3cc-2017 Clause 114 */
+#define MC_CMD_ETH_TECH_25GBASE_LR_ER 0x3a
+/* enum: Up to 7 m over twinaxial copper cable assembly (10 lanes, 10 Gbit/s
+ * each). See IEEE 802.3ba-2010 Clause 85
+ */
+#define MC_CMD_ETH_TECH_100GBASE_CR10 0x3b
+/* enum: Invalid tech mode. The driver must not use this. */
+#define MC_CMD_ETH_TECH_NONE 0x7f
+#define MC_CMD_ETH_TECH_TECH_LBN 0
+#define MC_CMD_ETH_TECH_TECH_WIDTH 128
+
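/*
 * Editor's sketch (not part of this diff): TECH_MASK fields elsewhere in
 * this file (e.g. MC_CMD_ETH_AN_FIELDS/TECH_MASK) are 16-byte bitmasks
 * indexed by the MC_CMD_ETH_TECH_* enums above. Assuming the usual MCDI
 * little-endian layout, bit N lives in byte N / 8:
 */
#include <stdbool.h>
#include <stdint.h>

static bool eth_tech_in_mask(const uint8_t mask[MC_CMD_ETH_TECH_TECH_LEN],
			     unsigned int tech)
{
	/* e.g. tech == MC_CMD_ETH_TECH_100GBASE_CR4 */
	return (mask[tech / 8] >> (tech % 8)) & 1u;
}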
+/* MC_CMD_LINK_STATUS_FLAGS structuredef */
+#define MC_CMD_LINK_STATUS_FLAGS_LEN 8
+/* Flags used to report the current configuration/state of the link. */
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_OFST 0
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_LEN 8
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_LO_OFST 0
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_LO_LEN 4
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_LO_LBN 0
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_LO_WIDTH 32
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_HI_OFST 4
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_HI_LEN 4
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_HI_LBN 32
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_HI_WIDTH 32
+/* enum property: bitshift */
+/* enum: If set, the overall link is up */
+#define MC_CMD_LINK_STATUS_FLAGS_LINK_UP 0x0
+/* enum: If set, the PHY has no external RX link synchronisation */
+#define MC_CMD_LINK_STATUS_FLAGS_NO_PHY_LINK 0x1
+/* enum: If set, PMD/MDI is not connected (e.g. cable disconnected, module cage
+ * empty)
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_PMD_MDI_DISCONNECTED 0x2
+/* enum: Set on error while decoding module data (e.g. module EEPROM does not
+ * contain valid values, has checksum errors, etc.)
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_PMD_BAD 0x3
+/* enum: Set when module unsupported (e.g. unsupported link rate or link
+ * technology)
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_PMD_UNSUPPORTED 0x4
+/* enum: Set on error while communicating with the module (e.g. I2C errors
+ * while reading EEPROM)
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_PMD_COMMS_FAULT 0x5
+/* enum: Set on module overcurrent/overvoltage condition */
+#define MC_CMD_LINK_STATUS_FLAGS_PMD_POWER_FAULT 0x6
+/* enum: Set on module overtemperature condition */
+#define MC_CMD_LINK_STATUS_FLAGS_PMD_THERMAL_FAULT 0x7
+/* enum: If set, the module is indicating Loss of Signal */
+#define MC_CMD_LINK_STATUS_FLAGS_PMD_LOS 0x8
+/* enum: If set, PMA is indicating loss of CDR lock (clock sync) */
+#define MC_CMD_LINK_STATUS_FLAGS_PMA_NO_CDR_LOCK 0x9
+/* enum: If set, PMA is indicating loss of analog signal */
+#define MC_CMD_LINK_STATUS_FLAGS_PMA_LOS 0xa
+/* enum: If set, PCS is indicating loss of block lock */
+#define MC_CMD_LINK_STATUS_FLAGS_PCS_NO_BLOCK_LOCK 0xb
+/* enum: If set, PCS is indicating loss of alignment marker lock on one or more
+ * lanes
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_PCS_NO_AM_LOCK 0xc
+/* enum: If set, PCS is indicating loss of overall alignment lock */
+#define MC_CMD_LINK_STATUS_FLAGS_PCS_NO_ALIGN_LOCK 0xd
+/* enum: If set, PCS is indicating high bit error rate condition. */
+#define MC_CMD_LINK_STATUS_FLAGS_PCS_HI_BER 0xe
+/* enum: If set, FEC is indicating loss of FEC lock */
+#define MC_CMD_LINK_STATUS_FLAGS_FEC_NO_LOCK 0xf
+/* enum: If set, indicates that the number of symbol errors in an
+ * 8192-codeword window has exceeded the threshold K (417).
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_FEC_HI_SER 0x10
+/* enum: If set, the receiver has detected the local FEC has degraded. */
+#define MC_CMD_LINK_STATUS_FLAGS_FEC_LOCAL_DEGRADED 0x11
+/* enum: If set, the receiver has detected the remote FEC has degraded. */
+#define MC_CMD_LINK_STATUS_FLAGS_FEC_RM_DEGRADED 0x12
+/* enum: If set, the number of symbol errors is over an internal threshold. */
+#define MC_CMD_LINK_STATUS_FLAGS_FEC_DEGRADED_SER 0x13
+/* enum: If set, autonegotiation has detected an auto-negotiation capable link
+ * partner
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_AN_ABLE 0x14
+/* enum: If set, autonegotiation base page exchange has failed */
+#define MC_CMD_LINK_STATUS_FLAGS_AN_BP_FAILED 0x15
+/* enum: If set, autonegotiation next page exchange has failed */
+#define MC_CMD_LINK_STATUS_FLAGS_AN_NP_FAILED 0x16
+/* enum: If set, autonegotiation has failed to negotiate a common set of
+ * capabilities
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_AN_NO_HCD 0x17
+/* enum: If set, local end link training has failed to establish link training
+ * frame lock on one or more lanes
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_LT_NO_LOCAL_FRAME_LOCK 0x18
+/* enum: If set, remote end link training has failed to establish link training
+ * frame lock on one or more lanes
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_LT_NO_RM_FRAME_LOCK 0x19
+/* enum: If set, remote end has failed to assert Receiver Ready (link training
+ * success) within the designated timeout
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_LT_NO_RX_READY 0x1a
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_LBN 0
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_WIDTH 64
+
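/*
 * Editor's sketch (not part of this diff): STATUS_FLAGS is a 64-bit
 * "enum property: bitshift" field, so the MC_CMD_LINK_STATUS_FLAGS_* enums
 * above are bit indices. Assuming the field has been read from the response
 * as a little-endian uint64_t:
 */
#include <stdbool.h>
#include <stdint.h>

static bool link_is_up(uint64_t status_flags)
{
	return (status_flags >> MC_CMD_LINK_STATUS_FLAGS_LINK_UP) & 1;
}

static bool module_disconnected(uint64_t status_flags)
{
	return (status_flags >> MC_CMD_LINK_STATUS_FLAGS_PMD_MDI_DISCONNECTED) & 1;
}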
+/* MC_CMD_PAUSE_MODE structuredef */
+#define MC_CMD_PAUSE_MODE_LEN 1
+#define MC_CMD_PAUSE_MODE_TYPE_OFST 0
+#define MC_CMD_PAUSE_MODE_TYPE_LEN 1
+/* enum: See IEEE 802.3 Clause 73.6.6 */
+#define MC_CMD_PAUSE_MODE_AN_PAUSE 0x0
+/* enum: See IEEE 802.3 Clause 73.6.6 */
+#define MC_CMD_PAUSE_MODE_AN_ASYM_DIR 0x1
+#define MC_CMD_PAUSE_MODE_TYPE_LBN 0
+#define MC_CMD_PAUSE_MODE_TYPE_WIDTH 8
+
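/*
 * Editor's sketch (not part of this diff): one plausible way to build a
 * pause advertisement mask from rx/tx pause settings, using the bit indices
 * above. The rx/tx-to-PAUSE/ASM_DIR mapping follows the conventional IEEE
 * 802.3 Clause 73.6.6 interpretation and is an editorial assumption, not
 * something this header specifies.
 */
#include <stdbool.h>
#include <stdint.h>

static uint8_t pause_adv_mask(bool rx_pause, bool tx_pause)
{
	uint8_t mask = 0;

	if (rx_pause)
		mask |= 1u << MC_CMD_PAUSE_MODE_AN_PAUSE;
	if (rx_pause != tx_pause)	/* asymmetric pause */
		mask |= 1u << MC_CMD_PAUSE_MODE_AN_ASYM_DIR;
	return mask;
}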
+/* MC_CMD_ETH_AN_FIELDS structuredef: Fields used for IEEE 802.3 Clause 73
+ * Auto-Negotiation. Warning - This is fixed size and cannot be extended. This
+ * structure is used to define autonegotiable abilities (advertised, link
+ * partner and supported abilities).
+ */
+#define MC_CMD_ETH_AN_FIELDS_LEN 25
+/* Mask of Ethernet technologies. The bit indices in this mask are taken from
+ * the TECH field in the MC_CMD_ETH_TECH structure.
+ */
+#define MC_CMD_ETH_AN_FIELDS_TECH_MASK_OFST 0
+#define MC_CMD_ETH_AN_FIELDS_TECH_MASK_LEN 16
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_ETH_TECH/TECH */
+#define MC_CMD_ETH_AN_FIELDS_TECH_MASK_LBN 0
+#define MC_CMD_ETH_AN_FIELDS_TECH_MASK_WIDTH 128
+/* Mask of supported FEC modes */
+#define MC_CMD_ETH_AN_FIELDS_FEC_MASK_OFST 16
+#define MC_CMD_ETH_AN_FIELDS_FEC_MASK_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+#define MC_CMD_ETH_AN_FIELDS_FEC_MASK_LBN 128
+#define MC_CMD_ETH_AN_FIELDS_FEC_MASK_WIDTH 32
+/* Mask of requested FEC modes */
+#define MC_CMD_ETH_AN_FIELDS_FEC_REQ_OFST 20
+#define MC_CMD_ETH_AN_FIELDS_FEC_REQ_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+#define MC_CMD_ETH_AN_FIELDS_FEC_REQ_LBN 160
+#define MC_CMD_ETH_AN_FIELDS_FEC_REQ_WIDTH 32
+/* Bitmask of negotiated pause modes */
+#define MC_CMD_ETH_AN_FIELDS_PAUSE_MASK_OFST 24
+#define MC_CMD_ETH_AN_FIELDS_PAUSE_MASK_LEN 1
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_PAUSE_MODE/TYPE */
+#define MC_CMD_ETH_AN_FIELDS_PAUSE_MASK_LBN 192
+#define MC_CMD_ETH_AN_FIELDS_PAUSE_MASK_WIDTH 8
+
+/* MC_CMD_LOOPBACK_V2 structuredef: Loopback modes for use with the new
+ * MC_CMD_LINK_CTRL and MC_CMD_LINK_STATE. These loopback modes are not
+ * supported in other getlink/setlink commands.
+ */
+#define MC_CMD_LOOPBACK_V2_LEN 4
+#define MC_CMD_LOOPBACK_V2_MODE_OFST 0
+#define MC_CMD_LOOPBACK_V2_MODE_LEN 4
+/* enum: No loopback */
+#define MC_CMD_LOOPBACK_V2_NONE 0x0
+/* enum: Let firmware choose a supported loopback mode */
+#define MC_CMD_LOOPBACK_V2_AUTO 0x1
+/* enum: Loopback after the MAC */
+#define MC_CMD_LOOPBACK_V2_POST_MAC 0x2
+/* enum: Loopback after the PCS */
+#define MC_CMD_LOOPBACK_V2_POST_PCS 0x3
+/* enum: Loopback after the PMA */
+#define MC_CMD_LOOPBACK_V2_POST_PMA 0x4
+/* enum: Loopback after the MDI Wireside */
+#define MC_CMD_LOOPBACK_V2_POST_MDI_WS 0x5
+/* enum: Loopback after the PMA Wireside */
+#define MC_CMD_LOOPBACK_V2_POST_PMA_WS 0x6
+/* enum: Loopback after the PCS Wireside */
+#define MC_CMD_LOOPBACK_V2_POST_PCS_WS 0x7
+/* enum: Loopback after the MAC Wireside */
+#define MC_CMD_LOOPBACK_V2_POST_MAC_WS 0x8
+/* enum: Loopback after the MAC FIFOs (before the MAC) */
+#define MC_CMD_LOOPBACK_V2_PRE_MAC 0x9
+#define MC_CMD_LOOPBACK_V2_MODE_LBN 0
+#define MC_CMD_LOOPBACK_V2_MODE_WIDTH 32
+
+/* MC_CMD_FCNTL structuredef */
+#define MC_CMD_FCNTL_LEN 4
+#define MC_CMD_FCNTL_MASK_OFST 0
+#define MC_CMD_FCNTL_MASK_LEN 4
+/* enum: Flow control is off. */
+#define MC_CMD_FCNTL_OFF 0x0
+/* enum: Respond to flow control. */
+#define MC_CMD_FCNTL_RESPOND 0x1
+/* enum: Respond to and Issue flow control. */
+#define MC_CMD_FCNTL_BIDIR 0x2
+/* enum: Auto negotiate flow control. */
+#define MC_CMD_FCNTL_AUTO 0x3
+/* enum: Priority flow control. This is only supported on KSB. */
+#define MC_CMD_FCNTL_QBB 0x4
+/* enum: Issue flow control. */
+#define MC_CMD_FCNTL_GENERATE 0x5
+#define MC_CMD_FCNTL_MASK_LBN 0
+#define MC_CMD_FCNTL_MASK_WIDTH 32
+
+/* MC_CMD_LINK_FLAGS structuredef */
+#define MC_CMD_LINK_FLAGS_LEN 4
+/* The enums defined in this field are used as indices into the
+ * MC_CMD_LINK_FLAGS bitmask.
+ */
+#define MC_CMD_LINK_FLAGS_MASK_OFST 0
+#define MC_CMD_LINK_FLAGS_MASK_LEN 4
+/* enum property: bitshift */
+/* enum: Enable auto-negotiation. If AN is enabled, link technology and FEC
+ * mode are determined by advertised capabilities and requested FEC modes,
+ * combined with link partner capabilities. If AN is disabled, link technology
+ * is forced to LINK_TECHNOLOGY and FEC mode is forced to FEC_MODE. Not valid
+ * if loopback is enabled
+ */
+#define MC_CMD_LINK_FLAGS_AUTONEG_EN 0x0
+/* enum: Enable parallel detect. In addition to AN, try to sense partner forced
+ * speed/FEC mode (when partner AN disabled). Only valid if AN is enabled.
+ */
+#define MC_CMD_LINK_FLAGS_PARALLEL_DETECT_EN 0x1
+/* enum: Force link down, in electrical idle. */
+#define MC_CMD_LINK_FLAGS_LINK_DISABLE 0x2
+/* enum: Ignore the sequence number and always apply. */
+#define MC_CMD_LINK_FLAGS_IGNORE_MODULE_SEQ 0x3
+#define MC_CMD_LINK_FLAGS_MASK_LBN 0
+#define MC_CMD_LINK_FLAGS_MASK_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_LINK_CTRL
+ * Write the unified MAC/PHY link configuration. Locks required: None. Return
+ * code: 0, EINVAL, ETIME, EAGAIN
+ */
+#define MC_CMD_LINK_CTRL 0x6b
+#undef MC_CMD_0x6b_PRIVILEGE_CTG
+
+#define MC_CMD_0x6b_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_LINK_CTRL_IN msgrequest */
+#define MC_CMD_LINK_CTRL_IN_LEN 40
+/* Handle to the port to set link state for. */
+#define MC_CMD_LINK_CTRL_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_LINK_CTRL_IN_PORT_HANDLE_LEN 4
+/* Control flags */
+#define MC_CMD_LINK_CTRL_IN_CONTROL_FLAGS_OFST 4
+#define MC_CMD_LINK_CTRL_IN_CONTROL_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_LINK_FLAGS/MASK */
+/* Reserved for future expansion, and included to provide padding for alignment
+ * purposes.
+ */
+#define MC_CMD_LINK_CTRL_IN_RESERVED_OFST 8
+#define MC_CMD_LINK_CTRL_IN_RESERVED_LEN 8
+#define MC_CMD_LINK_CTRL_IN_RESERVED_LO_OFST 8
+#define MC_CMD_LINK_CTRL_IN_RESERVED_LO_LEN 4
+#define MC_CMD_LINK_CTRL_IN_RESERVED_LO_LBN 64
+#define MC_CMD_LINK_CTRL_IN_RESERVED_LO_WIDTH 32
+#define MC_CMD_LINK_CTRL_IN_RESERVED_HI_OFST 12
+#define MC_CMD_LINK_CTRL_IN_RESERVED_HI_LEN 4
+#define MC_CMD_LINK_CTRL_IN_RESERVED_HI_LBN 96
+#define MC_CMD_LINK_CTRL_IN_RESERVED_HI_WIDTH 32
+/* Technology abilities to advertise during auto-negotiation */
+#define MC_CMD_LINK_CTRL_IN_ADVERTISED_TECH_ABILITIES_MASK_OFST 16
+#define MC_CMD_LINK_CTRL_IN_ADVERTISED_TECH_ABILITIES_MASK_LEN 16
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_ETH_TECH/TECH */
+/* Pause abilities to advertise during auto-negotiation. Valid when auto-
+ * negotiation is enabled and MC_CMD_SET_MAC_IN/FCTL is set to
+ * MC_CMD_FCNTL_AUTO. If auto-negotiation is disabled the driver must
+ * explicitly configure pause mode with MC_CMD_SET_MAC.
+ */
+#define MC_CMD_LINK_CTRL_IN_ADVERTISED_PAUSE_ABILITIES_MASK_OFST 32
+#define MC_CMD_LINK_CTRL_IN_ADVERTISED_PAUSE_ABILITIES_MASK_LEN 1
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_PAUSE_MODE/TYPE */
+/* When auto-negotiation is enabled, this is the FEC mode to request. Note that
+ * a weaker FEC mode may get negotiated, depending on what the link partner
+ * supports. The driver should subsequently use MC_CMD_GET_LINK to check the
+ * actual negotiated FEC mode. When auto-negotiation is disabled, this is the
+ * forced FEC mode.
+ */
+#define MC_CMD_LINK_CTRL_IN_FEC_MODE_OFST 33
+#define MC_CMD_LINK_CTRL_IN_FEC_MODE_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+/* This is only to be used when auto-negotiation is disabled (forced speed or
+ * loopback mode). If the specified value does not align with the values
+ * defined in the enum MC_CMD_ETH_TECH/TECH, it is considered invalid.
+ */
+#define MC_CMD_LINK_CTRL_IN_LINK_TECHNOLOGY_OFST 36
+#define MC_CMD_LINK_CTRL_IN_LINK_TECHNOLOGY_LEN 2
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_ETH_TECH/TECH */
+/* The sequence number of the last MODULECHANGE event. If this does not match,
+ * the command fails with EAGAIN.
+ */
+#define MC_CMD_LINK_CTRL_IN_MODULE_SEQ_OFST 38
+#define MC_CMD_LINK_CTRL_IN_MODULE_SEQ_LEN 1
+/* Loopback Mode. Only valid when auto-negotiation is disabled. */
+#define MC_CMD_LINK_CTRL_IN_LOOPBACK_OFST 39
+#define MC_CMD_LINK_CTRL_IN_LOOPBACK_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_LOOPBACK_V2/MODE */
+
+/* MC_CMD_LINK_CTRL_OUT msgresponse */
+#define MC_CMD_LINK_CTRL_OUT_LEN 0
+
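/*
 * Editor's sketch (not part of this diff): populating MC_CMD_LINK_CTRL_IN
 * for an auto-negotiated link using the offsets above. The little-endian
 * encoding and put_le32() helper are assumptions; a real driver would also
 * handle the EAGAIN retry when MODULE_SEQ is stale. memset() leaves the
 * LOOPBACK byte at MC_CMD_LOOPBACK_V2_NONE (0), which is what we want when
 * auto-negotiation is enabled.
 */
#include <stdint.h>
#include <string.h>

static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = v & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = (v >> 24) & 0xff;
}

static void link_ctrl_autoneg(uint8_t buf[MC_CMD_LINK_CTRL_IN_LEN],
			      uint32_t port_handle,
			      const uint8_t tech_mask[16],
			      uint8_t module_seq)
{
	memset(buf, 0, MC_CMD_LINK_CTRL_IN_LEN);
	put_le32(buf + MC_CMD_LINK_CTRL_IN_PORT_HANDLE_OFST, port_handle);
	put_le32(buf + MC_CMD_LINK_CTRL_IN_CONTROL_FLAGS_OFST,
		 1u << MC_CMD_LINK_FLAGS_AUTONEG_EN);
	memcpy(buf + MC_CMD_LINK_CTRL_IN_ADVERTISED_TECH_ABILITIES_MASK_OFST,
	       tech_mask,
	       MC_CMD_LINK_CTRL_IN_ADVERTISED_TECH_ABILITIES_MASK_LEN);
	/* Request a FEC mode; a weaker one may still be negotiated. */
	buf[MC_CMD_LINK_CTRL_IN_FEC_MODE_OFST] = MC_CMD_FEC_AUTO;
	buf[MC_CMD_LINK_CTRL_IN_MODULE_SEQ_OFST] = module_seq;
}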
+
+/***********************************/
+/* MC_CMD_LINK_STATE
+ */
+#define MC_CMD_LINK_STATE 0x6c
+#undef MC_CMD_0x6c_PRIVILEGE_CTG
+
+#define MC_CMD_0x6c_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_LINK_STATE_IN msgrequest */
+#define MC_CMD_LINK_STATE_IN_LEN 4
+/* Handle to the port to get link state for. */
+#define MC_CMD_LINK_STATE_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_LINK_STATE_IN_PORT_HANDLE_LEN 4
+
+/* MC_CMD_LINK_STATE_OUT msgresponse */
+#define MC_CMD_LINK_STATE_OUT_LEN 114
+/* Flags used to report the current configuration/state of the link. */
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_OFST 0
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_LEN 8
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_LO_OFST 0
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_LO_LEN 4
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_LO_LBN 0
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_LO_WIDTH 32
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_HI_OFST 4
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_HI_LEN 4
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_HI_LBN 32
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_HI_WIDTH 32
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_LINK_STATUS_FLAGS/STATUS_FLAGS */
+/* Configured technology. If the specified value does not align with the values
+ * defined in the enum MC_CMD_ETH_TECH/TECH, it is considered invalid.
+ */
+#define MC_CMD_LINK_STATE_OUT_LINK_TECHNOLOGY_OFST 8
+#define MC_CMD_LINK_STATE_OUT_LINK_TECHNOLOGY_LEN 2
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_ETH_TECH/TECH */
+/* Configured FEC mode */
+#define MC_CMD_LINK_STATE_OUT_FEC_MODE_OFST 10
+#define MC_CMD_LINK_STATE_OUT_FEC_MODE_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+/* Bitmask of auto-negotiated pause modes */
+#define MC_CMD_LINK_STATE_OUT_PAUSE_MASK_OFST 11
+#define MC_CMD_LINK_STATE_OUT_PAUSE_MASK_LEN 1
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_PAUSE_MODE/TYPE */
+/* Configured loopback mode */
+#define MC_CMD_LINK_STATE_OUT_LOOPBACK_OFST 12
+#define MC_CMD_LINK_STATE_OUT_LOOPBACK_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_LOOPBACK_V2/MODE */
+/* Abilities requested by the driver to advertise during auto-negotiation */
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_OFST 16
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_LEN 32
+/* See structuredef: MC_CMD_ETH_AN_FIELDS */
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_TECH_MASK_OFST 16
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_TECH_MASK_LEN 16
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_FEC_MASK_OFST 32
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_FEC_MASK_LEN 4
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_FEC_REQ_OFST 36
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_FEC_REQ_LEN 4
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_PAUSE_MASK_OFST 40
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_PAUSE_MASK_LEN 1
+/* Abilities advertised by the link partner during auto-negotiation */
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_OFST 48
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_LEN 32
+/* See structuredef: MC_CMD_ETH_AN_FIELDS */
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_TECH_MASK_OFST 48
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_TECH_MASK_LEN 16
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_FEC_MASK_OFST 64
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_FEC_MASK_LEN 4
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_FEC_REQ_OFST 68
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_FEC_REQ_LEN 4
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_PAUSE_MASK_OFST 72
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_PAUSE_MASK_LEN 1
+/* Abilities supported by the local device (including cable abilities). For
+ * fixed local device capabilities see MC_CMD_GET_LOCAL_DEVICE_INFO.
+ */
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_OFST 80
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_LEN 28
+/* See structuredef: MC_CMD_ETH_AN_FIELDS */
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_TECH_MASK_OFST 80
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_TECH_MASK_LEN 16
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_FEC_MASK_OFST 96
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_FEC_MASK_LEN 4
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_FEC_REQ_OFST 100
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_FEC_REQ_LEN 4
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_PAUSE_MASK_OFST 104
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_PAUSE_MASK_LEN 1
+/* Control flags */
+#define MC_CMD_LINK_STATE_OUT_CONTROL_FLAGS_OFST 108
+#define MC_CMD_LINK_STATE_OUT_CONTROL_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_LINK_FLAGS/MASK */
+/* Sequence number to synchronize link change events */
+#define MC_CMD_LINK_STATE_OUT_PORT_LINKCHANGE_SEQ_NUM_OFST 112
+#define MC_CMD_LINK_STATE_OUT_PORT_LINKCHANGE_SEQ_NUM_LEN 1
+/* Sequence number to synchronize module change events */
+#define MC_CMD_LINK_STATE_OUT_PORT_MODULECHANGE_SEQ_NUM_OFST 113
+#define MC_CMD_LINK_STATE_OUT_PORT_MODULECHANGE_SEQ_NUM_LEN 1
+
+/* MC_CMD_LINK_STATE_OUT_V2 msgresponse: Updated LINK_STATE_OUT with
+ * LOCAL_AN_SUPPORT
+ */
+#define MC_CMD_LINK_STATE_OUT_V2_LEN 120
+/* Flags used to report the current configuration/state of the link. */
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_OFST 0
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_LEN 8
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_LO_OFST 0
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_LO_LEN 4
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_LO_LBN 0
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_LO_WIDTH 32
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_HI_OFST 4
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_HI_LEN 4
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_HI_LBN 32
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_HI_WIDTH 32
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_LINK_STATUS_FLAGS/STATUS_FLAGS */
+/* Configured technology. If the specified value does not align with the values
+ * defined in the enum MC_CMD_ETH_TECH/TECH, it is considered invalid.
+ */
+#define MC_CMD_LINK_STATE_OUT_V2_LINK_TECHNOLOGY_OFST 8
+#define MC_CMD_LINK_STATE_OUT_V2_LINK_TECHNOLOGY_LEN 2
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_ETH_TECH/TECH */
+/* Configured FEC mode */
+#define MC_CMD_LINK_STATE_OUT_V2_FEC_MODE_OFST 10
+#define MC_CMD_LINK_STATE_OUT_V2_FEC_MODE_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+/* Bitmask of auto-negotiated pause modes */
+#define MC_CMD_LINK_STATE_OUT_V2_PAUSE_MASK_OFST 11
+#define MC_CMD_LINK_STATE_OUT_V2_PAUSE_MASK_LEN 1
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_PAUSE_MODE/TYPE */
+/* Configured loopback mode */
+#define MC_CMD_LINK_STATE_OUT_V2_LOOPBACK_OFST 12
+#define MC_CMD_LINK_STATE_OUT_V2_LOOPBACK_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_LOOPBACK_V2/MODE */
+/* Abilities requested by the driver to advertise during auto-negotiation */
+#define MC_CMD_LINK_STATE_OUT_V2_ADVERTISED_ABILITIES_OFST 16
+#define MC_CMD_LINK_STATE_OUT_V2_ADVERTISED_ABILITIES_LEN 32
+/* Abilities advertised by the link partner during auto-negotiation */
+#define MC_CMD_LINK_STATE_OUT_V2_LINK_PARTNER_ABILITIES_OFST 48
+#define MC_CMD_LINK_STATE_OUT_V2_LINK_PARTNER_ABILITIES_LEN 32
+/* Abilities supported by the local device (including cable abilities). For
+ * fixed local device capabilities see MC_CMD_GET_LOCAL_DEVICE_INFO.
+ */
+#define MC_CMD_LINK_STATE_OUT_V2_SUPPORTED_ABILITIES_OFST 80
+#define MC_CMD_LINK_STATE_OUT_V2_SUPPORTED_ABILITIES_LEN 28
+/* Control flags */
+#define MC_CMD_LINK_STATE_OUT_V2_CONTROL_FLAGS_OFST 108
+#define MC_CMD_LINK_STATE_OUT_V2_CONTROL_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_LINK_FLAGS/MASK */
+/* Sequence number to synchronize link change events */
+#define MC_CMD_LINK_STATE_OUT_V2_PORT_LINKCHANGE_SEQ_NUM_OFST 112
+#define MC_CMD_LINK_STATE_OUT_V2_PORT_LINKCHANGE_SEQ_NUM_LEN 1
+/* Sequence number to synchronize module change events */
+#define MC_CMD_LINK_STATE_OUT_V2_PORT_MODULECHANGE_SEQ_NUM_OFST 113
+#define MC_CMD_LINK_STATE_OUT_V2_PORT_MODULECHANGE_SEQ_NUM_LEN 1
+/* Reports the type of auto-negotiation supported by the local device. This
+ * depends on the port and module properties.
+ */
+#define MC_CMD_LINK_STATE_OUT_V2_LOCAL_AN_SUPPORT_OFST 116
+#define MC_CMD_LINK_STATE_OUT_V2_LOCAL_AN_SUPPORT_LEN 4
+/* Enum values, see field(s): */
+/* AN_TYPE/TYPE */
+
+/* MC_CMD_LINK_STATE_OUT_V3 msgresponse: Updated LINK_STATE_OUT_V2 for explicit
+ * reporting of the link speed and duplex mode.
+ */
+#define MC_CMD_LINK_STATE_OUT_V3_LEN 128
+/* Flags used to report the current configuration/state of the link. */
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_OFST 0
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_LEN 8
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_LO_OFST 0
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_LO_LEN 4
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_LO_LBN 0
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_LO_WIDTH 32
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_HI_OFST 4
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_HI_LEN 4
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_HI_LBN 32
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_HI_WIDTH 32
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_LINK_STATUS_FLAGS/STATUS_FLAGS */
+/* Configured technology. If the specified value does not align with the values
+ * defined in the enum MC_CMD_ETH_TECH/TECH, it is considered invalid.
+ */
+#define MC_CMD_LINK_STATE_OUT_V3_LINK_TECHNOLOGY_OFST 8
+#define MC_CMD_LINK_STATE_OUT_V3_LINK_TECHNOLOGY_LEN 2
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_ETH_TECH/TECH */
+/* Configured FEC mode */
+#define MC_CMD_LINK_STATE_OUT_V3_FEC_MODE_OFST 10
+#define MC_CMD_LINK_STATE_OUT_V3_FEC_MODE_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+/* Bitmask of auto-negotiated pause modes */
+#define MC_CMD_LINK_STATE_OUT_V3_PAUSE_MASK_OFST 11
+#define MC_CMD_LINK_STATE_OUT_V3_PAUSE_MASK_LEN 1
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_PAUSE_MODE/TYPE */
+/* Configured loopback mode */
+#define MC_CMD_LINK_STATE_OUT_V3_LOOPBACK_OFST 12
+#define MC_CMD_LINK_STATE_OUT_V3_LOOPBACK_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_LOOPBACK_V2/MODE */
+/* Abilities requested by the driver to advertise during auto-negotiation */
+#define MC_CMD_LINK_STATE_OUT_V3_ADVERTISED_ABILITIES_OFST 16
+#define MC_CMD_LINK_STATE_OUT_V3_ADVERTISED_ABILITIES_LEN 32
+/* Abilities advertised by the link partner during auto-negotiation */
+#define MC_CMD_LINK_STATE_OUT_V3_LINK_PARTNER_ABILITIES_OFST 48
+#define MC_CMD_LINK_STATE_OUT_V3_LINK_PARTNER_ABILITIES_LEN 32
+/* Abilities supported by the local device (including cable abilities). For
+ * fixed local device capabilities see MC_CMD_GET_LOCAL_DEVICE_INFO.
+ */
+#define MC_CMD_LINK_STATE_OUT_V3_SUPPORTED_ABILITIES_OFST 80
+#define MC_CMD_LINK_STATE_OUT_V3_SUPPORTED_ABILITIES_LEN 28
+/* Control flags */
+#define MC_CMD_LINK_STATE_OUT_V3_CONTROL_FLAGS_OFST 108
+#define MC_CMD_LINK_STATE_OUT_V3_CONTROL_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_LINK_FLAGS/MASK */
+/* Sequence number to synchronize link change events */
+#define MC_CMD_LINK_STATE_OUT_V3_PORT_LINKCHANGE_SEQ_NUM_OFST 112
+#define MC_CMD_LINK_STATE_OUT_V3_PORT_LINKCHANGE_SEQ_NUM_LEN 1
+/* Sequence number to synchronize module change events */
+#define MC_CMD_LINK_STATE_OUT_V3_PORT_MODULECHANGE_SEQ_NUM_OFST 113
+#define MC_CMD_LINK_STATE_OUT_V3_PORT_MODULECHANGE_SEQ_NUM_LEN 1
+/* Reports the type of auto-negotiation supported by the local device. This
+ * depends on the port and module properties.
+ */
+#define MC_CMD_LINK_STATE_OUT_V3_LOCAL_AN_SUPPORT_OFST 116
+#define MC_CMD_LINK_STATE_OUT_V3_LOCAL_AN_SUPPORT_LEN 4
+/* Enum values, see field(s): */
+/* AN_TYPE/TYPE */
+/* Auto-negotiated speed in Mbit/s. The link may still be down even if this
+ * reads non-zero. The LINK_SPEED field is intended for drivers without the
+ * most up-to-date MCDI definitions, which cannot deduce the link speed from
+ * the reported LINK_TECHNOLOGY field.
+ */
+#define MC_CMD_LINK_STATE_OUT_V3_LINK_SPEED_OFST 120
+#define MC_CMD_LINK_STATE_OUT_V3_LINK_SPEED_LEN 4
+#define MC_CMD_LINK_STATE_OUT_V3_FLAGS_OFST 124
+#define MC_CMD_LINK_STATE_OUT_V3_FLAGS_LEN 4
+#define MC_CMD_LINK_STATE_OUT_V3_FULL_DUPLEX_OFST 124
+#define MC_CMD_LINK_STATE_OUT_V3_FULL_DUPLEX_LBN 0
+#define MC_CMD_LINK_STATE_OUT_V3_FULL_DUPLEX_WIDTH 1
+
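/*
 * Editor's sketch (not part of this diff): extracting the V3-only
 * speed/duplex fields, assuming a little-endian response buffer of at least
 * MC_CMD_LINK_STATE_OUT_V3_LEN bytes; get_le32() is a hypothetical helper.
 */
#include <stdbool.h>
#include <stdint.h>

static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static uint32_t link_state_v3_speed_mbps(const uint8_t *resp, bool *full_duplex)
{
	uint32_t flags = get_le32(resp + MC_CMD_LINK_STATE_OUT_V3_FLAGS_OFST);

	*full_duplex = (flags >> MC_CMD_LINK_STATE_OUT_V3_FULL_DUPLEX_LBN) & 1;
	return get_le32(resp + MC_CMD_LINK_STATE_OUT_V3_LINK_SPEED_OFST);
}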
/***********************************/
/* MC_CMD_GET_LINK
@@ -5694,6 +6064,54 @@
/* MC_CMD_GET_LINK_IN msgrequest */
#define MC_CMD_GET_LINK_IN_LEN 0
+/* MC_CMD_GET_LINK_IN_V2 msgrequest */
+#define MC_CMD_GET_LINK_IN_V2_LEN 8
+/* Target port to request link state for. Uses MAE_LINK_ENDPOINT_SELECTOR which
+ * identifies a real or virtual network port by MAE port and link end. See the
+ * structure definition for more details.
+ */
+#define MC_CMD_GET_LINK_IN_V2_TARGET_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LEN 8
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LO_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LO_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LO_LBN 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LO_WIDTH 32
+#define MC_CMD_GET_LINK_IN_V2_TARGET_HI_OFST 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_HI_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_HI_LBN 32
+#define MC_CMD_GET_LINK_IN_V2_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FLAT_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_TYPE_OFST 3
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 20
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 16
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 2
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LINK_END_OFST 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LINK_END_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_LEN 8
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_LO_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_LO_LBN 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_HI_OFST 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_HI_LBN 32
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_HI_WIDTH 32
+
/* MC_CMD_GET_LINK_OUT msgresponse */
#define MC_CMD_GET_LINK_OUT_LEN 28
/* Near-side advertised capabilities. Refer to
@@ -5745,6 +6163,7 @@
/* This returns the negotiated flow control value. */
#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
#define MC_CMD_GET_LINK_OUT_FCNTL_LEN 4
+/* enum property: value */
/* Enum values, see field(s): */
/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
@@ -5813,6 +6232,7 @@
/* This returns the negotiated flow control value. */
#define MC_CMD_GET_LINK_OUT_V2_FCNTL_OFST 20
#define MC_CMD_GET_LINK_OUT_V2_FCNTL_LEN 4
+/* enum property: value */
/* Enum values, see field(s): */
/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
#define MC_CMD_GET_LINK_OUT_V2_MAC_FAULT_OFST 24
@@ -5969,6 +6389,95 @@
#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_IGNORE_LBN 7
#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_IGNORE_WIDTH 1
+/* MC_CMD_SET_LINK_IN_V3 msgrequest */
+#define MC_CMD_SET_LINK_IN_V3_LEN 28
+/* Near-side advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define MC_CMD_SET_LINK_IN_V3_CAP_OFST 0
+#define MC_CMD_SET_LINK_IN_V3_CAP_LEN 4
+/* Flags */
+#define MC_CMD_SET_LINK_IN_V3_FLAGS_OFST 4
+#define MC_CMD_SET_LINK_IN_V3_FLAGS_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_LOWPOWER_OFST 4
+#define MC_CMD_SET_LINK_IN_V3_LOWPOWER_LBN 0
+#define MC_CMD_SET_LINK_IN_V3_LOWPOWER_WIDTH 1
+#define MC_CMD_SET_LINK_IN_V3_POWEROFF_OFST 4
+#define MC_CMD_SET_LINK_IN_V3_POWEROFF_LBN 1
+#define MC_CMD_SET_LINK_IN_V3_POWEROFF_WIDTH 1
+#define MC_CMD_SET_LINK_IN_V3_TXDIS_OFST 4
+#define MC_CMD_SET_LINK_IN_V3_TXDIS_LBN 2
+#define MC_CMD_SET_LINK_IN_V3_TXDIS_WIDTH 1
+#define MC_CMD_SET_LINK_IN_V3_LINKDOWN_OFST 4
+#define MC_CMD_SET_LINK_IN_V3_LINKDOWN_LBN 3
+#define MC_CMD_SET_LINK_IN_V3_LINKDOWN_WIDTH 1
+/* Loopback mode. */
+#define MC_CMD_SET_LINK_IN_V3_LOOPBACK_MODE_OFST 8
+#define MC_CMD_SET_LINK_IN_V3_LOOPBACK_MODE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+/* A loopback speed of "0" is supported and means "choose any available
+ * speed".
+ */
+#define MC_CMD_SET_LINK_IN_V3_LOOPBACK_SPEED_OFST 12
+#define MC_CMD_SET_LINK_IN_V3_LOOPBACK_SPEED_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_OFST 16
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_LEN 1
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_NUMBER_OFST 16
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_NUMBER_LBN 0
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_NUMBER_WIDTH 7
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_IGNORE_OFST 16
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_IGNORE_LBN 7
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_IGNORE_WIDTH 1
+/* Padding */
+#define MC_CMD_SET_LINK_IN_V3_RESERVED_OFST 17
+#define MC_CMD_SET_LINK_IN_V3_RESERVED_LEN 3
+/* Target port to set link state for. Uses MAE_LINK_ENDPOINT_SELECTOR which
+ * identifies a real or virtual network port by MAE port and link end. See the
+ * structure definition for more details.
+ */
+#define MC_CMD_SET_LINK_IN_V3_TARGET_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_LEN 8
+#define MC_CMD_SET_LINK_IN_V3_TARGET_LO_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_LO_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_LO_LBN 160
+#define MC_CMD_SET_LINK_IN_V3_TARGET_LO_WIDTH 32
+#define MC_CMD_SET_LINK_IN_V3_TARGET_HI_OFST 24
+#define MC_CMD_SET_LINK_IN_V3_TARGET_HI_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_HI_LBN 192
+#define MC_CMD_SET_LINK_IN_V3_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FLAT_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_TYPE_OFST 23
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 160
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 180
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 176
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 22
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_SET_LINK_IN_V3_TARGET_LINK_END_OFST 24
+#define MC_CMD_SET_LINK_IN_V3_TARGET_LINK_END_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_LEN 8
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_LO_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_LO_LBN 160
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_HI_OFST 24
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_HI_LBN 192
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_HI_WIDTH 32
+
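A minimal sketch of how a driver might populate this request, assuming the
sfc driver's MCDI helper macros from drivers/net/ethernet/sfc/mcdi.h and
caller-provided "efx" and "caps" values (hypothetical usage, not part of
this patch):

	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_LINK_IN_V3_LEN);
	int rc;

	/* Advertise the near-side capabilities computed by the caller. */
	MCDI_SET_DWORD(inbuf, SET_LINK_IN_V3_CAP, caps);
	/* Leave the link administratively up. */
	MCDI_POPULATE_DWORD_1(inbuf, SET_LINK_IN_V3_FLAGS,
			      SET_LINK_IN_V3_LINKDOWN, 0);
	/* No loopback; speed 0 = "choose any available speed". */
	MCDI_SET_DWORD(inbuf, SET_LINK_IN_V3_LOOPBACK_MODE, 0);
	MCDI_SET_DWORD(inbuf, SET_LINK_IN_V3_LOOPBACK_SPEED, 0);
	/* TARGET selection omitted for brevity; see
	 * MAE_LINK_ENDPOINT_SELECTOR. */
	rc = efx_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
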
/* MC_CMD_SET_LINK_OUT msgresponse */
#define MC_CMD_SET_LINK_OUT_LEN 0
@@ -6034,17 +6543,17 @@
#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20
#define MC_CMD_SET_MAC_IN_FCNTL_LEN 4
/* enum: Flow control is off. */
-#define MC_CMD_FCNTL_OFF 0x0
+/* MC_CMD_FCNTL_OFF 0x0 */
/* enum: Respond to flow control. */
-#define MC_CMD_FCNTL_RESPOND 0x1
+/* MC_CMD_FCNTL_RESPOND 0x1 */
/* enum: Respond to and Issue flow control. */
-#define MC_CMD_FCNTL_BIDIR 0x2
-/* enum: Auto neg flow control. */
-#define MC_CMD_FCNTL_AUTO 0x3
-/* enum: Priority flow control (eftest builds only). */
-#define MC_CMD_FCNTL_QBB 0x4
+/* MC_CMD_FCNTL_BIDIR 0x2 */
+/* enum: Auto negotiate flow control. */
+/* MC_CMD_FCNTL_AUTO 0x3 */
+/* enum: Priority flow control. This is only supported on KSB. */
+/* MC_CMD_FCNTL_QBB 0x4 */
/* enum: Issue flow control. */
-#define MC_CMD_FCNTL_GENERATE 0x5
+/* MC_CMD_FCNTL_GENERATE 0x5 */
#define MC_CMD_SET_MAC_IN_FLAGS_OFST 24
#define MC_CMD_SET_MAC_IN_FLAGS_LEN 4
#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_OFST 24
@@ -6086,9 +6595,9 @@
/* MC_CMD_FCNTL_RESPOND 0x1 */
/* enum: Respond to and Issue flow control. */
/* MC_CMD_FCNTL_BIDIR 0x2 */
-/* enum: Auto neg flow control. */
+/* enum: Auto negotiate flow control. */
/* MC_CMD_FCNTL_AUTO 0x3 */
-/* enum: Priority flow control (eftest builds only). */
+/* enum: Priority flow control. This is only supported on KSB. */
/* MC_CMD_FCNTL_QBB 0x4 */
/* enum: Issue flow control. */
/* MC_CMD_FCNTL_GENERATE 0x5 */
@@ -6155,9 +6664,9 @@
/* MC_CMD_FCNTL_RESPOND 0x1 */
/* enum: Respond to and Issue flow control. */
/* MC_CMD_FCNTL_BIDIR 0x2 */
-/* enum: Auto neg flow control. */
+/* enum: Auto negotiate flow control. */
/* MC_CMD_FCNTL_AUTO 0x3 */
-/* enum: Priority flow control (eftest builds only). */
+/* enum: Priority flow control. This is only supported on KSB. */
/* MC_CMD_FCNTL_QBB 0x4 */
/* enum: Issue flow control. */
/* MC_CMD_FCNTL_GENERATE 0x5 */
@@ -6188,19 +6697,9 @@
#define MC_CMD_SET_MAC_V3_IN_CFG_FCS_OFST 28
#define MC_CMD_SET_MAC_V3_IN_CFG_FCS_LBN 4
#define MC_CMD_SET_MAC_V3_IN_CFG_FCS_WIDTH 1
-/* Identifies the MAC to update by the specifying the end of a logical MAE
- * link. Setting TARGET to MAE_LINK_ENDPOINT_COMPAT is equivalent to using the
- * previous version of the command (MC_CMD_SET_MAC_EXT). Not all possible
- * combinations of MPORT_END and MPORT_SELECTOR in TARGET will work in all
- * circumstances. 1. Some will always work (e.g. a VF can always address its
- * logical MAC using MPORT_SELECTOR=ASSIGNED,LINK_END=VNIC), 2. Some are not
- * meaningful and will always fail with EINVAL (e.g. attempting to address the
- * VNIC end of a link to a physical port), 3. Some are meaningful but require
- * the MCDI client to have the required permission and fail with EPERM
- * otherwise (e.g. trying to set the MAC on a VF the caller cannot administer),
- * and 4. Some could be implementation-specific and fail with ENOTSUP if not
- * available (no examples exist right now). See SF-123581-TC section 4.3 for
- * more details.
+/* Target port to set MAC state for. Uses MAE_LINK_ENDPOINT_SELECTOR which
+ * identifies a real or virtual network port by MAE port and link end. See the
+ * structure definition for more details.
*/
#define MC_CMD_SET_MAC_V3_IN_TARGET_OFST 32
#define MC_CMD_SET_MAC_V3_IN_TARGET_LEN 8
@@ -6212,6 +6711,7 @@
#define MC_CMD_SET_MAC_V3_IN_TARGET_HI_LEN 4
#define MC_CMD_SET_MAC_V3_IN_TARGET_HI_LBN 288
#define MC_CMD_SET_MAC_V3_IN_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_OFST 32
#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_LEN 4
#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FLAT_OFST 32
@@ -6405,6 +6905,98 @@
#define MC_CMD_MAC_STATS_IN_PORT_ID_OFST 16
#define MC_CMD_MAC_STATS_IN_PORT_ID_LEN 4
+/* MC_CMD_MAC_STATS_V2_IN msgrequest */
+#define MC_CMD_MAC_STATS_V2_IN_LEN 28
+/* ??? */
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_OFST 0
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_LEN 8
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_LO_LBN 0
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_LO_WIDTH 32
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_HI_OFST 4
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_HI_LBN 32
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_HI_WIDTH 32
+#define MC_CMD_MAC_STATS_V2_IN_CMD_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_CMD_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_DMA_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_DMA_LBN 0
+#define MC_CMD_MAC_STATS_V2_IN_DMA_WIDTH 1
+#define MC_CMD_MAC_STATS_V2_IN_CLEAR_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_CLEAR_LBN 1
+#define MC_CMD_MAC_STATS_V2_IN_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CHANGE_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CHANGE_LBN 2
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CHANGE_WIDTH 1
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_ENABLE_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_ENABLE_LBN 3
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_ENABLE_WIDTH 1
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CLEAR_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CLEAR_LBN 4
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_NOEVENT_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_NOEVENT_LBN 5
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_NOEVENT_WIDTH 1
+#define MC_CMD_MAC_STATS_V2_IN_PERIOD_MS_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_PERIOD_MS_LBN 16
+#define MC_CMD_MAC_STATS_V2_IN_PERIOD_MS_WIDTH 16
+/* DMA length. Should be set to MAC_STATS_NUM_STATS * sizeof(uint64_t), as
+ * returned by MC_CMD_GET_CAPABILITIES_V4_OUT. For legacy firmware not
+ * supporting MC_CMD_GET_CAPABILITIES_V4_OUT, DMA_LEN should be set to
+ * MC_CMD_MAC_NSTATS * sizeof(uint64_t)
+ */
+#define MC_CMD_MAC_STATS_V2_IN_DMA_LEN_OFST 12
+#define MC_CMD_MAC_STATS_V2_IN_DMA_LEN_LEN 4
+/* port id so vadapter stats can be provided */
+#define MC_CMD_MAC_STATS_V2_IN_PORT_ID_OFST 16
+#define MC_CMD_MAC_STATS_V2_IN_PORT_ID_LEN 4
+/* Target port to request statistics for. Uses MAE_LINK_ENDPOINT_SELECTOR which
+ * identifies a real or virtual network port by MAE port and link end. See the
+ * structure definition for more details.
+ */
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_LEN 8
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_LO_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_LO_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_LO_LBN 160
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_LO_WIDTH 32
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_HI_OFST 24
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_HI_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_HI_LBN 192
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FLAT_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_TYPE_OFST 23
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 160
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 180
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 176
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 22
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_LINK_END_OFST 24
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_LINK_END_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_LEN 8
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_LO_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_LO_LBN 160
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_HI_OFST 24
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_HI_LBN 192
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_HI_WIDTH 32
+
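A sketch of the periodic-DMA usage these CMD bits describe, assuming the sfc
MCDI helpers and a caller-provided coherent buffer (dma_addr, dma_len) sized
per the DMA_LEN comment above (hypothetical usage, not part of this patch):

	MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_V2_IN_LEN);
	int rc;

	MCDI_SET_QWORD(inbuf, MAC_STATS_V2_IN_DMA_ADDR, dma_addr);
	/* Enable a periodic 1000 ms DMA of the full statistics block. */
	MCDI_POPULATE_DWORD_4(inbuf, MAC_STATS_V2_IN_CMD,
			      MAC_STATS_V2_IN_DMA, 1,
			      MAC_STATS_V2_IN_PERIODIC_CHANGE, 1,
			      MAC_STATS_V2_IN_PERIODIC_ENABLE, 1,
			      MAC_STATS_V2_IN_PERIOD_MS, 1000);
	MCDI_SET_DWORD(inbuf, MAC_STATS_V2_IN_DMA_LEN, dma_len);
	MCDI_SET_DWORD(inbuf, MAC_STATS_V2_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
	rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
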
/* MC_CMD_MAC_STATS_OUT_DMA msgresponse */
#define MC_CMD_MAC_STATS_OUT_DMA_LEN 0
@@ -6421,6 +7013,7 @@
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_LBN 32
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_WIDTH 32
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS
+/* enum property: index */
#define MC_CMD_MAC_GENERATION_START 0x0 /* enum */
#define MC_CMD_MAC_DMABUF_START 0x1 /* enum */
#define MC_CMD_MAC_TX_PKTS 0x1 /* enum */
@@ -6583,6 +7176,7 @@
#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_LBN 32
#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_WIDTH 32
#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V2
+/* enum property: index */
/* enum: Start of FEC stats buffer space, Medford2 and up */
#define MC_CMD_MAC_FEC_DMABUF_START 0x61
/* enum: Number of uncorrected FEC codewords on link (RS-FEC only for Medford2)
@@ -6622,6 +7216,7 @@
#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_LBN 32
#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_WIDTH 32
#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V3
+/* enum property: index */
/* enum: Start of CTPIO stats buffer space, Medford2 and up */
#define MC_CMD_MAC_CTPIO_DMABUF_START 0x68
/* enum: Number of CTPIO fallbacks because a DMA packet was in progress on the
@@ -6702,6 +7297,7 @@
#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_HI_LBN 32
#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_HI_WIDTH 32
#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V4
+/* enum property: index */
/* enum: Start of V4 stats buffer space */
#define MC_CMD_MAC_V4_DMABUF_START 0x79
/* enum: RXDP counter: Number of packets truncated because scattering was
@@ -6723,112 +7319,35 @@
/* Other enum values, see field(s): */
/* MC_CMD_MAC_STATS_V3_OUT_NO_DMA/STATISTICS */
-
-/***********************************/
-/* MC_CMD_SRIOV
- * to be documented
- */
-#define MC_CMD_SRIOV 0x30
-
-/* MC_CMD_SRIOV_IN msgrequest */
-#define MC_CMD_SRIOV_IN_LEN 12
-#define MC_CMD_SRIOV_IN_ENABLE_OFST 0
-#define MC_CMD_SRIOV_IN_ENABLE_LEN 4
-#define MC_CMD_SRIOV_IN_VI_BASE_OFST 4
-#define MC_CMD_SRIOV_IN_VI_BASE_LEN 4
-#define MC_CMD_SRIOV_IN_VF_COUNT_OFST 8
-#define MC_CMD_SRIOV_IN_VF_COUNT_LEN 4
-
-/* MC_CMD_SRIOV_OUT msgresponse */
-#define MC_CMD_SRIOV_OUT_LEN 8
-#define MC_CMD_SRIOV_OUT_VI_SCALE_OFST 0
-#define MC_CMD_SRIOV_OUT_VI_SCALE_LEN 4
-#define MC_CMD_SRIOV_OUT_VF_TOTAL_OFST 4
-#define MC_CMD_SRIOV_OUT_VF_TOTAL_LEN 4
-
-/* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32
-/* this is only used for the first record */
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LBN 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_WIDTH 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_OFST 8
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LEN 8
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_OFST 8
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_LBN 64
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_WIDTH 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_OFST 12
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_LBN 96
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_WIDTH 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LBN 64
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_WIDTH 64
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST 16
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE 0x100 /* enum */
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LBN 128
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_WIDTH 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_OFST 20
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LEN 8
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_OFST 20
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_LBN 160
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_WIDTH 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_OFST 24
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_LBN 192
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_WIDTH 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LBN 160
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_WIDTH 64
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST 28
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LBN 224
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_WIDTH 32
-
-
-/***********************************/
-/* MC_CMD_MEMCPY
- * DMA write data into (Rid,Addr), either by dma reading (Rid,Addr), or by data
- * embedded directly in the command.
- *
- * A common pattern is for a client to use generation counts to signal a dma
- * update of a datastructure. To facilitate this, this MCDI operation can
- * contain multiple requests which are executed in strict order. Requests take
- * the form of duplicating the entire MCDI request continuously (including the
- * requests record, which is ignored in all but the first structure)
- *
- * The source data can either come from a DMA from the host, or it can be
- * embedded within the request directly, thereby eliminating a DMA read. To
- * indicate this, the client sets FROM_RID=%RID_INLINE, ADDR_HI=0, and
- * ADDR_LO=offset, and inserts the data at %offset from the start of the
- * payload. It's the callers responsibility to ensure that the embedded data
- * doesn't overlap the records.
- *
- * Returns: 0, EINVAL (invalid RID)
- */
-#define MC_CMD_MEMCPY 0x31
-
-/* MC_CMD_MEMCPY_IN msgrequest */
-#define MC_CMD_MEMCPY_IN_LENMIN 32
-#define MC_CMD_MEMCPY_IN_LENMAX 224
-#define MC_CMD_MEMCPY_IN_LENMAX_MCDI2 992
-#define MC_CMD_MEMCPY_IN_LEN(num) (0+32*(num))
-#define MC_CMD_MEMCPY_IN_RECORD_NUM(len) (((len)-0)/32)
-/* see MC_CMD_MEMCPY_RECORD_TYPEDEF */
-#define MC_CMD_MEMCPY_IN_RECORD_OFST 0
-#define MC_CMD_MEMCPY_IN_RECORD_LEN 32
-#define MC_CMD_MEMCPY_IN_RECORD_MINNUM 1
-#define MC_CMD_MEMCPY_IN_RECORD_MAXNUM 7
-#define MC_CMD_MEMCPY_IN_RECORD_MAXNUM_MCDI2 31
-
-/* MC_CMD_MEMCPY_OUT msgresponse */
-#define MC_CMD_MEMCPY_OUT_LEN 0
+/* MC_CMD_MAC_STATS_V5_OUT_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V5_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_V5_OUT_NO_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V5*64))>>3)
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_LO_LEN 4
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_LO_LBN 0
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_LO_WIDTH 32
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_HI_LEN 4
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_HI_LBN 32
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_HI_WIDTH 32
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V5
+/* enum property: index */
+/* enum: Start of V5 stats buffer space */
+#define MC_CMD_MAC_V5_DMABUF_START 0x7c
+/* enum: Link toggle counter: Number of times the link has toggled between
+ * up/down and down/up
+ */
+#define MC_CMD_MAC_LINK_TOGGLES 0x7c
+/* enum: This includes the space at offset 125 which is the final
+ * GENERATION_END in a MAC_STATS_V5 response and otherwise unused.
+ */
+#define MC_CMD_MAC_NSTATS_V5 0x7e
+/* Other enum values, see field(s): */
+/* MC_CMD_MAC_STATS_V4_OUT_NO_DMA/STATISTICS */
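Since MC_CMD_MAC_NSTATS_V5 is 0x7e (126) and each statistic is a 64-bit
counter, the NO_DMA response length above works out to (126 * 64) >> 3 =
1008 bytes. A hypothetical driver-side sanity check:

	BUILD_BUG_ON(MC_CMD_MAC_STATS_V5_OUT_NO_DMA_LEN != 126 * sizeof(u64));
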
/***********************************/
@@ -6984,6 +7503,7 @@
#define MC_CMD_WOL_FILTER_RESET_IN_LEN 4
#define MC_CMD_WOL_FILTER_RESET_IN_MASK_OFST 0
#define MC_CMD_WOL_FILTER_RESET_IN_MASK_LEN 4
+/* enum property: bitmask */
#define MC_CMD_WOL_FILTER_RESET_IN_WAKE_FILTERS 0x1 /* enum */
#define MC_CMD_WOL_FILTER_RESET_IN_LIGHTSOUT_OFFLOADS 0x2 /* enum */
@@ -6992,23 +7512,6 @@
/***********************************/
-/* MC_CMD_SET_MCAST_HASH
- * Set the MCAST hash value without otherwise reconfiguring the MAC
- */
-#define MC_CMD_SET_MCAST_HASH 0x35
-
-/* MC_CMD_SET_MCAST_HASH_IN msgrequest */
-#define MC_CMD_SET_MCAST_HASH_IN_LEN 32
-#define MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST 0
-#define MC_CMD_SET_MCAST_HASH_IN_HASH0_LEN 16
-#define MC_CMD_SET_MCAST_HASH_IN_HASH1_OFST 16
-#define MC_CMD_SET_MCAST_HASH_IN_HASH1_LEN 16
-
-/* MC_CMD_SET_MCAST_HASH_OUT msgresponse */
-#define MC_CMD_SET_MCAST_HASH_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_NVRAM_TYPES
* Return bitfield indicating available types of virtual NVRAM partitions.
* Locks required: none. Returns: 0
@@ -7026,6 +7529,7 @@
/* Bit mask of supported types. */
#define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0
#define MC_CMD_NVRAM_TYPES_OUT_TYPES_LEN 4
+/* enum property: bitshift */
/* enum: Disabled callisto. */
#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0
/* enum: MC firmware. */
@@ -7152,6 +7656,12 @@
#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_OFST 12
#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_LBN 7
#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_WRITE_ONLY_OFST 12
+#define MC_CMD_NVRAM_INFO_V2_OUT_WRITE_ONLY_LBN 8
+#define MC_CMD_NVRAM_INFO_V2_OUT_WRITE_ONLY_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_SEQUENTIAL_WRITE_OFST 12
+#define MC_CMD_NVRAM_INFO_V2_OUT_SEQUENTIAL_WRITE_LBN 9
+#define MC_CMD_NVRAM_INFO_V2_OUT_SEQUENTIAL_WRITE_WIDTH 1
#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_OFST 16
#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_LEN 4
#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_OFST 20
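The two new flag bits follow the header's (_OFST, _LBN, _WIDTH) convention:
_OFST gives the byte offset of the containing 32-bit word and _LBN the bit
position within it. Extracting them from a hypothetical little-endian
"flags" word read from offset 12:

	bool write_only = (flags >> MC_CMD_NVRAM_INFO_V2_OUT_WRITE_ONLY_LBN) & 1;
	bool seq_write = (flags >> MC_CMD_NVRAM_INFO_V2_OUT_SEQUENTIAL_WRITE_LBN) & 1;
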
@@ -7499,6 +8009,128 @@
#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_COMPONENT_COPY_FAILED 0x19
/* enum: The update operation is in-progress. */
#define MC_CMD_NVRAM_VERIFY_RC_PENDING 0x1a
+/* enum: The update was an invalid user configuration file. */
+#define MC_CMD_NVRAM_VERIFY_RC_BAD_CONFIG 0x1b
+/* enum: The write was to the AUTO partition but the data was not recognised as
+ * a valid partition.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_UNKNOWN_TYPE 0x1c
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT msgresponse */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_LEN 88
+/* Result of NVRAM update completion processing. Result codes that indicate an
+ * internal build failure, and are therefore not expected to be seen by
+ * customers in the field, are marked with the prefix 'Internal-error'.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_RESULT_CODE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_RESULT_CODE_LEN 4
+/* enum: Invalid return code; only non-zero values are defined. Defined as
+ * unknown for backwards compatibility with NVRAM_UPDATE_FINISH_OUT.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_UNKNOWN 0x0 */
+/* enum: Verify succeeded without any errors. */
+/* MC_CMD_NVRAM_VERIFY_RC_SUCCESS 0x1 */
+/* enum: CMS format verification failed due to an internal error. */
+/* MC_CMD_NVRAM_VERIFY_RC_CMS_CHECK_FAILED 0x2 */
+/* enum: Invalid CMS format in image metadata. */
+/* MC_CMD_NVRAM_VERIFY_RC_INVALID_CMS_FORMAT 0x3 */
+/* enum: Message digest verification failed due to an internal error. */
+/* MC_CMD_NVRAM_VERIFY_RC_MESSAGE_DIGEST_CHECK_FAILED 0x4 */
+/* enum: Error in message digest calculated over the reflash-header, payload
+ * and reflash-trailer.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BAD_MESSAGE_DIGEST 0x5 */
+/* enum: Signature verification failed due to an internal error. */
+/* MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHECK_FAILED 0x6 */
+/* enum: There are no valid signatures in the image. */
+/* MC_CMD_NVRAM_VERIFY_RC_NO_VALID_SIGNATURES 0x7 */
+/* enum: Trusted approvers verification failed due to an internal error. */
+/* MC_CMD_NVRAM_VERIFY_RC_TRUSTED_APPROVERS_CHECK_FAILED 0x8 */
+/* enum: The Trusted approver's list is empty. */
+/* MC_CMD_NVRAM_VERIFY_RC_NO_TRUSTED_APPROVERS 0x9 */
+/* enum: Signature chain verification failed due to an internal error. */
+/* MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHAIN_CHECK_FAILED 0xa */
+/* enum: The signers of the signatures in the image are not listed in the
+ * Trusted approver's list.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH 0xb */
+/* enum: The image contains a test-signed certificate, but the adapter accepts
+ * only production signed images.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_REJECT_TEST_SIGNED 0xc */
+/* enum: The image has a lower security level than the current firmware. */
+/* MC_CMD_NVRAM_VERIFY_RC_SECURITY_LEVEL_DOWNGRADE 0xd */
+/* enum: Internal-error. The signed image is missing the 'contents' section,
+ * where the 'contents' section holds the actual image payload to be applied.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_CONTENT_NOT_FOUND 0xe */
+/* enum: Internal-error. The bundle header is invalid. */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_CONTENT_HEADER_INVALID 0xf */
+/* enum: Internal-error. The bundle does not have a valid reflash image layout.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_REFLASH_IMAGE_INVALID 0x10 */
+/* enum: Internal-error. The bundle has an inconsistent layout of components or
+ * incorrect checksum.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_IMAGE_LAYOUT_INVALID 0x11 */
+/* enum: Internal-error. The bundle manifest is inconsistent with components in
+ * the bundle.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_INVALID 0x12 */
+/* enum: Internal-error. The number of components in a bundle do not match the
+ * number of components advertised by the bundle manifest.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_NUM_COMPONENTS_MISMATCH 0x13 */
+/* enum: Internal-error. The bundle contains too many components for the MC
+ * firmware to process
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_TOO_MANY_COMPONENTS 0x14 */
+/* enum: Internal-error. The bundle manifest has an invalid/inconsistent
+ * component.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_INVALID 0x15 */
+/* enum: Internal-error. The hash of a component does not match the hash stored
+ * in the bundle manifest.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_HASH_MISMATCH 0x16 */
+/* enum: Internal-error. Component hash calculation failed. */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_HASH_FAILED 0x17 */
+/* enum: Internal-error. The component does not have a valid reflash image
+ * layout.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_COMPONENT_REFLASH_IMAGE_INVALID 0x18 */
+/* enum: The bundle processing code failed to copy a component to its target
+ * partition.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_COMPONENT_COPY_FAILED 0x19 */
+/* enum: The update operation is in-progress. */
+/* MC_CMD_NVRAM_VERIFY_RC_PENDING 0x1a */
+/* enum: The update was an invalid user configuration file. */
+/* MC_CMD_NVRAM_VERIFY_RC_BAD_CONFIG 0x1b */
+/* enum: The write was to the AUTO partition but the data was not recognised as
+ * a valid partition.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_UNKNOWN_TYPE 0x1c */
+/* If the update was a user configuration, what action(s) the user must take to
+ * apply the new configuration.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_ACTIONS_REQUIRED_OFST 4
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_ACTIONS_REQUIRED_LEN 4
+/* enum: No action required. */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_NONE 0x0
+/* enum: The MC firmware must be rebooted (e.g. with MC_CMD_REBOOT). */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_FIRMWARE_REBOOT 0x1
+/* enum: The host must be rebooted. */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_HOST_REBOOT 0x2
+/* enum: The firmware and host must be rebooted (in either order). */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_FIRMWARE_AND_HOST_REBOOT 0x3
+/* enum: The host must be fully powered off. */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_HOST_POWERCYCLE 0x4
+/* If the update failed with MC_CMD_NVRAM_VERIFY_RC_BAD_CONFIG, a null-
+ * terminated US-ASCII string suitable for showing to the user.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_ERROR_STRING_OFST 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_ERROR_STRING_LEN 80
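A sketch of consuming the new response words, assuming the sfc MCDI_DWORD
helper and an "outbuf" holding a V3 response (hypothetical usage, not part
of this patch):

	u32 actions = MCDI_DWORD(outbuf,
				 NVRAM_UPDATE_FINISH_V3_OUT_ACTIONS_REQUIRED);

	switch (actions) {
	case MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_NONE:
		break;		/* nothing to do */
	case MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_FIRMWARE_REBOOT:
		/* e.g. schedule an MC_CMD_REBOOT */
		break;
	default:
		/* host reboot or power-cycle required: surface to the user */
		break;
	}
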
/***********************************/
@@ -7522,7 +8154,7 @@
#define MC_CMD_REBOOT 0x3d
#undef MC_CMD_0x3d_PRIVILEGE_CTG
-#define MC_CMD_0x3d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x3d_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_REBOOT_IN msgrequest */
#define MC_CMD_REBOOT_IN_LEN 4
@@ -7535,65 +8167,6 @@
/***********************************/
-/* MC_CMD_SCHEDINFO
- * Request scheduler info. Locks required: NONE. Returns: An array of
- * (timeslice,maximum overrun), one for each thread, in ascending order of
- * thread address.
- */
-#define MC_CMD_SCHEDINFO 0x3e
-#undef MC_CMD_0x3e_PRIVILEGE_CTG
-
-#define MC_CMD_0x3e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SCHEDINFO_IN msgrequest */
-#define MC_CMD_SCHEDINFO_IN_LEN 0
-
-/* MC_CMD_SCHEDINFO_OUT msgresponse */
-#define MC_CMD_SCHEDINFO_OUT_LENMIN 4
-#define MC_CMD_SCHEDINFO_OUT_LENMAX 252
-#define MC_CMD_SCHEDINFO_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_SCHEDINFO_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_SCHEDINFO_OUT_DATA_NUM(len) (((len)-0)/4)
-#define MC_CMD_SCHEDINFO_OUT_DATA_OFST 0
-#define MC_CMD_SCHEDINFO_OUT_DATA_LEN 4
-#define MC_CMD_SCHEDINFO_OUT_DATA_MINNUM 1
-#define MC_CMD_SCHEDINFO_OUT_DATA_MAXNUM 63
-#define MC_CMD_SCHEDINFO_OUT_DATA_MAXNUM_MCDI2 255
-
-
-/***********************************/
-/* MC_CMD_REBOOT_MODE
- * Set the mode for the next MC reboot. Locks required: NONE. Sets the reboot
- * mode to the specified value. Returns the old mode.
- */
-#define MC_CMD_REBOOT_MODE 0x3f
-#undef MC_CMD_0x3f_PRIVILEGE_CTG
-
-#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_REBOOT_MODE_IN msgrequest */
-#define MC_CMD_REBOOT_MODE_IN_LEN 4
-#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
-#define MC_CMD_REBOOT_MODE_IN_VALUE_LEN 4
-/* enum: Normal. */
-#define MC_CMD_REBOOT_MODE_NORMAL 0x0
-/* enum: Power-on Reset. */
-#define MC_CMD_REBOOT_MODE_POR 0x2
-/* enum: Snapper. */
-#define MC_CMD_REBOOT_MODE_SNAPPER 0x3
-/* enum: snapper fake POR */
-#define MC_CMD_REBOOT_MODE_SNAPPER_POR 0x4
-#define MC_CMD_REBOOT_MODE_IN_FAKE_OFST 0
-#define MC_CMD_REBOOT_MODE_IN_FAKE_LBN 7
-#define MC_CMD_REBOOT_MODE_IN_FAKE_WIDTH 1
-
-/* MC_CMD_REBOOT_MODE_OUT msgresponse */
-#define MC_CMD_REBOOT_MODE_OUT_LEN 4
-#define MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0
-#define MC_CMD_REBOOT_MODE_OUT_VALUE_LEN 4
-
-
-/***********************************/
/* MC_CMD_SENSOR_INFO
* Returns information about every available sensor.
*
@@ -8061,6 +8634,54 @@
/* MC_CMD_GET_PHY_STATE_IN msgrequest */
#define MC_CMD_GET_PHY_STATE_IN_LEN 0
+/* MC_CMD_GET_PHY_STATE_IN_V2 msgrequest */
+#define MC_CMD_GET_PHY_STATE_IN_V2_LEN 8
+/* Target port to request PHY state for. Uses MAE_LINK_ENDPOINT_SELECTOR which
+ * identifies a real or virtual network port by MAE port and link end. See the
+ * structure definition for more details.
+ */
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LEN 8
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LO_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LO_LEN 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LO_LBN 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LO_WIDTH 32
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_HI_OFST 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_HI_LEN 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_HI_LBN 32
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FLAT_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_TYPE_OFST 3
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 20
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 16
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 2
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LINK_END_OFST 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LINK_END_LEN 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_LEN 8
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_LO_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_LO_LBN 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_HI_OFST 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_HI_LBN 32
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_HI_WIDTH 32
+
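Every *_IN_V2/V3 request added in this patch embeds the same 8-byte TARGET
field. A sketch of selecting "the caller's own port, VNIC end" (the
combination the removed SET_MAC_V3 comment above guaranteed to work),
assuming the MAE_MPORT_SELECTOR_ASSIGNED and MAE_MPORT_END_VNIC values
defined with the MAE structures elsewhere in this header:

	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PHY_STATE_IN_V2_LEN);

	/* Address the MPORT assigned to this function... */
	MCDI_SET_DWORD(inbuf, GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR,
		       MAE_MPORT_SELECTOR_ASSIGNED);
	/* ...and the VNIC end of that link. */
	MCDI_SET_DWORD(inbuf, GET_PHY_STATE_IN_V2_TARGET_LINK_END,
		       MAE_MPORT_END_VNIC);
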
/* MC_CMD_GET_PHY_STATE_OUT msgresponse */
#define MC_CMD_GET_PHY_STATE_OUT_LEN 4
#define MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0
@@ -8072,22 +8693,6 @@
/***********************************/
-/* MC_CMD_SETUP_8021QBB
- * 802.1Qbb control. 8 Tx queues that map to priorities 0 - 7. Use all 1s to
- * disable 802.Qbb for a given priority.
- */
-#define MC_CMD_SETUP_8021QBB 0x44
-
-/* MC_CMD_SETUP_8021QBB_IN msgrequest */
-#define MC_CMD_SETUP_8021QBB_IN_LEN 32
-#define MC_CMD_SETUP_8021QBB_IN_TXQS_OFST 0
-#define MC_CMD_SETUP_8021QBB_IN_TXQS_LEN 32
-
-/* MC_CMD_SETUP_8021QBB_OUT msgresponse */
-#define MC_CMD_SETUP_8021QBB_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_WOL_FILTER_GET
* Retrieve ID of any WoL filters. Locks required: None. Returns: 0, ENOSYS
*/
@@ -8106,133 +8711,6 @@
/***********************************/
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD
- * Add a protocol offload to NIC for lights-out state. Locks required: None.
- * Returns: 0, ENOSYS
- */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46
-#undef MC_CMD_0x46_PRIVILEGE_CTG
-
-#define MC_CMD_0x46_PRIVILEGE_CTG SRIOV_CTG_LINK
-
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN msgrequest */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMIN 8
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX_MCDI2 1020
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN(num) (4+4*(num))
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_NUM(len) (((len)-4)/4)
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4
-#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1 /* enum */
-#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2 /* enum */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_LEN 4
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MINNUM 1
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MAXNUM 62
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MAXNUM_MCDI2 254
-
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP msgrequest */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN 14
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_OFST 4
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_LEN 6
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_OFST 10
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_LEN 4
-
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS msgrequest */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN 42
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_OFST 4
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_LEN 6
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_OFST 10
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_LEN 16
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_OFST 26
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_LEN 16
-
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT msgresponse */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_LEN 4
-
-
-/***********************************/
-/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD
- * Remove a protocol offload from NIC for lights-out state. Locks required:
- * None. Returns: 0, ENOSYS
- */
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47
-#undef MC_CMD_0x47_PRIVILEGE_CTG
-
-#define MC_CMD_0x47_PRIVILEGE_CTG SRIOV_CTG_LINK
-
-/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN msgrequest */
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_LEN 4
-
-/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT msgresponse */
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_MAC_RESET_RESTORE
- * Restore MAC after block reset. Locks required: None. Returns: 0.
- */
-#define MC_CMD_MAC_RESET_RESTORE 0x48
-
-/* MC_CMD_MAC_RESET_RESTORE_IN msgrequest */
-#define MC_CMD_MAC_RESET_RESTORE_IN_LEN 0
-
-/* MC_CMD_MAC_RESET_RESTORE_OUT msgresponse */
-#define MC_CMD_MAC_RESET_RESTORE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_TESTASSERT
- * Deliberately trigger an assert-detonation in the firmware for testing
- * purposes (i.e. to allow tests that the driver copes gracefully). Locks
- * required: None Returns: 0
- */
-#define MC_CMD_TESTASSERT 0x49
-#undef MC_CMD_0x49_PRIVILEGE_CTG
-
-#define MC_CMD_0x49_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_TESTASSERT_IN msgrequest */
-#define MC_CMD_TESTASSERT_IN_LEN 0
-
-/* MC_CMD_TESTASSERT_OUT msgresponse */
-#define MC_CMD_TESTASSERT_OUT_LEN 0
-
-/* MC_CMD_TESTASSERT_V2_IN msgrequest */
-#define MC_CMD_TESTASSERT_V2_IN_LEN 4
-/* How to provoke the assertion */
-#define MC_CMD_TESTASSERT_V2_IN_TYPE_OFST 0
-#define MC_CMD_TESTASSERT_V2_IN_TYPE_LEN 4
-/* enum: Assert using the FAIL_ASSERTION_WITH_USEFUL_VALUES macro. Unless
- * you're testing firmware, this is what you want.
- */
-#define MC_CMD_TESTASSERT_V2_IN_FAIL_ASSERTION_WITH_USEFUL_VALUES 0x0
-/* enum: Assert using assert(0); */
-#define MC_CMD_TESTASSERT_V2_IN_ASSERT_FALSE 0x1
-/* enum: Deliberately trigger a watchdog */
-#define MC_CMD_TESTASSERT_V2_IN_WATCHDOG 0x2
-/* enum: Deliberately trigger a trap by loading from an invalid address */
-#define MC_CMD_TESTASSERT_V2_IN_LOAD_TRAP 0x3
-/* enum: Deliberately trigger a trap by storing to an invalid address */
-#define MC_CMD_TESTASSERT_V2_IN_STORE_TRAP 0x4
-/* enum: Jump to an invalid address */
-#define MC_CMD_TESTASSERT_V2_IN_JUMP_TRAP 0x5
-
-/* MC_CMD_TESTASSERT_V2_OUT msgresponse */
-#define MC_CMD_TESTASSERT_V2_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_WORKAROUND
* Enable/Disable a given workaround. The mcfw will return EINVAL if it doesn't
* understand the given workaround number - which should not be treated as a
@@ -8324,6 +8802,62 @@
#define MC_CMD_GET_PHY_MEDIA_INFO_IN_DSFP_BANK_LBN 16
#define MC_CMD_GET_PHY_MEDIA_INFO_IN_DSFP_BANK_WIDTH 16
+/* MC_CMD_GET_PHY_MEDIA_INFO_IN_V2 msgrequest */
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_LEN 12
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_PAGE_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_PAGE_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_PAGE_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_PAGE_LBN 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_PAGE_WIDTH 16
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_BANK_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_BANK_LBN 16
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_BANK_WIDTH 16
+/* Target port to request PHY media info for. Uses MAE_LINK_ENDPOINT_SELECTOR
+ * which identifies a real or virtual network port by MAE port and link end.
+ * See the structure definition for more details.
+ */
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LEN 8
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LO_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LO_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LO_LBN 32
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LO_WIDTH 32
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_HI_OFST 8
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_HI_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_HI_LBN 64
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FLAT_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_TYPE_OFST 7
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 32
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 52
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 48
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 6
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LINK_END_OFST 8
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LINK_END_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_LEN 8
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_LO_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_LO_LBN 32
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_HI_OFST 8
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_HI_LBN 64
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_HI_WIDTH 32
+
/* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 252
@@ -8348,7 +8882,7 @@
#define MC_CMD_NVRAM_TEST 0x4c
#undef MC_CMD_0x4c_PRIVILEGE_CTG
-#define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_NVRAM_TEST_IN msgrequest */
#define MC_CMD_NVRAM_TEST_IN_LEN 4
@@ -8370,103 +8904,6 @@
/***********************************/
-/* MC_CMD_MRSFP_TWEAK
- * Read status and/or set parameters for the 'mrsfp' driver in mr_rusty builds.
- * I2C I/O expander bits are always read; if equaliser parameters are supplied,
- * they are configured first. Locks required: None. Return code: 0, EINVAL.
- */
-#define MC_CMD_MRSFP_TWEAK 0x4d
-
-/* MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG msgrequest */
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16
-/* 0-6 low->high de-emph. */
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_LEN 4
-/* 0-8 low->high ref.V */
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_LEN 4
-/* 0-8 0-8 low->high boost */
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_LEN 4
-/* 0-8 low->high ref.V */
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_LEN 4
-
-/* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */
-#define MC_CMD_MRSFP_TWEAK_IN_READ_ONLY_LEN 0
-
-/* MC_CMD_MRSFP_TWEAK_OUT msgresponse */
-#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12
-/* input bits */
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_LEN 4
-/* output bits */
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_LEN 4
-/* direction */
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_LEN 4
-/* enum: Out. */
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0
-/* enum: In. */
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_IN 0x1
-
-
-/***********************************/
-/* MC_CMD_SENSOR_SET_LIMS
- * Adjusts the sensor limits. This is a warranty-voiding operation. Returns:
- * ENOENT if the sensor specified does not exist, EINVAL if the limits are out
- * of range.
- */
-#define MC_CMD_SENSOR_SET_LIMS 0x4e
-#undef MC_CMD_0x4e_PRIVILEGE_CTG
-
-#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_SENSOR_SET_LIMS_IN msgrequest */
-#define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
-#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
-#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
-/* interpretation is is sensor-specific. */
-#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
-#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_LEN 4
-/* interpretation is is sensor-specific. */
-#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
-#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_LEN 4
-/* interpretation is is sensor-specific. */
-#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
-#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_LEN 4
-/* interpretation is is sensor-specific. */
-#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
-#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_LEN 4
-
-/* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */
-#define MC_CMD_SENSOR_SET_LIMS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_GET_RESOURCE_LIMITS
- */
-#define MC_CMD_GET_RESOURCE_LIMITS 0x4f
-
-/* MC_CMD_GET_RESOURCE_LIMITS_IN msgrequest */
-#define MC_CMD_GET_RESOURCE_LIMITS_IN_LEN 0
-
-/* MC_CMD_GET_RESOURCE_LIMITS_OUT msgresponse */
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN 16
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_OFST 0
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_LEN 4
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_OFST 4
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_LEN 4
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_LEN 4
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_LEN 4
-
-
-/***********************************/
/* MC_CMD_NVRAM_PARTITIONS
* Reads the list of available virtual NVRAM partition types. Locks required:
* none. Returns: 0, EINVAL (bad type).
@@ -8582,806 +9019,6 @@
#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12
#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_LEN 4
-
-/***********************************/
-/* MC_CMD_CLP
- * Perform a CLP related operation, see SF-110495-PS for details of CLP
- * processing. This command has been extended to accomodate the requirements of
- * different manufacturers which are to be found in SF-119187-TC, SF-119186-TC,
- * SF-120509-TC and SF-117282-PS.
- */
-#define MC_CMD_CLP 0x56
-#undef MC_CMD_0x56_PRIVILEGE_CTG
-
-#define MC_CMD_0x56_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_CLP_IN msgrequest */
-#define MC_CMD_CLP_IN_LEN 4
-/* Sub operation */
-#define MC_CMD_CLP_IN_OP_OFST 0
-#define MC_CMD_CLP_IN_OP_LEN 4
-/* enum: Return to factory default settings */
-#define MC_CMD_CLP_OP_DEFAULT 0x1
-/* enum: Set MAC address */
-#define MC_CMD_CLP_OP_SET_MAC 0x2
-/* enum: Get MAC address */
-#define MC_CMD_CLP_OP_GET_MAC 0x3
-/* enum: Set UEFI/GPXE boot mode */
-#define MC_CMD_CLP_OP_SET_BOOT 0x4
-/* enum: Get UEFI/GPXE boot mode */
-#define MC_CMD_CLP_OP_GET_BOOT 0x5
-
-/* MC_CMD_CLP_OUT msgresponse */
-#define MC_CMD_CLP_OUT_LEN 0
-
-/* MC_CMD_CLP_IN_DEFAULT msgrequest */
-#define MC_CMD_CLP_IN_DEFAULT_LEN 4
-/* MC_CMD_CLP_IN_OP_OFST 0 */
-/* MC_CMD_CLP_IN_OP_LEN 4 */
-
-/* MC_CMD_CLP_OUT_DEFAULT msgresponse */
-#define MC_CMD_CLP_OUT_DEFAULT_LEN 0
-
-/* MC_CMD_CLP_IN_SET_MAC msgrequest */
-#define MC_CMD_CLP_IN_SET_MAC_LEN 12
-/* MC_CMD_CLP_IN_OP_OFST 0 */
-/* MC_CMD_CLP_IN_OP_LEN 4 */
-/* The MAC address assigned to port. A zero MAC address of 00:00:00:00:00:00
- * restores the permanent (factory-programmed) MAC address associated with the
- * port. A non-zero MAC address persists until a PCIe reset or a power cycle.
- */
-#define MC_CMD_CLP_IN_SET_MAC_ADDR_OFST 4
-#define MC_CMD_CLP_IN_SET_MAC_ADDR_LEN 6
-/* Padding */
-#define MC_CMD_CLP_IN_SET_MAC_RESERVED_OFST 10
-#define MC_CMD_CLP_IN_SET_MAC_RESERVED_LEN 2
-
-/* MC_CMD_CLP_OUT_SET_MAC msgresponse */
-#define MC_CMD_CLP_OUT_SET_MAC_LEN 0
-
-/* MC_CMD_CLP_IN_SET_MAC_V2 msgrequest */
-#define MC_CMD_CLP_IN_SET_MAC_V2_LEN 16
-/* MC_CMD_CLP_IN_OP_OFST 0 */
-/* MC_CMD_CLP_IN_OP_LEN 4 */
-/* The MAC address assigned to port. A zero MAC address of 00:00:00:00:00:00
- * restores the permanent (factory-programmed) MAC address associated with the
- * port. A non-zero MAC address persists until a PCIe reset or a power cycle.
- */
-#define MC_CMD_CLP_IN_SET_MAC_V2_ADDR_OFST 4
-#define MC_CMD_CLP_IN_SET_MAC_V2_ADDR_LEN 6
-/* Padding */
-#define MC_CMD_CLP_IN_SET_MAC_V2_RESERVED_OFST 10
-#define MC_CMD_CLP_IN_SET_MAC_V2_RESERVED_LEN 2
-#define MC_CMD_CLP_IN_SET_MAC_V2_FLAGS_OFST 12
-#define MC_CMD_CLP_IN_SET_MAC_V2_FLAGS_LEN 4
-#define MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_OFST 12
-#define MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_LBN 0
-#define MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_WIDTH 1
-
-/* MC_CMD_CLP_IN_GET_MAC msgrequest */
-#define MC_CMD_CLP_IN_GET_MAC_LEN 4
-/* MC_CMD_CLP_IN_OP_OFST 0 */
-/* MC_CMD_CLP_IN_OP_LEN 4 */
-
-/* MC_CMD_CLP_IN_GET_MAC_V2 msgrequest */
-#define MC_CMD_CLP_IN_GET_MAC_V2_LEN 8
-/* MC_CMD_CLP_IN_OP_OFST 0 */
-/* MC_CMD_CLP_IN_OP_LEN 4 */
-#define MC_CMD_CLP_IN_GET_MAC_V2_FLAGS_OFST 4
-#define MC_CMD_CLP_IN_GET_MAC_V2_FLAGS_LEN 4
-#define MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_OFST 4
-#define MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_LBN 0
-#define MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_WIDTH 1
-
-/* MC_CMD_CLP_OUT_GET_MAC msgresponse */
-#define MC_CMD_CLP_OUT_GET_MAC_LEN 8
-/* MAC address assigned to port */
-#define MC_CMD_CLP_OUT_GET_MAC_ADDR_OFST 0
-#define MC_CMD_CLP_OUT_GET_MAC_ADDR_LEN 6
-/* Padding */
-#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_OFST 6
-#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_LEN 2
-
-/* MC_CMD_CLP_IN_SET_BOOT msgrequest */
-#define MC_CMD_CLP_IN_SET_BOOT_LEN 5
-/* MC_CMD_CLP_IN_OP_OFST 0 */
-/* MC_CMD_CLP_IN_OP_LEN 4 */
-/* Boot flag */
-#define MC_CMD_CLP_IN_SET_BOOT_FLAG_OFST 4
-#define MC_CMD_CLP_IN_SET_BOOT_FLAG_LEN 1
-
-/* MC_CMD_CLP_OUT_SET_BOOT msgresponse */
-#define MC_CMD_CLP_OUT_SET_BOOT_LEN 0
-
-/* MC_CMD_CLP_IN_GET_BOOT msgrequest */
-#define MC_CMD_CLP_IN_GET_BOOT_LEN 4
-/* MC_CMD_CLP_IN_OP_OFST 0 */
-/* MC_CMD_CLP_IN_OP_LEN 4 */
-
-/* MC_CMD_CLP_OUT_GET_BOOT msgresponse */
-#define MC_CMD_CLP_OUT_GET_BOOT_LEN 4
-/* Boot flag */
-#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_OFST 0
-#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_LEN 1
-/* Padding */
-#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_OFST 1
-#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_LEN 3
-
-
-/***********************************/
-/* MC_CMD_MUM
- * Perform a MUM operation
- */
-#define MC_CMD_MUM 0x57
-#undef MC_CMD_0x57_PRIVILEGE_CTG
-
-#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_MUM_IN msgrequest */
-#define MC_CMD_MUM_IN_LEN 4
-#define MC_CMD_MUM_IN_OP_HDR_OFST 0
-#define MC_CMD_MUM_IN_OP_HDR_LEN 4
-#define MC_CMD_MUM_IN_OP_OFST 0
-#define MC_CMD_MUM_IN_OP_LBN 0
-#define MC_CMD_MUM_IN_OP_WIDTH 8
-/* enum: NULL MCDI command to MUM */
-#define MC_CMD_MUM_OP_NULL 0x1
-/* enum: Get MUM version */
-#define MC_CMD_MUM_OP_GET_VERSION 0x2
-/* enum: Issue raw I2C command to MUM */
-#define MC_CMD_MUM_OP_RAW_CMD 0x3
-/* enum: Read from registers on devices connected to MUM. */
-#define MC_CMD_MUM_OP_READ 0x4
-/* enum: Write to registers on devices connected to MUM. */
-#define MC_CMD_MUM_OP_WRITE 0x5
-/* enum: Control UART logging. */
-#define MC_CMD_MUM_OP_LOG 0x6
-/* enum: Operations on MUM GPIO lines */
-#define MC_CMD_MUM_OP_GPIO 0x7
-/* enum: Get sensor readings from MUM */
-#define MC_CMD_MUM_OP_READ_SENSORS 0x8
-/* enum: Initiate clock programming on the MUM */
-#define MC_CMD_MUM_OP_PROGRAM_CLOCKS 0x9
-/* enum: Initiate FPGA load from flash on the MUM */
-#define MC_CMD_MUM_OP_FPGA_LOAD 0xa
-/* enum: Request sensor reading from MUM ADC resulting from earlier request via
- * MUM ATB
- */
-#define MC_CMD_MUM_OP_READ_ATB_SENSOR 0xb
-/* enum: Send commands relating to the QSFP ports via the MUM for PHY
- * operations
- */
-#define MC_CMD_MUM_OP_QSFP 0xc
-/* enum: Request discrete and SODIMM DDR info (type, size, speed grade, voltage
- * level) from MUM
- */
-#define MC_CMD_MUM_OP_READ_DDR_INFO 0xd
-
-/* MC_CMD_MUM_IN_NULL msgrequest */
-#define MC_CMD_MUM_IN_NULL_LEN 4
-/* MUM cmd header */
-#define MC_CMD_MUM_IN_CMD_OFST 0
-#define MC_CMD_MUM_IN_CMD_LEN 4
-
-/* MC_CMD_MUM_IN_GET_VERSION msgrequest */
-#define MC_CMD_MUM_IN_GET_VERSION_LEN 4
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-
-/* MC_CMD_MUM_IN_READ msgrequest */
-#define MC_CMD_MUM_IN_READ_LEN 16
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-/* ID of (device connected to MUM) to read from registers of */
-#define MC_CMD_MUM_IN_READ_DEVICE_OFST 4
-#define MC_CMD_MUM_IN_READ_DEVICE_LEN 4
-/* enum: Hittite HMC1035 clock generator on Sorrento board */
-#define MC_CMD_MUM_DEV_HITTITE 0x1
-/* enum: Hittite HMC1035 clock generator for NIC-side on Sorrento board */
-#define MC_CMD_MUM_DEV_HITTITE_NIC 0x2
-/* 32-bit address to read from */
-#define MC_CMD_MUM_IN_READ_ADDR_OFST 8
-#define MC_CMD_MUM_IN_READ_ADDR_LEN 4
-/* Number of words to read. */
-#define MC_CMD_MUM_IN_READ_NUMWORDS_OFST 12
-#define MC_CMD_MUM_IN_READ_NUMWORDS_LEN 4
-
-/* MC_CMD_MUM_IN_WRITE msgrequest */
-#define MC_CMD_MUM_IN_WRITE_LENMIN 16
-#define MC_CMD_MUM_IN_WRITE_LENMAX 252
-#define MC_CMD_MUM_IN_WRITE_LENMAX_MCDI2 1020
-#define MC_CMD_MUM_IN_WRITE_LEN(num) (12+4*(num))
-#define MC_CMD_MUM_IN_WRITE_BUFFER_NUM(len) (((len)-12)/4)
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-/* ID of (device connected to MUM) to write to registers of */
-#define MC_CMD_MUM_IN_WRITE_DEVICE_OFST 4
-#define MC_CMD_MUM_IN_WRITE_DEVICE_LEN 4
-/* enum: Hittite HMC1035 clock generator on Sorrento board */
-/* MC_CMD_MUM_DEV_HITTITE 0x1 */
-/* 32-bit address to write to */
-#define MC_CMD_MUM_IN_WRITE_ADDR_OFST 8
-#define MC_CMD_MUM_IN_WRITE_ADDR_LEN 4
-/* Words to write */
-#define MC_CMD_MUM_IN_WRITE_BUFFER_OFST 12
-#define MC_CMD_MUM_IN_WRITE_BUFFER_LEN 4
-#define MC_CMD_MUM_IN_WRITE_BUFFER_MINNUM 1
-#define MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM 60
-#define MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM_MCDI2 252
-
-/* MC_CMD_MUM_IN_RAW_CMD msgrequest */
-#define MC_CMD_MUM_IN_RAW_CMD_LENMIN 17
-#define MC_CMD_MUM_IN_RAW_CMD_LENMAX 252
-#define MC_CMD_MUM_IN_RAW_CMD_LENMAX_MCDI2 1020
-#define MC_CMD_MUM_IN_RAW_CMD_LEN(num) (16+1*(num))
-#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_NUM(len) (((len)-16)/1)
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-/* MUM I2C cmd code */
-#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_OFST 4
-#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_LEN 4
-/* Number of bytes to write */
-#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_OFST 8
-#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_LEN 4
-/* Number of bytes to read */
-#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_OFST 12
-#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_LEN 4
-/* Bytes to write */
-#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_OFST 16
-#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_LEN 1
-#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MINNUM 1
-#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM 236
-#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM_MCDI2 1004
-
-/* MC_CMD_MUM_IN_LOG msgrequest */
-#define MC_CMD_MUM_IN_LOG_LEN 8
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_LOG_OP_OFST 4
-#define MC_CMD_MUM_IN_LOG_OP_LEN 4
-#define MC_CMD_MUM_IN_LOG_OP_UART 0x1 /* enum */
-
-/* MC_CMD_MUM_IN_LOG_OP_UART msgrequest */
-#define MC_CMD_MUM_IN_LOG_OP_UART_LEN 12
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-/* MC_CMD_MUM_IN_LOG_OP_OFST 4 */
-/* MC_CMD_MUM_IN_LOG_OP_LEN 4 */
-/* Enable/disable debug output to UART */
-#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_OFST 8
-#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_LEN 4
-
-/* MC_CMD_MUM_IN_GPIO msgrequest */
-#define MC_CMD_MUM_IN_GPIO_LEN 8
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_HDR_LEN 4
-#define MC_CMD_MUM_IN_GPIO_OPCODE_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OPCODE_LBN 0
-#define MC_CMD_MUM_IN_GPIO_OPCODE_WIDTH 8
-#define MC_CMD_MUM_IN_GPIO_IN_READ 0x0 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE 0x1 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OUT_READ 0x2 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE 0x3 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ 0x4 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OP 0x5 /* enum */
-
-/* MC_CMD_MUM_IN_GPIO_IN_READ msgrequest */
-#define MC_CMD_MUM_IN_GPIO_IN_READ_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_LEN 4
-
-/* MC_CMD_MUM_IN_GPIO_OUT_WRITE msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_LEN 16
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_LEN 4
-/* The first 32-bit word to be written to the GPIO OUT register. */
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_OFST 8
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_LEN 4
-/* The second 32-bit word to be written to the GPIO OUT register. */
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_OFST 12
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_LEN 4
-
-/* MC_CMD_MUM_IN_GPIO_OUT_READ msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OUT_READ_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_LEN 4
-
-/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_LEN 16
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_LEN 4
-/* The first 32-bit word to be written to the GPIO OUT ENABLE register. */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_OFST 8
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_LEN 4
-/* The second 32-bit word to be written to the GPIO OUT ENABLE register. */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_OFST 12
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_LEN 4
-
-/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_LEN 4
-
-/* MC_CMD_MUM_IN_GPIO_OP msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OP_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OP_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_HDR_LEN 4
-#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN 8
-#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_WIDTH 8
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ 0x0 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE 0x1 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG 0x2 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE 0x3 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_LBN 16
-#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_WIDTH 8
-
-/* MC_CMD_MUM_IN_GPIO_OP_OUT_READ msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_LEN 4
-
-/* MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_LEN 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN 24
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_WIDTH 8
-
-/* MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_LEN 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_LBN 24
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_WIDTH 8
-
-/* MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_LEN 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_LBN 24
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_WIDTH 8
-
-/* MC_CMD_MUM_IN_READ_SENSORS msgrequest */
-#define MC_CMD_MUM_IN_READ_SENSORS_LEN 8
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_OFST 4
-#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_LEN 4
-#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_OFST 4
-#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN 0
-#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_WIDTH 8
-#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_OFST 4
-#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN 8
-#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_WIDTH 8
-
-/* MC_CMD_MUM_IN_PROGRAM_CLOCKS msgrequest */
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_LEN 12
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-/* Bit-mask of clocks to be programmed */
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_OFST 4
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_LEN 4
-#define MC_CMD_MUM_CLOCK_ID_FPGA 0x0 /* enum */
-#define MC_CMD_MUM_CLOCK_ID_DDR 0x1 /* enum */
-#define MC_CMD_MUM_CLOCK_ID_NIC 0x2 /* enum */
-/* Control flags for clock programming */
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_LEN 4
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_OFST 8
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_OFST 8
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_LBN 1
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_WIDTH 1
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_OFST 8
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_LBN 2
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_WIDTH 1
-
-/* MC_CMD_MUM_IN_FPGA_LOAD msgrequest */
-#define MC_CMD_MUM_IN_FPGA_LOAD_LEN 8
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-/* Enable/Disable FPGA config from flash */
-#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_OFST 4
-#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_LEN 4
-
-/* MC_CMD_MUM_IN_READ_ATB_SENSOR msgrequest */
-#define MC_CMD_MUM_IN_READ_ATB_SENSOR_LEN 4
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-
-/* MC_CMD_MUM_IN_QSFP msgrequest */
-#define MC_CMD_MUM_IN_QSFP_LEN 12
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_QSFP_HDR_OFST 4
-#define MC_CMD_MUM_IN_QSFP_HDR_LEN 4
-#define MC_CMD_MUM_IN_QSFP_OPCODE_OFST 4
-#define MC_CMD_MUM_IN_QSFP_OPCODE_LBN 0
-#define MC_CMD_MUM_IN_QSFP_OPCODE_WIDTH 4
-#define MC_CMD_MUM_IN_QSFP_INIT 0x0 /* enum */
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE 0x1 /* enum */
-#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP 0x2 /* enum */
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO 0x3 /* enum */
-#define MC_CMD_MUM_IN_QSFP_FILL_STATS 0x4 /* enum */
-#define MC_CMD_MUM_IN_QSFP_POLL_BIST 0x5 /* enum */
-#define MC_CMD_MUM_IN_QSFP_IDX_OFST 8
-#define MC_CMD_MUM_IN_QSFP_IDX_LEN 4
-
-/* MC_CMD_MUM_IN_QSFP_INIT msgrequest */
-#define MC_CMD_MUM_IN_QSFP_INIT_LEN 16
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_QSFP_INIT_HDR_OFST 4
-#define MC_CMD_MUM_IN_QSFP_INIT_HDR_LEN 4
-#define MC_CMD_MUM_IN_QSFP_INIT_IDX_OFST 8
-#define MC_CMD_MUM_IN_QSFP_INIT_IDX_LEN 4
-#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_OFST 12
-#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_LEN 4
-
-/* MC_CMD_MUM_IN_QSFP_RECONFIGURE msgrequest */
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_LEN 24
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_OFST 4
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_LEN 4
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_OFST 8
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_LEN 4
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_OFST 12
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_LEN 4
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_OFST 16
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_LEN 4
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_OFST 20
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_LEN 4
-
-/* MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP msgrequest */
-#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_LEN 12
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_OFST 4
-#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_LEN 4
-#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_OFST 8
-#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_LEN 4
-
-/* MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO msgrequest */
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_LEN 16
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_OFST 4
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_LEN 4
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_OFST 8
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_LEN 4
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_OFST 12
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_LEN 4
-
-/* MC_CMD_MUM_IN_QSFP_FILL_STATS msgrequest */
-#define MC_CMD_MUM_IN_QSFP_FILL_STATS_LEN 12
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_OFST 4
-#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_LEN 4
-#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_OFST 8
-#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_LEN 4
-
-/* MC_CMD_MUM_IN_QSFP_POLL_BIST msgrequest */
-#define MC_CMD_MUM_IN_QSFP_POLL_BIST_LEN 12
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4
-#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_LEN 4
-#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8
-#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_LEN 4
-
-/* MC_CMD_MUM_IN_READ_DDR_INFO msgrequest */
-#define MC_CMD_MUM_IN_READ_DDR_INFO_LEN 4
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-
-/* MC_CMD_MUM_OUT msgresponse */
-#define MC_CMD_MUM_OUT_LEN 0
-
-/* MC_CMD_MUM_OUT_NULL msgresponse */
-#define MC_CMD_MUM_OUT_NULL_LEN 0
-
-/* MC_CMD_MUM_OUT_GET_VERSION msgresponse */
-#define MC_CMD_MUM_OUT_GET_VERSION_LEN 12
-#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_OFST 0
-#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_LEN 4
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_LEN 4
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_LBN 32
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_WIDTH 32
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_OFST 8
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_LEN 4
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_LBN 64
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_WIDTH 32
-
-/* MC_CMD_MUM_OUT_RAW_CMD msgresponse */
-#define MC_CMD_MUM_OUT_RAW_CMD_LENMIN 1
-#define MC_CMD_MUM_OUT_RAW_CMD_LENMAX 252
-#define MC_CMD_MUM_OUT_RAW_CMD_LENMAX_MCDI2 1020
-#define MC_CMD_MUM_OUT_RAW_CMD_LEN(num) (0+1*(num))
-#define MC_CMD_MUM_OUT_RAW_CMD_DATA_NUM(len) (((len)-0)/1)
-/* returned data */
-#define MC_CMD_MUM_OUT_RAW_CMD_DATA_OFST 0
-#define MC_CMD_MUM_OUT_RAW_CMD_DATA_LEN 1
-#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MINNUM 1
-#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM 252
-#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM_MCDI2 1020
-
-/* MC_CMD_MUM_OUT_READ msgresponse */
-#define MC_CMD_MUM_OUT_READ_LENMIN 4
-#define MC_CMD_MUM_OUT_READ_LENMAX 252
-#define MC_CMD_MUM_OUT_READ_LENMAX_MCDI2 1020
-#define MC_CMD_MUM_OUT_READ_LEN(num) (0+4*(num))
-#define MC_CMD_MUM_OUT_READ_BUFFER_NUM(len) (((len)-0)/4)
-#define MC_CMD_MUM_OUT_READ_BUFFER_OFST 0
-#define MC_CMD_MUM_OUT_READ_BUFFER_LEN 4
-#define MC_CMD_MUM_OUT_READ_BUFFER_MINNUM 1
-#define MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM 63
-#define MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM_MCDI2 255
-
-/* MC_CMD_MUM_OUT_WRITE msgresponse */
-#define MC_CMD_MUM_OUT_WRITE_LEN 0
-
-/* MC_CMD_MUM_OUT_LOG msgresponse */
-#define MC_CMD_MUM_OUT_LOG_LEN 0
-
-/* MC_CMD_MUM_OUT_LOG_OP_UART msgresponse */
-#define MC_CMD_MUM_OUT_LOG_OP_UART_LEN 0
-
-/* MC_CMD_MUM_OUT_GPIO_IN_READ msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_IN_READ_LEN 8
-/* The first 32-bit word read from the GPIO IN register. */
-#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_OFST 0
-#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_LEN 4
-/* The second 32-bit word read from the GPIO IN register. */
-#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_OFST 4
-#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_LEN 4
-
-/* MC_CMD_MUM_OUT_GPIO_OUT_WRITE msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OUT_WRITE_LEN 0
-
-/* MC_CMD_MUM_OUT_GPIO_OUT_READ msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OUT_READ_LEN 8
-/* The first 32-bit word read from the GPIO OUT register. */
-#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_OFST 0
-#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_LEN 4
-/* The second 32-bit word read from the GPIO OUT register. */
-#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_OFST 4
-#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_LEN 4
-
-/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE_LEN 0
-
-/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_LEN 8
-#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_OFST 0
-#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_LEN 4
-#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_OFST 4
-#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_LEN 4
-
-/* MC_CMD_MUM_OUT_GPIO_OP_OUT_READ msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_LEN 4
-#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_OFST 0
-#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_LEN 4
-
-/* MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE_LEN 0
-
-/* MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG_LEN 0
-
-/* MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE_LEN 0
-
-/* MC_CMD_MUM_OUT_READ_SENSORS msgresponse */
-#define MC_CMD_MUM_OUT_READ_SENSORS_LENMIN 4
-#define MC_CMD_MUM_OUT_READ_SENSORS_LENMAX 252
-#define MC_CMD_MUM_OUT_READ_SENSORS_LENMAX_MCDI2 1020
-#define MC_CMD_MUM_OUT_READ_SENSORS_LEN(num) (0+4*(num))
-#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_NUM(len) (((len)-0)/4)
-#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_OFST 0
-#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_LEN 4
-#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MINNUM 1
-#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM 63
-#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM_MCDI2 255
-#define MC_CMD_MUM_OUT_READ_SENSORS_READING_OFST 0
-#define MC_CMD_MUM_OUT_READ_SENSORS_READING_LBN 0
-#define MC_CMD_MUM_OUT_READ_SENSORS_READING_WIDTH 16
-#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_OFST 0
-#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_LBN 16
-#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_WIDTH 8
-#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_OFST 0
-#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_LBN 24
-#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_WIDTH 8
-
-/* MC_CMD_MUM_OUT_PROGRAM_CLOCKS msgresponse */
-#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_LEN 4
-#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_OFST 0
-#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_LEN 4
-
-/* MC_CMD_MUM_OUT_FPGA_LOAD msgresponse */
-#define MC_CMD_MUM_OUT_FPGA_LOAD_LEN 0
-
-/* MC_CMD_MUM_OUT_READ_ATB_SENSOR msgresponse */
-#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_LEN 4
-#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_OFST 0
-#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_LEN 4
-
-/* MC_CMD_MUM_OUT_QSFP_INIT msgresponse */
-#define MC_CMD_MUM_OUT_QSFP_INIT_LEN 0
-
-/* MC_CMD_MUM_OUT_QSFP_RECONFIGURE msgresponse */
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_LEN 8
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_OFST 0
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_LEN 4
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_OFST 4
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_LEN 4
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_OFST 4
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_LBN 0
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_WIDTH 1
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_OFST 4
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_LBN 1
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_WIDTH 1
-
-/* MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP msgresponse */
-#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_LEN 4
-#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_OFST 0
-#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_LEN 4
-
-/* MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO msgresponse */
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMIN 5
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX 252
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX_MCDI2 1020
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LEN(num) (4+1*(num))
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_NUM(len) (((len)-4)/1)
-/* in bytes */
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST 0
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_LEN 4
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_OFST 4
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_LEN 1
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MINNUM 1
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM 248
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM_MCDI2 1016
-
-/* MC_CMD_MUM_OUT_QSFP_FILL_STATS msgresponse */
-#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_LEN 8
-#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_OFST 0
-#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_LEN 4
-#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_OFST 4
-#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_LEN 4
-
-/* MC_CMD_MUM_OUT_QSFP_POLL_BIST msgresponse */
-#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4
-#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0
-#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_LEN 4
-
-/* MC_CMD_MUM_OUT_READ_DDR_INFO msgresponse */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMIN 24
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMAX 248
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMAX_MCDI2 1016
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_LEN(num) (8+8*(num))
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_NUM(len) (((len)-8)/8)
-/* Discrete (soldered) DDR resistor strap info */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_OFST 0
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_LEN 4
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_OFST 0
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_LBN 0
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_WIDTH 16
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_OFST 0
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_LBN 16
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_WIDTH 16
-/* Number of SODIMM info records */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_OFST 4
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_LEN 4
-/* Array of SODIMM info records */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LEN 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_LEN 4
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_LBN 64
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_WIDTH 32
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_OFST 12
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_LEN 4
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_LBN 96
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_WIDTH 32
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MINNUM 2
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM 30
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM_MCDI2 126
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_LBN 0
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_WIDTH 8
-/* enum: SODIMM bank 1 (Top SODIMM for Sorrento) */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK1 0x0
-/* enum: SODIMM bank 2 (Bottom SODIMM for Sorrento) */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK2 0x1
-/* enum: Total number of SODIMM banks */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_BANKS 0x2
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_LBN 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_WIDTH 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_LBN 16
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_WIDTH 4
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_LBN 20
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_WIDTH 4
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_POWERED 0x0 /* enum */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V25 0x1 /* enum */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V35 0x2 /* enum */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V5 0x3 /* enum */
-/* enum: Values 5-15 are reserved for future usage */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V8 0x4
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_LBN 24
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_WIDTH 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_LBN 32
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_WIDTH 16
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_LBN 48
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_WIDTH 4
-/* enum: No module present */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_ABSENT 0x0
-/* enum: Module present supported and powered on */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_POWERED 0x1
-/* enum: Module present but bad type */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_TYPE 0x2
-/* enum: Module present but incompatible voltage */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_VOLTAGE 0x3
-/* enum: Module present but unknown SPD */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SPD 0x4
-/* enum: Module present but slot cannot support it */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SLOT 0x5
-/* enum: Modules may or may not be present, but cannot establish contact by I2C
- */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_REACHABLE 0x6
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_LBN 52
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_WIDTH 12
-
/* MC_CMD_DYNAMIC_SENSORS_LIMITS structuredef: Set of sensor limits. This
* should match the equivalent structure in the sensor_query SPHINX service.
*/
@@ -9500,27 +9137,22 @@
* and a generation count for this version of the sensor table. On systems
* advertising the DYNAMIC_SENSORS capability bit, this replaces the
* MC_CMD_READ_SENSORS command. On multi-MC systems this may include sensors
- * added by the NMC.
- *
- * Sensor handles are persistent for the lifetime of the sensor and are used to
- * identify sensors in MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS and
- * MC_CMD_DYNAMIC_SENSORS_GET_VALUES.
- *
- * The generation count is maintained by the MC, is persistent across reboots
- * and will be incremented each time the sensor table is modified. When the
- * table is modified, a CODE_DYNAMIC_SENSORS_CHANGE event will be generated
- * containing the new generation count. The driver should compare this against
- * the current generation count, and if it is different, call
- * MC_CMD_DYNAMIC_SENSORS_LIST again to update it's copy of the sensor table.
- *
- * The sensor count is provided to allow a future path to supporting more than
+ * added by the NMC. Sensor handles are persistent for the lifetime of the
+ * sensor and are used to identify sensors in
+ * MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS and
+ * MC_CMD_DYNAMIC_SENSORS_GET_VALUES. The generation count is maintained by the
+ * MC, is persistent across reboots and will be incremented each time the
+ * sensor table is modified. When the table is modified, a
+ * CODE_DYNAMIC_SENSORS_CHANGE event will be generated containing the new
+ * generation count. The driver should compare this against the current
+ * generation count, and if it is different, call MC_CMD_DYNAMIC_SENSORS_LIST
+ * again to update its copy of the sensor table. The sensor count is provided
+ * to allow a future path to supporting more than
* MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_MAXNUM_MCDI2 sensors, i.e.
* the maximum number that will fit in a single response. As this is a fairly
* large number (253) it is not anticipated that this will be needed in the
- * near future, so can currently be ignored.
- *
- * On Riverhead this command is implemented as a wrapper for `list` in the
- * sensor_query SPHINX service.
+ * near future, so can currently be ignored. On Riverhead this command is
+ * implemented as a wrapper for `list` in the sensor_query SPHINX service.
*/
#define MC_CMD_DYNAMIC_SENSORS_LIST 0x66
#undef MC_CMD_0x66_PRIVILEGE_CTG
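
The generation-count protocol above implies a small re-list step on the driver side. A minimal sketch, assuming the sfc-style MCDI helpers (MCDI_DECLARE_BUF, MCDI_DWORD, efx_mcdi_rpc); the LENMAX and GENERATION field names are assumed from the LIST response definition elsewhere in this file, and the cached-generation pointer is a hypothetical driver-private field:

/* Hedged sketch: re-fetch the sensor table when an MCDI
 * CODE_DYNAMIC_SENSORS_CHANGE event reports a new generation count.
 */
static int sensors_refresh_if_stale(struct efx_nic *efx, u32 event_gen,
				    u32 *cached_generation)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_DYNAMIC_SENSORS_LIST_OUT_LENMAX);
	size_t outlen;
	int rc;

	if (event_gen == *cached_generation)
		return 0;		/* our copy of the table is current */

	rc = efx_mcdi_rpc(efx, MC_CMD_DYNAMIC_SENSORS_LIST, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;

	*cached_generation =
		MCDI_DWORD(outbuf, DYNAMIC_SENSORS_LIST_OUT_GENERATION);
	/* ...re-parse the returned sensor handles here... */
	return 0;
}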
@@ -9557,15 +9189,13 @@
/***********************************/
/* MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS
* Get descriptions for a set of sensors, specified as an array of sensor
- * handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST
- *
- * Any handles which do not correspond to a sensor currently managed by the MC
- * will be dropped from from the response. This may happen when a sensor table
- * update is in progress, and effectively means the set of usable sensors is
- * the intersection between the sets of sensors known to the driver and the MC.
- *
- * On Riverhead this command is implemented as a wrapper for
- * `get_descriptions` in the sensor_query SPHINX service.
+ * handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST. Any handles which do not
+ * correspond to a sensor currently managed by the MC will be dropped from
+ * the response. This may happen when a sensor table update is in progress, and
+ * effectively means the set of usable sensors is the intersection between the
+ * sets of sensors known to the driver and the MC. On Riverhead this command is
+ * implemented as a wrapper for `get_descriptions` in the sensor_query SPHINX
+ * service.
*/
#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS 0x67
#undef MC_CMD_0x67_PRIVILEGE_CTG
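
Because dropped handles silently shrink the response, a driver cannot assume a 1:1 mapping between the handles it sent and the descriptions it gets back. A hedged sketch of the membership test this implies; 'resp' and 'n_resp' are hypothetical parsed-response values holding the handles actually returned:

/* Hedged sketch: usable sensors are the intersection of what the driver
 * asked about and what the MC still manages; anything absent from the
 * response was dropped (e.g. a table update was in flight).
 */
static bool sensor_handle_still_managed(const u32 *resp, size_t n_resp,
					u32 handle)
{
	size_t i;

	for (i = 0; i < n_resp; i++)
		if (resp[i] == handle)
			return true;
	return false;
}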
@@ -9602,19 +9232,15 @@
/***********************************/
/* MC_CMD_DYNAMIC_SENSORS_GET_READINGS
* Read the state and value for a set of sensors, specified as an array of
- * sensor handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST.
- *
- * In the case of a broken sensor, then the state of the response's
- * MC_CMD_DYNAMIC_SENSORS_VALUE entry will be set to BROKEN, and any value
- * provided should be treated as erroneous.
- *
- * Any handles which do not correspond to a sensor currently managed by the MC
- * will be dropped from from the response. This may happen when a sensor table
- * update is in progress, and effectively means the set of usable sensors is
- * the intersection between the sets of sensors known to the driver and the MC.
- *
- * On Riverhead this command is implemented as a wrapper for `get_readings`
- * in the sensor_query SPHINX service.
+ * sensor handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST. In the case of a
+ * broken sensor, the state of the response's MC_CMD_DYNAMIC_SENSORS_VALUE
+ * entry will be set to BROKEN, and any value provided should be treated as
+ * erroneous. Any handles which do not correspond to a sensor currently managed
+ * by the MC will be dropped from the response. This may happen when a
+ * sensor table update is in progress, and effectively means the set of usable
+ * sensors is the intersection between the sets of sensors known to the driver
+ * and the MC. On Riverhead this command is implemented as a wrapper for
+ * `get_readings` in the sensor_query SPHINX service.
*/
#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS 0x68
#undef MC_CMD_0x68_PRIVILEGE_CTG
@@ -9647,45 +9273,1286 @@
#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_MAXNUM 21
#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_MAXNUM_MCDI2 85
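
A reading whose state is BROKEN carries no usable value, so the driver must check state before value. A minimal sketch; 'struct sensor_reading' and SENSOR_STATE_BROKEN are hypothetical stand-ins for a parsed MC_CMD_DYNAMIC_SENSORS_VALUE entry and its state enum, whose real names live in the VALUE structuredef:

#define SENSOR_STATE_BROKEN 2	/* hypothetical; use the real enum value */

struct sensor_reading {
	u32 handle;
	u32 state;
	u32 value;
};

/* Hedged sketch: a BROKEN reading must be treated as erroneous */
static bool sensor_reading_usable(const struct sensor_reading *r)
{
	return r->state != SENSOR_STATE_BROKEN;
}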
+/* MC_CMD_MAC_FLAGS structuredef */
+#define MC_CMD_MAC_FLAGS_LEN 4
+/* The enums defined in this field are used as indices into the
+ * MC_CMD_MAC_FLAGS bitmask.
+ */
+#define MC_CMD_MAC_FLAGS_MASK_OFST 0
+#define MC_CMD_MAC_FLAGS_MASK_LEN 4
+/* enum property: bitshift */
+/* enum: Include the FCS in the packet data delivered to the host. Ignored if
+ * RX_INCLUDE_FCS not set in capabilities.
+ */
+#define MC_CMD_MAC_FLAGS_FLAG_INCLUDE_FCS 0x0
+#define MC_CMD_MAC_FLAGS_MASK_LBN 0
+#define MC_CMD_MAC_FLAGS_MASK_WIDTH 32
+
+/* MC_CMD_TRANSMISSION_MODE structuredef */
+#define MC_CMD_TRANSMISSION_MODE_LEN 4
+#define MC_CMD_TRANSMISSION_MODE_MASK_OFST 0
+#define MC_CMD_TRANSMISSION_MODE_MASK_LEN 4
+/* enum property: value */
+#define MC_CMD_TRANSMISSION_MODE_PROMSC_MODE 0x0 /* enum */
+#define MC_CMD_TRANSMISSION_MODE_UNCST_MODE 0x1 /* enum */
+#define MC_CMD_TRANSMISSION_MODE_BRDCST_MODE 0x2 /* enum */
+#define MC_CMD_TRANSMISSION_MODE_MASK_LBN 0
+#define MC_CMD_TRANSMISSION_MODE_MASK_WIDTH 32
+
+/* MC_CMD_MAC_CONFIG_OPTIONS structuredef */
+#define MC_CMD_MAC_CONFIG_OPTIONS_LEN 4
+#define MC_CMD_MAC_CONFIG_OPTIONS_MASK_OFST 0
+#define MC_CMD_MAC_CONFIG_OPTIONS_MASK_LEN 4
+/* enum property: bitmask */
+/* enum: Configure the MAC address. */
+#define MC_CMD_MAC_CONFIG_OPTIONS_CFG_ADDR 0x0
+/* enum: Configure the maximum frame length. */
+#define MC_CMD_MAC_CONFIG_OPTIONS_CFG_MAX_FRAME_LEN 0x1
+/* enum: Configure flow control. */
+#define MC_CMD_MAC_CONFIG_OPTIONS_CFG_FCNTL 0x2
+/* enum: Configure the transmission mode. */
+#define MC_CMD_MAC_CONFIG_OPTIONS_CFG_TRANSMISSION_MODE 0x3
+/* enum: Configure FCS. */
+#define MC_CMD_MAC_CONFIG_OPTIONS_CFG_INCLUDE_FCS 0x4
+#define MC_CMD_MAC_CONFIG_OPTIONS_MASK_LBN 0
+#define MC_CMD_MAC_CONFIG_OPTIONS_MASK_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_MAC_CTRL
+ * Set MAC configuration. Return code: 0, EINVAL, ENOTSUP
+ */
+#define MC_CMD_MAC_CTRL 0x1df
+#undef MC_CMD_0x1df_PRIVILEGE_CTG
+
+#define MC_CMD_0x1df_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_MAC_CTRL_IN msgrequest */
+#define MC_CMD_MAC_CTRL_IN_LEN 32
+/* Handle for selected network port. */
+#define MC_CMD_MAC_CTRL_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_MAC_CTRL_IN_PORT_HANDLE_LEN 4
+/* Select which parameters to configure. A parameter will only be modified if
+ * the corresponding control flag is set.
+ */
+#define MC_CMD_MAC_CTRL_IN_CONTROL_FLAGS_OFST 4
+#define MC_CMD_MAC_CTRL_IN_CONTROL_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_MAC_CONFIG_OPTIONS/MASK */
+/* MAC address of the device. */
+#define MC_CMD_MAC_CTRL_IN_ADDR_OFST 8
+#define MC_CMD_MAC_CTRL_IN_ADDR_LEN 8
+#define MC_CMD_MAC_CTRL_IN_ADDR_LO_OFST 8
+#define MC_CMD_MAC_CTRL_IN_ADDR_LO_LEN 4
+#define MC_CMD_MAC_CTRL_IN_ADDR_LO_LBN 64
+#define MC_CMD_MAC_CTRL_IN_ADDR_LO_WIDTH 32
+#define MC_CMD_MAC_CTRL_IN_ADDR_HI_OFST 12
+#define MC_CMD_MAC_CTRL_IN_ADDR_HI_LEN 4
+#define MC_CMD_MAC_CTRL_IN_ADDR_HI_LBN 96
+#define MC_CMD_MAC_CTRL_IN_ADDR_HI_WIDTH 32
+/* Includes the ethernet header, optional VLAN tags, payload and FCS. */
+#define MC_CMD_MAC_CTRL_IN_MAX_FRAME_LEN_OFST 16
+#define MC_CMD_MAC_CTRL_IN_MAX_FRAME_LEN_LEN 4
+/* Settings for flow control. */
+#define MC_CMD_MAC_CTRL_IN_FCNTL_OFST 20
+#define MC_CMD_MAC_CTRL_IN_FCNTL_LEN 4
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_FCNTL/MASK */
+/* Configure the MAC to transmit in one of promiscuous, unicast or broadcast
+ * mode.
+ */
+#define MC_CMD_MAC_CTRL_IN_TRANSMISSION_MODE_OFST 24
+#define MC_CMD_MAC_CTRL_IN_TRANSMISSION_MODE_LEN 4
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_TRANSMISSION_MODE/MASK */
+/* Flags to control and expand the configuration of the MAC. */
+#define MC_CMD_MAC_CTRL_IN_FLAGS_OFST 28
+#define MC_CMD_MAC_CTRL_IN_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_MAC_FLAGS/MASK */
+
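
Because CONTROL_FLAGS has bitshift enum semantics, the MC_CMD_MAC_CONFIG_OPTIONS values are bit positions, and a parameter whose bit is left clear is simply not touched. A hedged sketch that changes only the maximum frame length, assuming the sfc MCDI helpers; efx, port_handle and mtu are assumptions, and the frame-length arithmetic is illustrative:

static int mac_set_max_frame_len(struct efx_nic *efx, u32 port_handle,
				 unsigned int mtu)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_CTRL_IN_LEN);

	MCDI_SET_DWORD(inbuf, MAC_CTRL_IN_PORT_HANDLE, port_handle);
	/* only CFG_MAX_FRAME_LEN is set, so ADDR/FCNTL/etc. stay unchanged */
	MCDI_SET_DWORD(inbuf, MAC_CTRL_IN_CONTROL_FLAGS,
		       BIT(MC_CMD_MAC_CONFIG_OPTIONS_CFG_MAX_FRAME_LEN));
	/* MAX_FRAME_LEN includes headers, VLAN tags and FCS, per above */
	MCDI_SET_DWORD(inbuf, MAC_CTRL_IN_MAX_FRAME_LEN,
		       mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
	return efx_mcdi_rpc(efx, MC_CMD_MAC_CTRL, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}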
+/* MC_CMD_MAC_CTRL_IN_V2 msgrequest: Updated MAC_CTRL with QBB mask */
+#define MC_CMD_MAC_CTRL_IN_V2_LEN 33
+/* Handle for selected network port. */
+#define MC_CMD_MAC_CTRL_IN_V2_PORT_HANDLE_OFST 0
+#define MC_CMD_MAC_CTRL_IN_V2_PORT_HANDLE_LEN 4
+/* Select which parameters to configure. A parameter will only be modified if
+ * the corresponding control flag is set.
+ */
+#define MC_CMD_MAC_CTRL_IN_V2_CONTROL_FLAGS_OFST 4
+#define MC_CMD_MAC_CTRL_IN_V2_CONTROL_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_MAC_CONFIG_OPTIONS/MASK */
+/* MAC address of the device. */
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_OFST 8
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_LEN 8
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_LO_OFST 8
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_LO_LEN 4
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_LO_LBN 64
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_LO_WIDTH 32
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_HI_OFST 12
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_HI_LEN 4
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_HI_LBN 96
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_HI_WIDTH 32
+/* Includes the ethernet header, optional VLAN tags, payload and FCS. */
+#define MC_CMD_MAC_CTRL_IN_V2_MAX_FRAME_LEN_OFST 16
+#define MC_CMD_MAC_CTRL_IN_V2_MAX_FRAME_LEN_LEN 4
+/* Settings for flow control. */
+#define MC_CMD_MAC_CTRL_IN_V2_FCNTL_OFST 20
+#define MC_CMD_MAC_CTRL_IN_V2_FCNTL_LEN 4
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_FCNTL/MASK */
+/* Configure the MAC to transmit in one of promiscuous, unicast or broadcast
+ * mode.
+ */
+#define MC_CMD_MAC_CTRL_IN_V2_TRANSMISSION_MODE_OFST 24
+#define MC_CMD_MAC_CTRL_IN_V2_TRANSMISSION_MODE_LEN 4
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_TRANSMISSION_MODE/MASK */
+/* Flags to control and expand the configuration of the MAC. */
+#define MC_CMD_MAC_CTRL_IN_V2_FLAGS_OFST 28
+#define MC_CMD_MAC_CTRL_IN_V2_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_MAC_FLAGS/MASK */
+/* Priority-based flow control mask (QBB). PRIO7 corresponds to the highest
+ * priority, and PRIO0 to the lowest. This field is only used when CFG_FCNTL is
+ * set and FCNTL is QBB.
+ */
+#define MC_CMD_MAC_CTRL_IN_V2_PRIO_FCNTL_MASK_OFST 32
+#define MC_CMD_MAC_CTRL_IN_V2_PRIO_FCNTL_MASK_LEN 1
+/* enum property: bitmask */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO0 0x0 /* enum */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO1 0x1 /* enum */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO2 0x2 /* enum */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO3 0x3 /* enum */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO4 0x4 /* enum */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO5 0x5 /* enum */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO6 0x6 /* enum */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO7 0x7 /* enum */
+
+/* MC_CMD_MAC_CTRL_OUT msgresponse */
+#define MC_CMD_MAC_CTRL_OUT_LEN 0
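
The V2 request differs only by the trailing one-byte QBB mask, in which each QBB_PRIOn enum is again a bit position; the byte is ignored unless CFG_FCNTL is selected and the FCNTL value is QBB. A hedged fragment, assuming an MCDI_SET_BYTE helper exists alongside the usual sfc macros:

/* Hedged sketch: request priority flow control on priorities 3 and 4 */
MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_CTRL_IN_V2_LEN);
u8 prio_mask = BIT(MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO3) |
	       BIT(MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO4);

/* ...fill PORT_HANDLE, CONTROL_FLAGS (with CFG_FCNTL) and FCNTL = QBB... */
MCDI_SET_BYTE(inbuf, MAC_CTRL_IN_V2_PRIO_FCNTL_MASK, prio_mask);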
+
+
+/***********************************/
+/* MC_CMD_MAC_STATE
+ * Read the MAC state. Return code: 0, ETIME.
+ */
+#define MC_CMD_MAC_STATE 0x1e0
+#undef MC_CMD_0x1e0_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e0_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_MAC_STATE_IN msgrequest */
+#define MC_CMD_MAC_STATE_IN_LEN 4
+/* Handle for selected network port. */
+#define MC_CMD_MAC_STATE_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_MAC_STATE_IN_PORT_HANDLE_LEN 4
+
+/* MC_CMD_MAC_STATE_OUT msgresponse */
+#define MC_CMD_MAC_STATE_OUT_LEN 32
+/* The configured maximum frame length of the MAC. */
+#define MC_CMD_MAC_STATE_OUT_MAX_FRAME_LEN_OFST 0
+#define MC_CMD_MAC_STATE_OUT_MAX_FRAME_LEN_LEN 4
+/* This returns the negotiated flow control value. */
+#define MC_CMD_MAC_STATE_OUT_FCNTL_OFST 4
+#define MC_CMD_MAC_STATE_OUT_FCNTL_LEN 4
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_FCNTL/MASK */
+/* MAC address of the device. */
+#define MC_CMD_MAC_STATE_OUT_ADDR_OFST 8
+#define MC_CMD_MAC_STATE_OUT_ADDR_LEN 8
+#define MC_CMD_MAC_STATE_OUT_ADDR_LO_OFST 8
+#define MC_CMD_MAC_STATE_OUT_ADDR_LO_LEN 4
+#define MC_CMD_MAC_STATE_OUT_ADDR_LO_LBN 64
+#define MC_CMD_MAC_STATE_OUT_ADDR_LO_WIDTH 32
+#define MC_CMD_MAC_STATE_OUT_ADDR_HI_OFST 12
+#define MC_CMD_MAC_STATE_OUT_ADDR_HI_LEN 4
+#define MC_CMD_MAC_STATE_OUT_ADDR_HI_LBN 96
+#define MC_CMD_MAC_STATE_OUT_ADDR_HI_WIDTH 32
+/* Flags indicating MAC faults. */
+#define MC_CMD_MAC_STATE_OUT_MAC_FAULT_FLAGS_OFST 16
+#define MC_CMD_MAC_STATE_OUT_MAC_FAULT_FLAGS_LEN 4
+/* enum property: bitshift */
+/* enum: Indicates a local MAC fault. */
+#define MC_CMD_MAC_STATE_OUT_LOCAL 0x0
+/* enum: Indicates a remote MAC fault. */
+#define MC_CMD_MAC_STATE_OUT_REMOTE 0x1
+/* enum: Indicates a pending reconfiguration of the MAC. */
+#define MC_CMD_MAC_STATE_OUT_PENDING_RECONFIG 0x2
+/* The flags that were used to configure the MAC. This is a copy of the FLAGS
+ * field in the MC_CMD_MAC_CTRL_IN command.
+ */
+#define MC_CMD_MAC_STATE_OUT_FLAGS_OFST 20
+#define MC_CMD_MAC_STATE_OUT_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_MAC_FLAGS/MASK */
+/* The transmission mode that was used to configure the MAC. This is a copy of
+ * the TRANSMISSION_MODE field in the MC_CMD_MAC_CTRL_IN command.
+ */
+#define MC_CMD_MAC_STATE_OUT_TRANSMISSION_MODE_OFST 24
+#define MC_CMD_MAC_STATE_OUT_TRANSMISSION_MODE_LEN 4
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_TRANSMISSION_MODE/MASK */
+/* The control flags that were used to configure the MAC. This is a copy of the
+ * CONTROL field in the MC_CMD_MAC_CTRL_IN command.
+ */
+#define MC_CMD_MAC_STATE_OUT_CONTROL_FLAGS_OFST 28
+#define MC_CMD_MAC_STATE_OUT_CONTROL_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_MAC_CONFIG_OPTIONS/MASK */
+
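
MAC_FAULT_FLAGS also uses bitshift enum semantics, so LOCAL/REMOTE/PENDING_RECONFIG index bits rather than being compared as values. A minimal decode sketch, assuming 'outbuf' holds a completed MC_CMD_MAC_STATE response:

static void mac_report_faults(struct efx_nic *efx, efx_dword_t *outbuf)
{
	u32 faults = MCDI_DWORD(outbuf, MAC_STATE_OUT_MAC_FAULT_FLAGS);

	if (faults & BIT(MC_CMD_MAC_STATE_OUT_LOCAL))
		netif_err(efx, hw, efx->net_dev, "local MAC fault\n");
	if (faults & BIT(MC_CMD_MAC_STATE_OUT_REMOTE))
		netif_err(efx, hw, efx->net_dev, "remote MAC fault\n");
	if (faults & BIT(MC_CMD_MAC_STATE_OUT_PENDING_RECONFIG))
		netif_dbg(efx, hw, efx->net_dev, "MAC reconfig pending\n");
}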
+
+/***********************************/
+/* MC_CMD_GET_ASSIGNED_PORT_HANDLE
+ * Obtain a handle that can be operated on to configure and query the status of
+ * the link. ENOENT is returned when no port is assigned to the client. Return
+ * code: 0, ENOENT
+ */
+#define MC_CMD_GET_ASSIGNED_PORT_HANDLE 0x1e2
+#undef MC_CMD_0x1e2_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_ASSIGNED_PORT_HANDLE_IN msgrequest */
+#define MC_CMD_GET_ASSIGNED_PORT_HANDLE_IN_LEN 0
+
+/* MC_CMD_GET_ASSIGNED_PORT_HANDLE_OUT msgresponse */
+#define MC_CMD_GET_ASSIGNED_PORT_HANDLE_OUT_LEN 4
+/* Handle for assigned port. */
+#define MC_CMD_GET_ASSIGNED_PORT_HANDLE_OUT_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_ASSIGNED_PORT_HANDLE_OUT_PORT_HANDLE_LEN 4
+
+/* MC_CMD_STAT_ID structuredef */
+#define MC_CMD_STAT_ID_LEN 4
+#define MC_CMD_STAT_ID_SOURCE_ID_OFST 0
+#define MC_CMD_STAT_ID_SOURCE_ID_LEN 2
+/* enum property: index */
+/* enum: Internal markers (generation start and end markers) */
+#define MC_CMD_STAT_ID_MARKER 0x1
+/* enum: Network port MAC statistics. */
+#define MC_CMD_STAT_ID_MAC 0x2
+/* enum: Network port PHY statistics. */
+#define MC_CMD_STAT_ID_PHY 0x3
+#define MC_CMD_STAT_ID_SOURCE_ID_LBN 0
+#define MC_CMD_STAT_ID_SOURCE_ID_WIDTH 16
+#define MC_CMD_STAT_ID_MARKER_STAT_ID_OFST 2
+#define MC_CMD_STAT_ID_MARKER_STAT_ID_LEN 2
+/* enum property: index */
+/* enum: This value is used to mark the start of a generation of statistics for
+ * DMA synchronization. It is incremented whenever a new set of statistics is
+ * transferred. Always the first entry in the DMA buffer.
+ */
+#define MC_CMD_STAT_ID_GENERATION_START 0x1
+/* enum: This value is used to mark the end of a generation of statistics for
+ * DMA synchronization. Always the last entry in the DMA buffer and set to the
+ * same value as GENERATION_START. The host driver must compare the
+ * GENERATION_START and GENERATION_END values to verify that the DMA buffer is
+ * consistent upon copying the DMA buffer. If they do not match, it means that
+ * a new DMA transfer has started while the host driver was copying the DMA
+ * buffer. In this case, the host driver must repeat the copy operation.
+ */
+#define MC_CMD_STAT_ID_GENERATION_END 0x2
+#define MC_CMD_STAT_ID_MARKER_STAT_ID_LBN 16
+#define MC_CMD_STAT_ID_MARKER_STAT_ID_WIDTH 16
+#define MC_CMD_STAT_ID_MAC_STAT_ID_OFST 2
+#define MC_CMD_STAT_ID_MAC_STAT_ID_LEN 2
+/* enum property: index */
+/* enum: Total number of packets transmitted (includes pause frames). */
+#define MC_CMD_STAT_ID_TX_PKTS 0x1
+/* enum: Pause frames transmitted. */
+#define MC_CMD_STAT_ID_TX_PAUSE_PKTS 0x2
+/* enum: Control frames transmitted. */
+#define MC_CMD_STAT_ID_TX_CONTROL_PKTS 0x3
+/* enum: Unicast packets transmitted (includes pause frames). */
+#define MC_CMD_STAT_ID_TX_UNICAST_PKTS 0x4
+/* enum: Multicast packets transmitted (includes pause frames). */
+#define MC_CMD_STAT_ID_TX_MULTICAST_PKTS 0x5
+/* enum: Broadcast packets transmitted (includes pause frames). */
+#define MC_CMD_STAT_ID_TX_BROADCAST_PKTS 0x6
+/* enum: Bytes transmitted (includes pause frames). */
+#define MC_CMD_STAT_ID_TX_BYTES 0x7
+/* enum: Bytes transmitted with bad CRC. */
+#define MC_CMD_STAT_ID_TX_BAD_BYTES 0x8
+/* enum: Bytes transmitted with good CRC. */
+#define MC_CMD_STAT_ID_TX_GOOD_BYTES 0x9
+/* enum: Packets transmitted with length less than 64 bytes. */
+#define MC_CMD_STAT_ID_TX_LT64_PKTS 0xa
+/* enum: Packets transmitted with length equal to 64 bytes. */
+#define MC_CMD_STAT_ID_TX_64_PKTS 0xb
+/* enum: Packets transmitted with length between 65 and 127 bytes. */
+#define MC_CMD_STAT_ID_TX_65_TO_127_PKTS 0xc
+/* enum: Packets transmitted with length between 128 and 255 bytes. */
+#define MC_CMD_STAT_ID_TX_128_TO_255_PKTS 0xd
+/* enum: Packets transmitted with length between 256 and 511 bytes. */
+#define MC_CMD_STAT_ID_TX_256_TO_511_PKTS 0xe
+/* enum: Packets transmitted with length between 512 and 1023 bytes. */
+#define MC_CMD_STAT_ID_TX_512_TO_1023_PKTS 0xf
+/* enum: Packets transmitted with length between 1024 and 1518 bytes. */
+#define MC_CMD_STAT_ID_TX_1024_TO_15XX_PKTS 0x10
+/* enum: Packets transmitted with length between 1519 and 9216 bytes. */
+#define MC_CMD_STAT_ID_TX_15XX_TO_JUMBO_PKTS 0x11
+/* enum: Packets transmitted with length greater than 9216 bytes. */
+#define MC_CMD_STAT_ID_TX_GTJUMBO_PKTS 0x12
+/* enum: Packets transmitted with bad FCS. */
+#define MC_CMD_STAT_ID_TX_BAD_FCS_PKTS 0x13
+/* enum: Packets transmitted with good FCS. */
+#define MC_CMD_STAT_ID_TX_GOOD_FCS_PKTS 0x14
+/* enum: Packets received. */
+#define MC_CMD_STAT_ID_RX_PKTS 0x15
+/* enum: Pause frames received. */
+#define MC_CMD_STAT_ID_RX_PAUSE_PKTS 0x16
+/* enum: Total number of good packets received. */
+#define MC_CMD_STAT_ID_RX_GOOD_PKTS 0x17
+/* enum: Total number of BAD packets received. */
+#define MC_CMD_STAT_ID_RX_BAD_PKTS 0x18
+/* enum: Total number of control frames received. */
+#define MC_CMD_STAT_ID_RX_CONTROL_PKTS 0x19
+/* enum: Total number of unicast packets received. */
+#define MC_CMD_STAT_ID_RX_UNICAST_PKTS 0x1a
+/* enum: Total number of multicast packets received. */
+#define MC_CMD_STAT_ID_RX_MULTICAST_PKTS 0x1b
+/* enum: Total number of broadcast packets received. */
+#define MC_CMD_STAT_ID_RX_BROADCAST_PKTS 0x1c
+/* enum: Total number of bytes received. */
+#define MC_CMD_STAT_ID_RX_BYTES 0x1d
+/* enum: Total number of bytes received with bad CRC. */
+#define MC_CMD_STAT_ID_RX_BAD_BYTES 0x1e
+/* enum: Total number of bytes received with GOOD CRC. */
+#define MC_CMD_STAT_ID_RX_GOOD_BYTES 0x1f
+/* enum: Packets received with length equal to 64 bytes. */
+#define MC_CMD_STAT_ID_RX_64_PKTS 0x20
+/* enum: Packets received with length between 65 and 127 bytes. */
+#define MC_CMD_STAT_ID_RX_65_TO_127_PKTS 0x21
+/* enum: Packets received with length between 128 and 255 bytes. */
+#define MC_CMD_STAT_ID_RX_128_TO_255_PKTS 0x22
+/* enum: Packets received with length between 256 and 511 bytes. */
+#define MC_CMD_STAT_ID_RX_256_TO_511_PKTS 0x23
+/* enum: Packets received with length between 512 and 1023 bytes. */
+#define MC_CMD_STAT_ID_RX_512_TO_1023_PKTS 0x24
+/* enum: Packets received with length between 1024 and 1518 bytes. */
+#define MC_CMD_STAT_ID_RX_1024_TO_15XX_PKTS 0x25
+/* enum: Packets received with length between 1519 and 9216 bytes. */
+#define MC_CMD_STAT_ID_RX_15XX_TO_JUMBO_PKTS 0x26
+/* enum: Packets received with length greater than 9216 bytes. */
+#define MC_CMD_STAT_ID_RX_GTJUMBO_PKTS 0x27
+/* enum: Packets received with length less than 64 bytes. */
+#define MC_CMD_STAT_ID_RX_UNDERSIZE_PKTS 0x28
+/* enum: Packets received with bad FCS. */
+#define MC_CMD_STAT_ID_RX_BAD_FCS_PKTS 0x29
+/* enum: Packets received with GOOD FCS. */
+#define MC_CMD_STAT_ID_RX_GOOD_FCS_PKTS 0x2a
+/* enum: Packets received with overflow. */
+#define MC_CMD_STAT_ID_RX_OVERFLOW_PKTS 0x2b
+/* enum: Packets received with symbol error. */
+#define MC_CMD_STAT_ID_RX_SYMBOL_ERROR_PKTS 0x2c
+/* enum: Packets received with alignment error. */
+#define MC_CMD_STAT_ID_RX_ALIGN_ERROR_PKTS 0x2d
+/* enum: Packets received with length error. */
+#define MC_CMD_STAT_ID_RX_LENGTH_ERROR_PKTS 0x2e
+/* enum: Packets received with internal error. */
+#define MC_CMD_STAT_ID_RX_INTERNAL_ERROR_PKTS 0x2f
+/* enum: Packets received with jabber. These packets are larger than the
+ * allowed maximum receive unit (MRU). This indicates that a packet either has
+ * a bad CRC or has an RX error.
+ */
+#define MC_CMD_STAT_ID_RX_JABBER_PKTS 0x30
+/* enum: Packets dropped due to having no descriptor. This is a datapath stat
+ */
+#define MC_CMD_STAT_ID_RX_NODESC_DROPS 0x31
+/* enum: Packets received with lanes 0 and 1 character error. */
+#define MC_CMD_STAT_ID_RX_LANES01_CHAR_ERR 0x32
+/* enum: Packets received with lanes 2 and 3 character error. */
+#define MC_CMD_STAT_ID_RX_LANES23_CHAR_ERR 0x33
+/* enum: Packets received with lanes 0 and 1 disparity error. */
+#define MC_CMD_STAT_ID_RX_LANES01_DISP_ERR 0x34
+/* enum: Packets received with lanes 2 and 3 disparity error. */
+#define MC_CMD_STAT_ID_RX_LANES23_DISP_ERR 0x35
+/* enum: Packets received with match fault. */
+#define MC_CMD_STAT_ID_RX_MATCH_FAULT 0x36
+#define MC_CMD_STAT_ID_MAC_STAT_ID_LBN 16
+#define MC_CMD_STAT_ID_MAC_STAT_ID_WIDTH 16
+/* Include FEC stats. */
+#define MC_CMD_STAT_ID_PHY_STAT_ID_OFST 2
+#define MC_CMD_STAT_ID_PHY_STAT_ID_LEN 2
+/* enum property: index */
+/* enum: Number of uncorrected FEC codewords on link (RS-FEC only from Medford2
+ * onwards)
+ */
+#define MC_CMD_STAT_ID_FEC_UNCORRECTED_ERRORS 0x1
+/* enum: Number of corrected FEC codewords on link (RS-FEC only from Medford2
+ * onwards)
+ */
+#define MC_CMD_STAT_ID_FEC_CORRECTED_ERRORS 0x2
+/* enum: Number of corrected 10-bit symbol errors, lane 0 (RS-FEC only) */
+#define MC_CMD_STAT_ID_FEC_CORRECTED_SYMBOLS_LANE0 0x3
+/* enum: Number of corrected 10-bit symbol errors, lane 1 (RS-FEC only) */
+#define MC_CMD_STAT_ID_FEC_CORRECTED_SYMBOLS_LANE1 0x4
+/* enum: Number of corrected 10-bit symbol errors, lane 2 (RS-FEC only) */
+#define MC_CMD_STAT_ID_FEC_CORRECTED_SYMBOLS_LANE2 0x5
+/* enum: Number of corrected 10-bit symbol errors, lane 3 (RS-FEC only) */
+#define MC_CMD_STAT_ID_FEC_CORRECTED_SYMBOLS_LANE3 0x6
+#define MC_CMD_STAT_ID_PHY_STAT_ID_LBN 16
+#define MC_CMD_STAT_ID_PHY_STAT_ID_WIDTH 16
+
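
The START/END markers give a seqlock-style consistency check: copy the buffer, then require the first and last entries to match, retrying otherwise. A hedged sketch, assuming 64-bit buffer entries as used by earlier sfc stats DMA buffers:

/* Hedged sketch: retry until GENERATION_START (first entry) equals
 * GENERATION_END (last entry), i.e. the MC did not rewrite the buffer
 * while we were copying it.
 */
static int stats_copy_snapshot(__le64 *dst, const __le64 *dma, size_t n)
{
	int retries = 3;

	do {
		memcpy(dst, dma, n * sizeof(*dma));
		if (dst[0] == dst[n - 1])
			return 0;
	} while (--retries);
	return -EAGAIN;
}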
+/* MC_CMD_STAT_DESC structuredef: Structure describing the layout and size of
+ * the stats DMA buffer descriptor.
+ */
+#define MC_CMD_STAT_DESC_LEN 8
+/* Unique identifier of the statistic. Formatted as MC_CMD_STAT_ID */
+#define MC_CMD_STAT_DESC_STAT_ID_OFST 0
+#define MC_CMD_STAT_DESC_STAT_ID_LEN 4
+#define MC_CMD_STAT_DESC_STAT_ID_LBN 0
+#define MC_CMD_STAT_DESC_STAT_ID_WIDTH 32
+/* See structuredef: MC_CMD_STAT_ID */
+#define MC_CMD_STAT_DESC_STAT_ID_SOURCE_ID_OFST 0
+#define MC_CMD_STAT_DESC_STAT_ID_SOURCE_ID_LEN 2
+#define MC_CMD_STAT_DESC_STAT_ID_SOURCE_ID_LBN 0
+#define MC_CMD_STAT_DESC_STAT_ID_SOURCE_ID_WIDTH 16
+#define MC_CMD_STAT_DESC_STAT_ID_MARKER_STAT_ID_OFST 2
+#define MC_CMD_STAT_DESC_STAT_ID_MARKER_STAT_ID_LEN 2
+#define MC_CMD_STAT_DESC_STAT_ID_MARKER_STAT_ID_LBN 16
+#define MC_CMD_STAT_DESC_STAT_ID_MARKER_STAT_ID_WIDTH 16
+#define MC_CMD_STAT_DESC_STAT_ID_MAC_STAT_ID_OFST 2
+#define MC_CMD_STAT_DESC_STAT_ID_MAC_STAT_ID_LEN 2
+#define MC_CMD_STAT_DESC_STAT_ID_MAC_STAT_ID_LBN 16
+#define MC_CMD_STAT_DESC_STAT_ID_MAC_STAT_ID_WIDTH 16
+#define MC_CMD_STAT_DESC_STAT_ID_PHY_STAT_ID_OFST 2
+#define MC_CMD_STAT_DESC_STAT_ID_PHY_STAT_ID_LEN 2
+#define MC_CMD_STAT_DESC_STAT_ID_PHY_STAT_ID_LBN 16
+#define MC_CMD_STAT_DESC_STAT_ID_PHY_STAT_ID_WIDTH 16
+/* Index of the statistic in the DMA buffer. */
+#define MC_CMD_STAT_DESC_STAT_INDEX_OFST 4
+#define MC_CMD_STAT_DESC_STAT_INDEX_LEN 2
+#define MC_CMD_STAT_DESC_STAT_INDEX_LBN 32
+#define MC_CMD_STAT_DESC_STAT_INDEX_WIDTH 16
+/* Reserved for future extension (e.g. flags field) - currently always 0. */
+#define MC_CMD_STAT_DESC_RESERVED_OFST 6
+#define MC_CMD_STAT_DESC_RESERVED_LEN 2
+#define MC_CMD_STAT_DESC_RESERVED_LBN 48
+#define MC_CMD_STAT_DESC_RESERVED_WIDTH 16
+
+
+/***********************************/
+/* MC_CMD_MAC_STATISTICS_DESCRIPTOR
+ * Get a list of descriptors that describe the layout and size of the stats
+ * buffer required for retrieving statistics for a given port. Each entry in
+ * the list is formatted as MC_CMD_STAT_DESC and provides the ID of each stat
+ * and its location and size in the buffer. It also gives the overall minimum
+ * size of the DMA buffer required when DMA mode is used. Note that the first
+ * and last entries in the list are reserved for the generation start
+ * (MC_CMD_MARKER_STAT_GENERATION_START) and end
+ * (MC_CMD_MARKER_STAT_GENERATION_END) markers respectively, to be used for DMA
+ * synchronisation as described in the documentation for the relevant enum
+ * entries. The entries are present in the buffer even if DMA mode is not used.
+ * Provisions are made (but currently unused) for extending the size of the
+ * descriptors, extending the size of the list beyond the maximum MCDI response
+ * size, as well as the dynamic runtime updates of the list. Returns: 0 on
+ * success, ENOENT on non-existent port handle
+ */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR 0x1e3
+#undef MC_CMD_0x1e3_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN msgrequest */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_LEN 8
+/* Handle of port to get MAC statistics descriptors for. */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_PORT_HANDLE_LEN 4
+/* Offset of the first entry to return, for cases where not all entries fit in
+ * the MCDI response. Should be set to 0 on initial request, and on subsequent
+ * requests updated by the number of entries already returned, as long as the
+ * MORE_ENTRIES flag is set.
+ */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_OFFSET_OFST 4
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_OFFSET_LEN 4
+
+/* MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT msgresponse */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_LENMIN 28
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_LENMAX 252
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_LEN(num) (20+8*(num))
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_NUM(len) (((len)-20)/8)
+/* Generation number of the stats buffer. This is incremented each time the
+ * buffer is updated, and is used to verify the consistency of the buffer
+ * contents. Reserved for future extension (dynamic list updates). Currently
+ * always set to 0.
+ */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_GENERATION_OFST 0
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_GENERATION_LEN 4
+/* Minimum size in bytes of the DMA buffer required to retrieve all statistics
+ * for the port. This is the sum of the sizes of all the statistics, plus the
+ * size of the generation markers. Drivers will typically round up this value
+ * to the granularity of the host DMA allocation units.
+ */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_DMA_BUFFER_SIZE_OFST 4
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_DMA_BUFFER_SIZE_LEN 4
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_FLAGS_OFST 8
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_FLAGS_LEN 4
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_MORE_ENTRIES_OFST 8
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_MORE_ENTRIES_LBN 0
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_MORE_ENTRIES_WIDTH 1
+/* Size of the individual descriptor entry in the list. Determines the entry
+ * stride in the list. Currently always set to the size of MC_CMD_STAT_DESC;
+ * larger values can be used in the future to extend the descriptor by
+ * appending new data to the end of the existing structure.
+ */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRY_SIZE_OFST 12
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRY_SIZE_LEN 4
+/* Number of entries returned in the descriptor list. */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRY_COUNT_OFST 16
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRY_COUNT_LEN 4
+/* List of descriptors. Each entry is formatted as MC_CMD_STAT_DESC and
+ * provides the ID of each stat and its location and size in the buffer. The
+ * first and last entries are reserved for the generation start and end markers
+ * respectively.
+ */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_OFST 20
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_LEN 8
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_LO_OFST 20
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_LO_LEN 4
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_LO_LBN 160
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_LO_WIDTH 32
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_HI_OFST 24
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_HI_LEN 4
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_HI_LBN 192
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_HI_WIDTH 32
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_MINNUM 1
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_MAXNUM 29
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_MAXNUM_MCDI2 125
+
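
Fetching the full list may take several MCDI exchanges: OFFSET advances by the number of entries already returned while the MORE_ENTRIES flag stays set. A hedged loop sketch using the sfc MCDI helpers; parsing of the ENTRIES array itself is elided:

static int fetch_stat_descriptors(struct efx_nic *efx, u32 port)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_LENMAX);
	u32 offset = 0, flags;
	size_t outlen;
	int rc;

	do {
		MCDI_SET_DWORD(inbuf,
			       MAC_STATISTICS_DESCRIPTOR_IN_PORT_HANDLE, port);
		MCDI_SET_DWORD(inbuf,
			       MAC_STATISTICS_DESCRIPTOR_IN_OFFSET, offset);
		rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATISTICS_DESCRIPTOR,
				  inbuf, sizeof(inbuf),
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			return rc;
		offset += MCDI_DWORD(outbuf,
				     MAC_STATISTICS_DESCRIPTOR_OUT_ENTRY_COUNT);
		/* ...copy ENTRY_COUNT entries of ENTRY_SIZE bytes each... */
		flags = MCDI_DWORD(outbuf,
				   MAC_STATISTICS_DESCRIPTOR_OUT_FLAGS);
	} while (flags &
		 BIT(MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_MORE_ENTRIES_LBN));
	return 0;
}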
+
+/***********************************/
+/* MC_CMD_MAC_STATISTICS
+ * Get generic MAC statistics. This call retrieves unified statistics managed
+ * by the MC. The MC will populate and provide all supported statistics in the
+ * format as returned by MC_CMD_MAC_STATISTICS_DESCRIPTOR. Refer to the
+ * aforementioned command for the format and contents of the stats DMA buffer.
+ * To ensure consistent and accurate results, it is essential for the driver to
+ * initialize the DMA buffer with zeros when DMA mode is used. Returns: 0 on
+ * success, ETIME if the DMA buffer is not ready, ENOENT on non-existent port
+ * handle, and EINVAL on invalid parameters (DMA buffer too small)
+ */
+#define MC_CMD_MAC_STATISTICS 0x1e4
+#undef MC_CMD_0x1e4_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MAC_STATISTICS_IN msgrequest */
+#define MC_CMD_MAC_STATISTICS_IN_LEN 20
+/* Handle of port to get MAC statistics for. */
+#define MC_CMD_MAC_STATISTICS_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_MAC_STATISTICS_IN_PORT_HANDLE_LEN 4
+/* Contains options for querying the MAC statistics. */
+#define MC_CMD_MAC_STATISTICS_IN_CMD_OFST 4
+#define MC_CMD_MAC_STATISTICS_IN_CMD_LEN 4
+#define MC_CMD_MAC_STATISTICS_IN_DMA_OFST 4
+#define MC_CMD_MAC_STATISTICS_IN_DMA_LBN 0
+#define MC_CMD_MAC_STATISTICS_IN_DMA_WIDTH 1
+#define MC_CMD_MAC_STATISTICS_IN_CLEAR_OFST 4
+#define MC_CMD_MAC_STATISTICS_IN_CLEAR_LBN 1
+#define MC_CMD_MAC_STATISTICS_IN_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_CHANGE_OFST 4
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_CHANGE_LBN 2
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_CHANGE_WIDTH 1
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_ENABLE_OFST 4
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_ENABLE_LBN 3
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_ENABLE_WIDTH 1
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_NOEVENT_OFST 4
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_NOEVENT_LBN 4
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_NOEVENT_WIDTH 1
+#define MC_CMD_MAC_STATISTICS_IN_PERIOD_MS_OFST 4
+#define MC_CMD_MAC_STATISTICS_IN_PERIOD_MS_LBN 16
+#define MC_CMD_MAC_STATISTICS_IN_PERIOD_MS_WIDTH 16
+/* This is the address of the DMA buffer to use for transfer of the statistics.
+ * Only valid if the DMA flag is set to 1.
+ */
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_OFST 8
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_LO_OFST 8
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_LO_LBN 64
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_LO_WIDTH 32
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_HI_OFST 12
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_HI_LBN 96
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_HI_WIDTH 32
+/* This is the length of the DMA buffer to use for the transfer of the
+ * statistics. The buffer should be at least DMA_BUFFER_SIZE long, as returned
+ * by MC_CMD_MAC_STATISTICS_DESCRIPTOR. If the supplied buffer is too small,
+ * the command will fail with EINVAL. Only valid if the DMA flag is set to 1.
+ */
+#define MC_CMD_MAC_STATISTICS_IN_DMA_LEN_OFST 16
+#define MC_CMD_MAC_STATISTICS_IN_DMA_LEN_LEN 4
+
+/* MC_CMD_MAC_STATISTICS_OUT msgresponse */
+#define MC_CMD_MAC_STATISTICS_OUT_LENMIN 5
+#define MC_CMD_MAC_STATISTICS_OUT_LENMAX 252
+#define MC_CMD_MAC_STATISTICS_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_MAC_STATISTICS_OUT_LEN(num) (4+1*(num))
+#define MC_CMD_MAC_STATISTICS_OUT_DATA_NUM(len) (((len)-4)/1)
+/* length of the data in bytes */
+#define MC_CMD_MAC_STATISTICS_OUT_DATALEN_OFST 0
+#define MC_CMD_MAC_STATISTICS_OUT_DATALEN_LEN 4
+#define MC_CMD_MAC_STATISTICS_OUT_DATA_OFST 4
+#define MC_CMD_MAC_STATISTICS_OUT_DATA_LEN 1
+#define MC_CMD_MAC_STATISTICS_OUT_DATA_MINNUM 1
+#define MC_CMD_MAC_STATISTICS_OUT_DATA_MAXNUM 248
+#define MC_CMD_MAC_STATISTICS_OUT_DATA_MAXNUM_MCDI2 1016
+
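Annotation: the CMD dword at offset 4 packs the DMA/CLEAR/PERIODIC flag bits
and the 16-bit PERIOD_MS field, so a request is built by OR-ing shifted flags
into that dword and writing the remaining fields at their OFST positions. A
minimal sketch in C, assuming only the macros above plus a hypothetical
put_le32() store helper (MCDI fields are little-endian):

    #include <stdint.h>
    #include <stddef.h>

    /* Assumed helper: store a 32-bit value little-endian at a byte offset. */
    static void put_le32(uint8_t *buf, size_t ofst, uint32_t v)
    {
        buf[ofst + 0] = (uint8_t)v;
        buf[ofst + 1] = (uint8_t)(v >> 8);
        buf[ofst + 2] = (uint8_t)(v >> 16);
        buf[ofst + 3] = (uint8_t)(v >> 24);
    }

    /* Request an immediate DMA transfer of the stats into a pre-zeroed
     * host buffer of at least DMA_BUFFER_SIZE bytes. */
    static void build_mac_statistics_req(uint8_t req[MC_CMD_MAC_STATISTICS_IN_LEN],
                                         uint32_t port_handle,
                                         uint64_t dma_addr, uint32_t dma_len)
    {
        uint32_t cmd = 1u << MC_CMD_MAC_STATISTICS_IN_DMA_LBN; /* DMA flag */

        put_le32(req, MC_CMD_MAC_STATISTICS_IN_PORT_HANDLE_OFST, port_handle);
        put_le32(req, MC_CMD_MAC_STATISTICS_IN_CMD_OFST, cmd);
        put_le32(req, MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_LO_OFST,
                 (uint32_t)dma_addr);
        put_le32(req, MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_HI_OFST,
                 (uint32_t)(dma_addr >> 32));
        put_le32(req, MC_CMD_MAC_STATISTICS_IN_DMA_LEN_OFST, dma_len);
    }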
+/* NET_PORT_HANDLE_DESC structuredef: Network port descriptor containing a port
+ * handle and attributes used, for example, in MC_CMD_ENUM_PORTS.
+ */
+#define NET_PORT_HANDLE_DESC_LEN 53
+/* The handle to identify the port */
+#define NET_PORT_HANDLE_DESC_PORT_HANDLE_OFST 0
+#define NET_PORT_HANDLE_DESC_PORT_HANDLE_LEN 4
+#define NET_PORT_HANDLE_DESC_PORT_HANDLE_LBN 0
+#define NET_PORT_HANDLE_DESC_PORT_HANDLE_WIDTH 32
+/* Includes the type of port e.g. physical, virtual or MAE MPORT and other
+ * properties relevant to the port.
+ */
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_OFST 4
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_LEN 8
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_LO_OFST 4
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_LO_LEN 4
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_LO_LBN 32
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_LO_WIDTH 32
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_HI_OFST 8
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_HI_LEN 4
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_HI_LBN 64
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_HI_WIDTH 32
+#define NET_PORT_HANDLE_DESC_PORT_TYPE_OFST 4
+#define NET_PORT_HANDLE_DESC_PORT_TYPE_LBN 0
+#define NET_PORT_HANDLE_DESC_PORT_TYPE_WIDTH 3
+#define NET_PORT_HANDLE_DESC_PHYSICAL 0x0 /* enum */
+#define NET_PORT_HANDLE_DESC_VIRTUAL 0x1 /* enum */
+#define NET_PORT_HANDLE_DESC_MPORT 0x2 /* enum */
+#define NET_PORT_HANDLE_DESC_IS_ZOMBIE_OFST 4
+#define NET_PORT_HANDLE_DESC_IS_ZOMBIE_LBN 8
+#define NET_PORT_HANDLE_DESC_IS_ZOMBIE_WIDTH 1
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_LBN 32
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_WIDTH 64
+/* The dynamic change that led to the port enumeration */
+#define NET_PORT_HANDLE_DESC_ENTRY_SRC_OFST 12
+#define NET_PORT_HANDLE_DESC_ENTRY_SRC_LEN 1
+/* enum: Indicates that the ENTRY_SRC field has not been initialized. */
+#define NET_PORT_HANDLE_DESC_UNKNOWN 0x0
+/* enum: The port was enumerated at start of day. */
+#define NET_PORT_HANDLE_DESC_PRESENT 0x1
+/* enum: The port was dynamically added. */
+#define NET_PORT_HANDLE_DESC_ADDED 0x2
+/* enum: The port was dynamically deleted. */
+#define NET_PORT_HANDLE_DESC_DELETED 0x3
+#define NET_PORT_HANDLE_DESC_ENTRY_SRC_LBN 96
+#define NET_PORT_HANDLE_DESC_ENTRY_SRC_WIDTH 8
+/* This is an opaque 40 byte label exposed to users as a unique identifier of
+ * the port. It is represented as a zero-terminated ASCII string and assigned
+ * by the port administrator, which is typically either the firmware for a
+ * physical port or the host software responsible for creating the virtual
+ * port. The label is conveyed to the driver after assignment; the driver,
+ * unlike the port administrator, does not need to know how to interpret the
+ * label.
+ */
+#define NET_PORT_HANDLE_DESC_PORT_LABEL_OFST 13
+#define NET_PORT_HANDLE_DESC_PORT_LABEL_LEN 40
+#define NET_PORT_HANDLE_DESC_PORT_LABEL_LBN 104
+#define NET_PORT_HANDLE_DESC_PORT_LABEL_WIDTH 320
+
+
+/***********************************/
+/* MC_CMD_ENUM_PORTS
+ * This command returns handles for all ports present in the system. The PCIe
+ * function type of each port (either physical or virtual) is also reported.
+ * After a start-of-day port enumeration, firmware keeps track of all
+ * available ports as they are created or deleted and updates the port list on
+ * any change. The port list is cleared after a control interface reset (e.g.
+ * FLR, ENTITY_RESET), in which case this command must be called again to
+ * reenumerate the ports. The command is also clear-on-read, so repeated calls
+ * will drain the buffer.
+ */
+#define MC_CMD_ENUM_PORTS 0x1e5
+#undef MC_CMD_0x1e5_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e5_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_ENUM_PORTS_IN msgrequest */
+#define MC_CMD_ENUM_PORTS_IN_LEN 0
+
+/* MC_CMD_ENUM_PORTS_OUT msgresponse */
+#define MC_CMD_ENUM_PORTS_OUT_LENMIN 12
+#define MC_CMD_ENUM_PORTS_OUT_LENMAX 252
+#define MC_CMD_ENUM_PORTS_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_ENUM_PORTS_OUT_LEN(num) (12+1*(num))
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_NUM(len) (((len)-12)/1)
+/* Any unused flags are reserved and must be ignored. */
+#define MC_CMD_ENUM_PORTS_OUT_FLAGS_OFST 0
+#define MC_CMD_ENUM_PORTS_OUT_FLAGS_LEN 4
+#define MC_CMD_ENUM_PORTS_OUT_MORE_OFST 0
+#define MC_CMD_ENUM_PORTS_OUT_MORE_LBN 0
+#define MC_CMD_ENUM_PORTS_OUT_MORE_WIDTH 1
+/* The number of NET_PORT_HANDLE_DESC structures in PORT_HANDLES. */
+#define MC_CMD_ENUM_PORTS_OUT_PORT_COUNT_OFST 4
+#define MC_CMD_ENUM_PORTS_OUT_PORT_COUNT_LEN 4
+#define MC_CMD_ENUM_PORTS_OUT_SIZEOF_NET_PORT_HANDLE_DESC_OFST 8
+#define MC_CMD_ENUM_PORTS_OUT_SIZEOF_NET_PORT_HANDLE_DESC_LEN 4
+/* Array of NET_PORT_HANDLE_DESC structures. Callers must use the
+ * SIZEOF_NET_PORT_HANDLE_DESC field as the array stride between entries.
+ * This may also allow for tail padding for alignment. Fields beyond
+ * SIZEOF_NET_PORT_HANDLE_DESC are not present.
+ */
+ */
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_OFST 12
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_LEN 1
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_MINNUM 0
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_MAXNUM 240
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_MAXNUM_MCDI2 1008
+/* See structuredef: NET_PORT_HANDLE_DESC */
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_HANDLE_OFST 12
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_HANDLE_LEN 4
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_OFST 16
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_LEN 8
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_LO_OFST 16
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_LO_LEN 4
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_LO_LBN 128
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_LO_WIDTH 32
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_HI_OFST 20
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_HI_LEN 4
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_HI_LBN 160
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_HI_WIDTH 32
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_TYPE_LBN 128
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_TYPE_WIDTH 3
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_IS_ZOMBIE_LBN 136
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_IS_ZOMBIE_WIDTH 1
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_ENTRY_SRC_OFST 24
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_ENTRY_SRC_LEN 1
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_LABEL_OFST 25
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_LABEL_LEN 40
+
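Annotation: because the array stride is the SIZEOF_NET_PORT_HANDLE_DESC value
from the response rather than the compile-time NET_PORT_HANDLE_DESC_LEN, a
parser written against this header keeps working when newer firmware appends
fields. A hedged sketch of the walk, assuming a hypothetical get_le32()
little-endian load helper:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t get_le32(const uint8_t *p)
    {
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    }

    static void walk_enum_ports(const uint8_t *resp, size_t resp_len)
    {
        uint32_t count = get_le32(resp + MC_CMD_ENUM_PORTS_OUT_PORT_COUNT_OFST);
        uint32_t stride =
            get_le32(resp + MC_CMD_ENUM_PORTS_OUT_SIZEOF_NET_PORT_HANDLE_DESC_OFST);
        const uint8_t *desc = resp + MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_OFST;

        if (stride < NET_PORT_HANDLE_DESC_LEN)
            return; /* older firmware than this header: bail out */
        for (uint32_t i = 0; i < count; i++, desc += stride) {
            if ((size_t)(desc - resp) + stride > resp_len)
                break; /* truncated response */
            /* PORT_LABEL is a zero-terminated ASCII string of at most
             * 40 bytes; cap the print width accordingly. */
            printf("port %#x: %.40s\n",
                   get_le32(desc + NET_PORT_HANDLE_DESC_PORT_HANDLE_OFST),
                   (const char *)(desc + NET_PORT_HANDLE_DESC_PORT_LABEL_OFST));
        }
    }

If the MORE flag is set in FLAGS, the command should be reissued to drain the
remaining entries, since the response is clear-on-read.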
+
+/***********************************/
+/* MC_CMD_GET_TRANSCEIVER_PROPERTIES
+ * Read properties of the transceiver associated with the port. Can be either
+ * for a fixed onboard transceiver or an inserted module. The returned data is
+ * interpreted from the transceiver hardware and may be fixed up by the
+ * firmware. Use MC_CMD_GET_MODULE_DATA to get raw undecoded data.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES 0x1e6
+#undef MC_CMD_0x1e6_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e6_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_GET_TRANSCEIVER_PROPERTIES_IN msgrequest */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_IN_LEN 4
+/* Handle to port to get transceiver properties from. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_IN_PORT_HANDLE_LEN 4
+
+/* MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT msgresponse */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_LEN 89
+/* Supported technology abilities. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_TECH_ABILITIES_MASK_OFST 0
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_TECH_ABILITIES_MASK_LEN 16
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_ETH_TECH/TECH */
+/* Reserved to accommodate future Ethernet technology expansion. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_RESERVED_OFST 16
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_RESERVED_LEN 16
+/* Preferred FEC modes. This is a function of the cable type and length. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_PREFERRED_FEC_MASK_OFST 32
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_PREFERRED_FEC_MASK_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+/* SFF-8024 code reported by the module. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_CODE_OFST 36
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_CODE_LEN 2
+/* Medium. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_MEDIUM_OFST 38
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_MEDIUM_LEN 1
+/* enum property: value */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_UNKNOWN 0x0 /* enum */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_COPPER 0x1 /* enum */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_OPTICAL 0x2 /* enum */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_BACKPLANE 0x3 /* enum */
+/* Identifies the media subtype of the Ethernet technology. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_MEDIA_SUBTYPE_OFST 39
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_MEDIA_SUBTYPE_LEN 1
+/* enum property: value */
+/* MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_UNKNOWN 0x0 */
+/* enum: Ethernet over twisted-pair copper cables for distances up to 100
+ * meters.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_BASET 0x1
+/* enum: Ethernet over twin-axial, balanced copper cable. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_CR 0x2
+/* enum: Ethernet over backplane for connections on the same board. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_KX 0x3
+/* enum: Ethernet over a single backplane lane for connections between
+ * different boards.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_KR 0x4
+/* enum: Ethernet over copper backplane. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_KP 0x5
+/* enum: Ethernet over optical fiber. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_BASEX 0x6
+/* enum: Short range Ethernet over multimode optical fiber (see IEEE 802.3
+ * Clauses 49 and 52).
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_SR 0x7
+/* enum: Long range, extended range or far reach Ethernet used with single
+ * mode optical fiber.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_LR_ER_FR 0x8
+/* enum: Long reach multimode Ethernet over multimode optical fiber. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_LRM 0x9
+/* enum: Very short reach PAM4 Ethernet over multimode optical fiber (see IEEE
+ * 802.3db).
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VR 0xa
+/* enum: BASE-R encoding and PAM4 over single-mode fiber with reach up to at
+ * least 500 meters (IEEE 802.3 Clauses 121 and 124).
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_DR 0xb
+/* String of the vendor name as interpreted by NMC firmware. NMC firmware
+ * applies workarounds for known buggy transceivers. The vendor name is
+ * presented as 16 bytes of ASCII characters padded with spaces. It can also be
+ * represented as 16 bytes of zeros if the field is unspecified for the
+ * connected module. See SFF-8472/CMIS specifications for details.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_NAME_OFST 40
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_NAME_LEN 1
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_NAME_NUM 16
+/* The vendor part number as interpreted by NMC firmware. The field is
+ * presented
+ * as 16 bytes of ASCII chars padded with spaces. It can also be 16 bytes of
+ * zeros if the field is unspecified for the connected module.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_PN_OFST 56
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_PN_LEN 1
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_PN_NUM 16
+/* Serial number of the module presented as 16 bytes of ASCII characters padded
+ * with spaces. It can also be 16 bytes of zeros if the field is unspecified
+ * for the connected module. See SFF-8472/CMIS specifications for details.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_SERIAL_NUMBER_OFST 72
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_SERIAL_NUMBER_LEN 1
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_SERIAL_NUMBER_NUM 16
+/* This reports the number of module changes detected by the NMC firmware. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_PORT_MODULECHANGE_SEQ_NUM_OFST 88
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_PORT_MODULECHANGE_SEQ_NUM_LEN 1
+
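Annotation: VENDOR_NAME, VENDOR_PN and SERIAL_NUMBER are all fixed 16-byte
fields padded with spaces (or all zeros when unspecified), so they need
trimming before display. A small sketch, assuming nothing beyond the offsets
above:

    #include <stdint.h>
    #include <string.h>

    /* Copy a 16-byte space-padded ASCII field into a NUL-terminated C
     * string; an all-zero field decodes to the empty string. */
    static void copy_padded16(char out[17], const uint8_t *resp, size_t ofst)
    {
        memcpy(out, resp + ofst, 16);
        out[16] = '\0';
        for (int i = 15; i >= 0 && (out[i] == ' ' || out[i] == '\0'); i--)
            out[i] = '\0';
    }

Usage would look like
copy_padded16(name, resp, MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_NAME_OFST).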
+
+/***********************************/
+/* MC_CMD_GET_FIXED_PORT_PROPERTIES
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES 0x1e7
+#undef MC_CMD_0x1e7_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e7_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_GET_FIXED_PORT_PROPERTIES_IN msgrequest: In this context, the port
+ * consists of the MAC and the PHY, and excludes any modules inserted into the
+ * cage. This information is fixed for a given board but not for a given ASIC.
+ * This command reports properties for the port as it is currently configured,
+ * not its hardware capabilities, which may exceed the current configuration.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_IN_LEN 4
+/* Handle to the port from which to retrieve properties. */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_IN_PORT_HANDLE_LEN 4
+
+/* MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT msgresponse */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_LEN 36
+/* Supported capabilities of the port in its current configuration. */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_OFST 0
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_LEN 25
+/* See structuredef: MC_CMD_ETH_AN_FIELDS */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_TECH_MASK_OFST 0
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_TECH_MASK_LEN 16
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_FEC_MASK_OFST 16
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_FEC_MASK_LEN 4
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_FEC_REQ_OFST 20
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_FEC_REQ_LEN 4
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_PAUSE_MASK_OFST 24
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_PAUSE_MASK_LEN 1
+/* Number of lanes supported by the port in its current configuration. */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_NUM_LANES_OFST 25
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_NUM_LANES_LEN 1
+/* Bitmask of supported loopback modes. Where the response to this command
+ * includes the LOOPBACK_MODES_MASK_V2 field, that field should be used in
+ * preference to ensure that all available loopback modes are seen.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_LOOPBACK_MODES_MASK_OFST 26
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_LOOPBACK_MODES_MASK_LEN 1
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_LOOPBACK_V2/MODE */
+/* This field serves as a cage index that uniquely identifies the cage to which
+ * the module is connected. This is useful when splitter cables that have
+ * multiple ports on a single cage are used.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_MDI_INDEX_OFST 27
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_MDI_INDEX_LEN 1
+/* This bitmask is used to specify the lanes within the cage identified by
+ * MDI_INDEX that are allocated to the port.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_MDI_LANE_MASK_OFST 28
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_MDI_LANE_MASK_LEN 1
+/* Maximum frame length supported by the port in its current configuration. */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_MAX_FRAME_LEN_OFST 32
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_MAX_FRAME_LEN_LEN 4
+
+/* MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2 msgresponse */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LEN 48
+/* Supported capabilities of the port in its current configuration. */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_ABILITIES_OFST 0
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_ABILITIES_LEN 25
+/* Number of lanes supported by the port in its current configuration. */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_NUM_LANES_OFST 25
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_NUM_LANES_LEN 1
+/* Bitmask of supported loopback modes. Where the response to this command
+ * includes the LOOPBACK_MODES_MASK_V2 field, that field should be used in
+ * preference to ensure that all available loopback modes are seen.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_OFST 26
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_LEN 1
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_LOOPBACK_V2/MODE */
+/* This field serves as a cage index that uniquely identifies the cage to which
+ * the module is connected. This is useful when splitter cables that have
+ * multiple ports on a single cage are used.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_MDI_INDEX_OFST 27
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_MDI_INDEX_LEN 1
+/* This bitmask is used to specify the lanes within the cage identified by
+ * MDI_INDEX that are allocated to the port.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_MDI_LANE_MASK_OFST 28
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_MDI_LANE_MASK_LEN 1
+/* Maximum frame length supported by the port in its current configuration. */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_MAX_FRAME_LEN_OFST 32
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_MAX_FRAME_LEN_LEN 4
+/* Bitmask of supported loopback modes. This field replaces the
+ * LOOPBACK_MODES_MASK field which is defined under version 1 of this command.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_OFST 40
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_LEN 8
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_LO_OFST 40
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_LO_LEN 4
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_LO_LBN 320
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_LO_WIDTH 32
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_HI_OFST 44
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_HI_LEN 4
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_HI_LBN 352
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_LOOPBACK_V2/MODE */
+
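Annotation: as the V2 response is distinguished only by its length, a driver
can select between the 8-bit V1 mask and the 64-bit V2 mask by checking how
many bytes the firmware returned. A sketch, reusing the get_le32() helper
assumed in the MC_CMD_ENUM_PORTS annotation above:

    static uint64_t fixed_port_loopback_modes(const uint8_t *resp,
                                              size_t resp_len)
    {
        if (resp_len >= MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LEN) {
            uint64_t lo = get_le32(resp +
                MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_LO_OFST);
            uint64_t hi = get_le32(resp +
                MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_HI_OFST);
            return lo | (hi << 32);
        }
        /* Fall back to the 8-bit V1 field. */
        return resp[MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_LOOPBACK_MODES_MASK_OFST];
    }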
+
+/***********************************/
+/* MC_CMD_GET_MODULE_DATA
+ * Read media-specific data from the PHY (e.g. SFP/SFP+ module ID information
+ * for SFP+ PHYs). This command returns raw data from the module's EEPROM and
+ * it is not interpreted by the MC. Use MC_CMD_GET_TRANSCEIVER_PROPERTIES to
+ * get interpreted data. Return code: 0, ENOENT
+ */
+#define MC_CMD_GET_MODULE_DATA 0x1e8
+#undef MC_CMD_0x1e8_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e8_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_GET_MODULE_DATA_IN msgrequest */
+#define MC_CMD_GET_MODULE_DATA_IN_LEN 16
+/* Handle to identify the port from which to request module properties. */
+#define MC_CMD_GET_MODULE_DATA_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_MODULE_DATA_IN_PORT_HANDLE_LEN 4
+/* 7 bit I2C address of the device. DEPRECATED: This field is replaced by
+ * MODULE_ADDR in V2. Use V2 of this command for proper alignment and easier
+ * access.
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_DEVADDR_LBN 32
+#define MC_CMD_GET_MODULE_DATA_IN_DEVADDR_WIDTH 7
+/* 0 if the page does not support banked access, non-zero otherwise. Non-zero
+ * BANK is valid if OFFSET is in the range 80h - ffh, i.e. in the Upper Memory
+ * region.
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_BANK_OFST 6
+#define MC_CMD_GET_MODULE_DATA_IN_BANK_LEN 2
+/* 0 if paged access is not supported, non-zero otherwise. Non-zero PAGE is
+ * valid if OFFSET is in the range 80h - ffh.
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_PAGE_OFST 8
+#define MC_CMD_GET_MODULE_DATA_IN_PAGE_LEN 2
+/* Offset in the range 00h - 7fh to access lower memory. Offset in the range
+ * 80h - ffh to access upper memory
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_OFFSET_OFST 10
+#define MC_CMD_GET_MODULE_DATA_IN_OFFSET_LEN 1
+#define MC_CMD_GET_MODULE_DATA_IN_LENGTH_OFST 12
+#define MC_CMD_GET_MODULE_DATA_IN_LENGTH_LEN 4
+
+/* MC_CMD_GET_MODULE_DATA_IN_V2 msgrequest: Updated MC_CMD_GET_MODULE_DATA
+ * with an 8-bit wide ADDRESSING field. This new field provides a correctly
+ * aligned container for the 7-bit DEVADDR field from V1, now renamed
+ * MODULE_ADDR.
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_V2_LEN 16
+/* Handle to identify the port from which to request module properties. */
+#define MC_CMD_GET_MODULE_DATA_IN_V2_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_MODULE_DATA_IN_V2_PORT_HANDLE_LEN 4
+/* 7 bit I2C address of the device. DEPRECATED: this field is superseded by
+ * MODULE_ADDR below, which provides the same bits in a correctly aligned
+ * container.
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_V2_DEVADDR_LBN 32
+#define MC_CMD_GET_MODULE_DATA_IN_V2_DEVADDR_WIDTH 7
+/* 0 if the page does not support banked access, non-zero otherwise. Non-zero
+ * BANK is valid if OFFSET is in the range 80h - ffh, i.e. in the Upper Memory
+ * region.
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_V2_BANK_OFST 6
+#define MC_CMD_GET_MODULE_DATA_IN_V2_BANK_LEN 2
+/* 0 if paged access is not supported, non-zero otherwise. Non-zero PAGE is
+ * valid if OFFSET is in the range 80h - ffh.
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_V2_PAGE_OFST 8
+#define MC_CMD_GET_MODULE_DATA_IN_V2_PAGE_LEN 2
+/* Offset in the range 00h - 7fh to access lower memory. Offset in the range
+ * 80h - ffh to access upper memory
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_V2_OFFSET_OFST 10
+#define MC_CMD_GET_MODULE_DATA_IN_V2_OFFSET_LEN 1
+#define MC_CMD_GET_MODULE_DATA_IN_V2_LENGTH_OFST 12
+#define MC_CMD_GET_MODULE_DATA_IN_V2_LENGTH_LEN 4
+/* Container for 7 bit I2C addresses. */
+#define MC_CMD_GET_MODULE_DATA_IN_V2_ADDRESSING_OFST 4
+#define MC_CMD_GET_MODULE_DATA_IN_V2_ADDRESSING_LEN 1
+#define MC_CMD_GET_MODULE_DATA_IN_V2_MODULE_ADDR_OFST 4
+#define MC_CMD_GET_MODULE_DATA_IN_V2_MODULE_ADDR_LBN 0
+#define MC_CMD_GET_MODULE_DATA_IN_V2_MODULE_ADDR_WIDTH 7
+
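Annotation: reading module EEPROM through this command mirrors SFF-8472/CMIS
addressing: OFFSET selects lower (00h-7fh) or upper (80h-ffh) memory, and
PAGE/BANK are only meaningful for upper-memory reads. A sketch of building a
V2 request, assuming the put_le32() helper from the MC_CMD_MAC_STATISTICS
annotation plus an analogous put_le16(); the 0x50 module address is the
conventional SFP/CMIS I2C address, not something this header mandates:

    #include <string.h>

    static void put_le16(uint8_t *buf, size_t ofst, uint16_t v)
    {
        buf[ofst + 0] = (uint8_t)v;
        buf[ofst + 1] = (uint8_t)(v >> 8);
    }

    static void build_module_read(uint8_t req[MC_CMD_GET_MODULE_DATA_IN_V2_LEN],
                                  uint32_t port, uint16_t bank, uint16_t page,
                                  uint8_t offset, uint32_t len)
    {
        memset(req, 0, MC_CMD_GET_MODULE_DATA_IN_V2_LEN);
        put_le32(req, MC_CMD_GET_MODULE_DATA_IN_V2_PORT_HANDLE_OFST, port);
        /* MODULE_ADDR occupies bits 0..6 of the ADDRESSING byte. */
        req[MC_CMD_GET_MODULE_DATA_IN_V2_ADDRESSING_OFST] = 0x50 & 0x7f;
        put_le16(req, MC_CMD_GET_MODULE_DATA_IN_V2_BANK_OFST, bank);
        put_le16(req, MC_CMD_GET_MODULE_DATA_IN_V2_PAGE_OFST, page);
        req[MC_CMD_GET_MODULE_DATA_IN_V2_OFFSET_OFST] = offset;
        put_le32(req, MC_CMD_GET_MODULE_DATA_IN_V2_LENGTH_OFST, len);
    }

The response's DATALEN field then reports how many DATA bytes follow.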
+/* MC_CMD_GET_MODULE_DATA_OUT msgresponse */
+#define MC_CMD_GET_MODULE_DATA_OUT_LENMIN 5
+#define MC_CMD_GET_MODULE_DATA_OUT_LENMAX 252
+#define MC_CMD_GET_MODULE_DATA_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_GET_MODULE_DATA_OUT_LEN(num) (4+1*(num))
+#define MC_CMD_GET_MODULE_DATA_OUT_DATA_NUM(len) (((len)-4)/1)
+/* length of the data in bytes */
+#define MC_CMD_GET_MODULE_DATA_OUT_DATALEN_OFST 0
+#define MC_CMD_GET_MODULE_DATA_OUT_DATALEN_LEN 4
+#define MC_CMD_GET_MODULE_DATA_OUT_DATA_OFST 4
+#define MC_CMD_GET_MODULE_DATA_OUT_DATA_LEN 1
+#define MC_CMD_GET_MODULE_DATA_OUT_DATA_MINNUM 1
+#define MC_CMD_GET_MODULE_DATA_OUT_DATA_MAXNUM 248
+#define MC_CMD_GET_MODULE_DATA_OUT_DATA_MAXNUM_MCDI2 1016
+
+/* EVENT_MASK structuredef */
+#define EVENT_MASK_LEN 4
+#define EVENT_MASK_TYPE_OFST 0
+#define EVENT_MASK_TYPE_LEN 4
+/* enum: PORT_LINKCHANGE event is enabled */
+#define EVENT_MASK_PORT_LINKCHANGE 0x0
+/* enum: PORT_MODULECHANGE event is enabled */
+#define EVENT_MASK_PORT_MODULECHANGE 0x1
+#define EVENT_MASK_TYPE_LBN 0
+#define EVENT_MASK_TYPE_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_SET_NETPORT_EVENTS_MASK
+ */
+#define MC_CMD_SET_NETPORT_EVENTS_MASK 0x1e9
+#undef MC_CMD_0x1e9_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e9_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_SET_NETPORT_EVENTS_MASK_IN msgrequest: Enable or disable delivery of
+ * specified network port events for a given port identified by PORT_HANDLE. At
+ * start of day, or after any control interface reset (FLR, ENTITY_RESET,
+ * etc.), all event delivery is disabled for all ports associated with the
+ * control interface.
+ */
+#define MC_CMD_SET_NETPORT_EVENTS_MASK_IN_LEN 8
+/* Handle to port to set event delivery mask. */
+#define MC_CMD_SET_NETPORT_EVENTS_MASK_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_SET_NETPORT_EVENTS_MASK_IN_PORT_HANDLE_LEN 4
+/* Bitmask of events to enable. Event delivery is enabled when corresponding
+ * bit is 1, disabled when 0.
+ */
+#define MC_CMD_SET_NETPORT_EVENTS_MASK_IN_EVENT_MASK_OFST 4
+#define MC_CMD_SET_NETPORT_EVENTS_MASK_IN_EVENT_MASK_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* EVENT_MASK/TYPE */
+
+/* MC_CMD_SET_NETPORT_EVENTS_MASK_OUT msgresponse */
+#define MC_CMD_SET_NETPORT_EVENTS_MASK_OUT_LEN 0
+
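Annotation: the EVENT_MASK enum values are bit positions ("enum property:
bitshift"), not mask values, so enabling events means OR-ing shifted bits. A
sketch of building the request payload, again assuming the put_le32() helper
from the MC_CMD_MAC_STATISTICS annotation:

    static void build_set_events_req(
        uint8_t req[MC_CMD_SET_NETPORT_EVENTS_MASK_IN_LEN],
        uint32_t port_handle)
    {
        uint32_t mask = (1u << EVENT_MASK_PORT_LINKCHANGE) |
                        (1u << EVENT_MASK_PORT_MODULECHANGE);

        put_le32(req, MC_CMD_SET_NETPORT_EVENTS_MASK_IN_PORT_HANDLE_OFST,
                 port_handle);
        put_le32(req, MC_CMD_SET_NETPORT_EVENTS_MASK_IN_EVENT_MASK_OFST, mask);
    }

Since event delivery is disabled for all ports after any control interface
reset, a driver needs to reissue this command as part of its reset recovery.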
/***********************************/
-/* MC_CMD_EVENT_CTRL
- * Configure which categories of unsolicited events the driver expects to
- * receive (Riverhead).
- */
-#define MC_CMD_EVENT_CTRL 0x69
-#undef MC_CMD_0x69_PRIVILEGE_CTG
-
-#define MC_CMD_0x69_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_EVENT_CTRL_IN msgrequest */
-#define MC_CMD_EVENT_CTRL_IN_LENMIN 0
-#define MC_CMD_EVENT_CTRL_IN_LENMAX 252
-#define MC_CMD_EVENT_CTRL_IN_LENMAX_MCDI2 1020
-#define MC_CMD_EVENT_CTRL_IN_LEN(num) (0+4*(num))
-#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_NUM(len) (((len)-0)/4)
-/* Array of event categories for which the driver wishes to receive events. */
-#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_OFST 0
-#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_LEN 4
-#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MINNUM 0
-#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MAXNUM 63
-#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MAXNUM_MCDI2 255
-/* enum: Driver wishes to receive LINKCHANGE events. */
-#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_LINKCHANGE 0x0
-/* enum: Driver wishes to receive SENSOR_CHANGE and SENSOR_STATE_CHANGE events.
- */
-#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_SENSOREVT 0x1
-/* enum: Driver wishes to receive receive errors. */
-#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_RX_ERR 0x2
-/* enum: Driver wishes to receive transmit errors. */
-#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_TX_ERR 0x3
-/* enum: Driver wishes to receive firmware alerts. */
-#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_FWALERT 0x4
-/* enum: Driver wishes to receive reboot events. */
-#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_MC_REBOOT 0x5
-
-/* MC_CMD_EVENT_CTRL_OUT msgrequest */
-#define MC_CMD_EVENT_CTRL_OUT_LEN 0
+/* MC_CMD_GET_NETPORT_EVENTS_MASK
+ */
+#define MC_CMD_GET_NETPORT_EVENTS_MASK 0x1ea
+#undef MC_CMD_0x1ea_PRIVILEGE_CTG
+
+#define MC_CMD_0x1ea_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_GET_NETPORT_EVENTS_MASK_IN msgrequest: Get the event delivery mask
+ * for a given port identified by PORT_HANDLE.
+ */
+#define MC_CMD_GET_NETPORT_EVENTS_MASK_IN_LEN 4
+/* Handle to port to get event delivery mask for. */
+#define MC_CMD_GET_NETPORT_EVENTS_MASK_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_NETPORT_EVENTS_MASK_IN_PORT_HANDLE_LEN 4
+
+/* MC_CMD_GET_NETPORT_EVENTS_MASK_OUT msgresponse */
+#define MC_CMD_GET_NETPORT_EVENTS_MASK_OUT_LEN 4
+/* Bitmask of events enabled. Event delivery is enabled when corresponding bit
+ * is 1, disabled when 0.
+ */
+#define MC_CMD_GET_NETPORT_EVENTS_MASK_OUT_EVENT_MASK_OFST 0
+#define MC_CMD_GET_NETPORT_EVENTS_MASK_OUT_EVENT_MASK_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* EVENT_MASK/TYPE */
+
+
+/***********************************/
+/* MC_CMD_GET_SUPPORTED_NETPORT_EVENTS
+ */
+#define MC_CMD_GET_SUPPORTED_NETPORT_EVENTS 0x1eb
+#undef MC_CMD_0x1eb_PRIVILEGE_CTG
+
+#define MC_CMD_0x1eb_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_GET_SUPPORTED_NETPORT_EVENTS_IN msgrequest: Get network port events
+ * supported by the platform. Information returned is fixed for a given NIC
+ * platform.
+ */
+#define MC_CMD_GET_SUPPORTED_NETPORT_EVENTS_IN_LEN 0
+
+/* MC_CMD_GET_SUPPORTED_NETPORT_EVENTS_OUT msgresponse */
+#define MC_CMD_GET_SUPPORTED_NETPORT_EVENTS_OUT_LEN 4
+/* Bitmask of supported events. An event type is supported when the
+ * corresponding bit is 1, unsupported when 0.
+ */
+#define MC_CMD_GET_SUPPORTED_NETPORT_EVENTS_OUT_EVENT_MASK_OFST 0
+#define MC_CMD_GET_SUPPORTED_NETPORT_EVENTS_OUT_EVENT_MASK_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* EVENT_MASK/TYPE */
+
+
+/***********************************/
+/* MC_CMD_GET_NETPORT_STATISTICS
+ * Get generic MAC statistics. This call retrieves unified statistics managed
+ * by the MC. The MC will populate and provide all supported statistics in the
+ * format returned by MC_CMD_MAC_STATISTICS_DESCRIPTOR. Refer to the
+ * aforementioned command for the format and contents of the stats DMA buffer.
+ * To ensure consistent and accurate results, it is essential for the driver
+ * to initialize the DMA buffer with zeros when DMA mode is used. Returns: 0
+ * on success, ETIME if the DMA buffer is not ready, ENOENT on a non-existent
+ * port handle, and EINVAL on invalid parameters (DMA buffer too small).
+ */
+#define MC_CMD_GET_NETPORT_STATISTICS 0x1fa
+#undef MC_CMD_0x1fa_PRIVILEGE_CTG
+
+#define MC_CMD_0x1fa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_NETPORT_STATISTICS_IN msgrequest */
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_LEN 20
+/* Handle of port to get MAC statistics for. */
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PORT_HANDLE_LEN 4
+/* Contains options for querying the MAC statistics. */
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_CMD_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_CMD_LEN 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_LBN 0
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_WIDTH 1
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_CLEAR_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_CLEAR_LBN 1
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_CLEAR_WIDTH 1
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_CHANGE_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_CHANGE_LBN 2
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_CHANGE_WIDTH 1
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_ENABLE_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_ENABLE_LBN 3
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_ENABLE_WIDTH 1
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_NOEVENT_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_NOEVENT_LBN 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_NOEVENT_WIDTH 1
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIOD_MS_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIOD_MS_LBN 15
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIOD_MS_WIDTH 17
+/* Specifies the physical address of the DMA buffer to use for statistics
+ * transfer. This field must contain a valid address under either of these
+ * conditions: 1. DMA flag is set (immediate DMA requested) 2. Both
+ * PERIODIC_CHANGE and PERIODIC_ENABLE are set (periodic DMA configured)
+ */
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_OFST 8
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_LO_OFST 8
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_LO_LBN 64
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_LO_WIDTH 32
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_HI_OFST 12
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_HI_LBN 96
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_HI_WIDTH 32
+/* Specifies the length of the DMA buffer in bytes for statistics transfer. The
+ * buffer size must be at least DMA_BUFFER_SIZE bytes (as returned by
+ * MC_CMD_MAC_STATISTICS_DESCRIPTOR). Providing an insufficient buffer size
+ * will result in an EINVAL error. This field must contain a valid length under
+ * either of these conditions: 1. DMA flag is set (immediate DMA requested) 2.
+ * Both PERIODIC_CHANGE and PERIODIC_ENABLE are set (periodic DMA configured)
+ */
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_LEN_OFST 16
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_LEN_LEN 4
+
+/* MC_CMD_GET_NETPORT_STATISTICS_OUT msgresponse */
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_LENMIN 0
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_LENMAX 248
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_LENMAX_MCDI2 1016
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_LEN(num) (0+8*(num))
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_NUM(len) (((len)-0)/8)
+/* Statistics buffer. Zero-length if DMA mode is used. The statistics buffer is
+ * an array of 8-byte counter values, containing the generation start marker,
+ * stats counters, and generation end marker. The index of each counter in the
+ * array is reported by the MAC_STATISTICS_DESCRIPTOR command. The same layout
+ * is used for the DMA buffer for DMA mode stats.
+ */
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_OFST 0
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_LEN 8
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_LO_OFST 0
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_LO_LEN 4
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_LO_LBN 0
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_LO_WIDTH 32
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_HI_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_HI_LEN 4
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_HI_LBN 32
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_HI_WIDTH 32
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_MINNUM 0
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_MAXNUM 31
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_MAXNUM_MCDI2 127
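Annotation: the generation start and end markers bracket the counters so a
reader can detect a torn snapshot while firmware is mid-update: read the start
marker, copy the counters, and retry if the end marker no longer matches. A
sketch under the assumption that the marker indices were obtained from
MC_CMD_MAC_STATISTICS_DESCRIPTOR (memory barriers, which a real driver would
need, are omitted):

    static int read_stats_snapshot(const volatile uint64_t *buf, size_t nwords,
                                   size_t gen_start, size_t gen_end,
                                   uint64_t *out)
    {
        for (int retry = 0; retry < 100; retry++) {
            uint64_t gen = buf[gen_start];

            for (size_t i = 0; i < nwords; i++)
                out[i] = buf[i];
            if (out[gen_end] == gen)
                return 0;   /* markers agree: snapshot is coherent */
        }
        return -1;          /* firmware kept updating it; give up */
    }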
/* EVB_PORT_ID structuredef */
#define EVB_PORT_ID_LEN 4
@@ -9706,44 +10573,6 @@
#define EVB_PORT_ID_PORT_ID_LBN 0
#define EVB_PORT_ID_PORT_ID_WIDTH 32
-/* EVB_VLAN_TAG structuredef */
-#define EVB_VLAN_TAG_LEN 2
-/* The VLAN tag value */
-#define EVB_VLAN_TAG_VLAN_ID_LBN 0
-#define EVB_VLAN_TAG_VLAN_ID_WIDTH 12
-#define EVB_VLAN_TAG_MODE_LBN 12
-#define EVB_VLAN_TAG_MODE_WIDTH 4
-/* enum: Insert the VLAN. */
-#define EVB_VLAN_TAG_INSERT 0x0
-/* enum: Replace the VLAN if already present. */
-#define EVB_VLAN_TAG_REPLACE 0x1
-
-/* BUFTBL_ENTRY structuredef */
-#define BUFTBL_ENTRY_LEN 12
-/* the owner ID */
-#define BUFTBL_ENTRY_OID_OFST 0
-#define BUFTBL_ENTRY_OID_LEN 2
-#define BUFTBL_ENTRY_OID_LBN 0
-#define BUFTBL_ENTRY_OID_WIDTH 16
-/* the page parameter as one of ESE_DZ_SMC_PAGE_SIZE_ */
-#define BUFTBL_ENTRY_PGSZ_OFST 2
-#define BUFTBL_ENTRY_PGSZ_LEN 2
-#define BUFTBL_ENTRY_PGSZ_LBN 16
-#define BUFTBL_ENTRY_PGSZ_WIDTH 16
-/* the raw 64-bit address field from the SMC, not adjusted for page size */
-#define BUFTBL_ENTRY_RAWADDR_OFST 4
-#define BUFTBL_ENTRY_RAWADDR_LEN 8
-#define BUFTBL_ENTRY_RAWADDR_LO_OFST 4
-#define BUFTBL_ENTRY_RAWADDR_LO_LEN 4
-#define BUFTBL_ENTRY_RAWADDR_LO_LBN 32
-#define BUFTBL_ENTRY_RAWADDR_LO_WIDTH 32
-#define BUFTBL_ENTRY_RAWADDR_HI_OFST 8
-#define BUFTBL_ENTRY_RAWADDR_HI_LEN 4
-#define BUFTBL_ENTRY_RAWADDR_HI_LBN 64
-#define BUFTBL_ENTRY_RAWADDR_HI_WIDTH 32
-#define BUFTBL_ENTRY_RAWADDR_LBN 32
-#define BUFTBL_ENTRY_RAWADDR_WIDTH 64
-
/* NVRAM_PARTITION_TYPE structuredef */
#define NVRAM_PARTITION_TYPE_LEN 2
#define NVRAM_PARTITION_TYPE_ID_OFST 0
@@ -9787,6 +10616,8 @@
#define NVRAM_PARTITION_TYPE_NMC_LOG 0x700
/* enum: Non-volatile log output of second core on dual-core device */
#define NVRAM_PARTITION_TYPE_LOG_SLAVE 0x701
+/* enum: RAM (volatile) log output partition */
+#define NVRAM_PARTITION_TYPE_RAM_LOG 0x702
/* enum: Device state dump output partition */
#define NVRAM_PARTITION_TYPE_DUMP 0x800
/* enum: Crash log partition for NMC firmware */
@@ -9923,6 +10754,16 @@
#define NVRAM_PARTITION_TYPE_SUC_SOC_CONFIG 0x1f07
/* enum: System-on-Chip update information. */
#define NVRAM_PARTITION_TYPE_SOC_UPDATE 0x2003
+/* enum: Virtual partition. Write-only. Writes will actually be sent to an
+ * appropriate partition (for instance BUNDLE if the data starts with the magic
+ * number for a bundle update), or discarded with an error if not recognised as
+ * a supported type.
+ */
+#define NVRAM_PARTITION_TYPE_AUTO 0x2100
+/* enum: MC/NMC (first stage) bootloader firmware. (For X4, see XN-202072-PS
+ * and XN-202084-SW section 3.1).
+ */
+#define NVRAM_PARTITION_TYPE_BOOTLOADER 0x2200
/* enum: Start of reserved value range (firmware may use for any purpose) */
#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00
/* enum: End of reserved value range (firmware may use for any purpose) */
@@ -9981,116 +10822,6 @@
#define LICENSED_APP_ID_ID_LBN 0
#define LICENSED_APP_ID_ID_WIDTH 32
-/* LICENSED_FEATURES structuredef */
-#define LICENSED_FEATURES_LEN 8
-/* Bitmask of licensed firmware features */
-#define LICENSED_FEATURES_MASK_OFST 0
-#define LICENSED_FEATURES_MASK_LEN 8
-#define LICENSED_FEATURES_MASK_LO_OFST 0
-#define LICENSED_FEATURES_MASK_LO_LEN 4
-#define LICENSED_FEATURES_MASK_LO_LBN 0
-#define LICENSED_FEATURES_MASK_LO_WIDTH 32
-#define LICENSED_FEATURES_MASK_HI_OFST 4
-#define LICENSED_FEATURES_MASK_HI_LEN 4
-#define LICENSED_FEATURES_MASK_HI_LBN 32
-#define LICENSED_FEATURES_MASK_HI_WIDTH 32
-#define LICENSED_FEATURES_RX_CUT_THROUGH_OFST 0
-#define LICENSED_FEATURES_RX_CUT_THROUGH_LBN 0
-#define LICENSED_FEATURES_RX_CUT_THROUGH_WIDTH 1
-#define LICENSED_FEATURES_PIO_OFST 0
-#define LICENSED_FEATURES_PIO_LBN 1
-#define LICENSED_FEATURES_PIO_WIDTH 1
-#define LICENSED_FEATURES_EVQ_TIMER_OFST 0
-#define LICENSED_FEATURES_EVQ_TIMER_LBN 2
-#define LICENSED_FEATURES_EVQ_TIMER_WIDTH 1
-#define LICENSED_FEATURES_CLOCK_OFST 0
-#define LICENSED_FEATURES_CLOCK_LBN 3
-#define LICENSED_FEATURES_CLOCK_WIDTH 1
-#define LICENSED_FEATURES_RX_TIMESTAMPS_OFST 0
-#define LICENSED_FEATURES_RX_TIMESTAMPS_LBN 4
-#define LICENSED_FEATURES_RX_TIMESTAMPS_WIDTH 1
-#define LICENSED_FEATURES_TX_TIMESTAMPS_OFST 0
-#define LICENSED_FEATURES_TX_TIMESTAMPS_LBN 5
-#define LICENSED_FEATURES_TX_TIMESTAMPS_WIDTH 1
-#define LICENSED_FEATURES_RX_SNIFF_OFST 0
-#define LICENSED_FEATURES_RX_SNIFF_LBN 6
-#define LICENSED_FEATURES_RX_SNIFF_WIDTH 1
-#define LICENSED_FEATURES_TX_SNIFF_OFST 0
-#define LICENSED_FEATURES_TX_SNIFF_LBN 7
-#define LICENSED_FEATURES_TX_SNIFF_WIDTH 1
-#define LICENSED_FEATURES_PROXY_FILTER_OPS_OFST 0
-#define LICENSED_FEATURES_PROXY_FILTER_OPS_LBN 8
-#define LICENSED_FEATURES_PROXY_FILTER_OPS_WIDTH 1
-#define LICENSED_FEATURES_EVENT_CUT_THROUGH_OFST 0
-#define LICENSED_FEATURES_EVENT_CUT_THROUGH_LBN 9
-#define LICENSED_FEATURES_EVENT_CUT_THROUGH_WIDTH 1
-#define LICENSED_FEATURES_MASK_LBN 0
-#define LICENSED_FEATURES_MASK_WIDTH 64
-
-/* LICENSED_V3_APPS structuredef */
-#define LICENSED_V3_APPS_LEN 8
-/* Bitmask of licensed applications */
-#define LICENSED_V3_APPS_MASK_OFST 0
-#define LICENSED_V3_APPS_MASK_LEN 8
-#define LICENSED_V3_APPS_MASK_LO_OFST 0
-#define LICENSED_V3_APPS_MASK_LO_LEN 4
-#define LICENSED_V3_APPS_MASK_LO_LBN 0
-#define LICENSED_V3_APPS_MASK_LO_WIDTH 32
-#define LICENSED_V3_APPS_MASK_HI_OFST 4
-#define LICENSED_V3_APPS_MASK_HI_LEN 4
-#define LICENSED_V3_APPS_MASK_HI_LBN 32
-#define LICENSED_V3_APPS_MASK_HI_WIDTH 32
-#define LICENSED_V3_APPS_ONLOAD_OFST 0
-#define LICENSED_V3_APPS_ONLOAD_LBN 0
-#define LICENSED_V3_APPS_ONLOAD_WIDTH 1
-#define LICENSED_V3_APPS_PTP_OFST 0
-#define LICENSED_V3_APPS_PTP_LBN 1
-#define LICENSED_V3_APPS_PTP_WIDTH 1
-#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_OFST 0
-#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_LBN 2
-#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_WIDTH 1
-#define LICENSED_V3_APPS_SOLARSECURE_OFST 0
-#define LICENSED_V3_APPS_SOLARSECURE_LBN 3
-#define LICENSED_V3_APPS_SOLARSECURE_WIDTH 1
-#define LICENSED_V3_APPS_PERF_MONITOR_OFST 0
-#define LICENSED_V3_APPS_PERF_MONITOR_LBN 4
-#define LICENSED_V3_APPS_PERF_MONITOR_WIDTH 1
-#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_OFST 0
-#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_LBN 5
-#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_WIDTH 1
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_OFST 0
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_LBN 6
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_WIDTH 1
-#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_OFST 0
-#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_LBN 7
-#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_WIDTH 1
-#define LICENSED_V3_APPS_TCP_DIRECT_OFST 0
-#define LICENSED_V3_APPS_TCP_DIRECT_LBN 8
-#define LICENSED_V3_APPS_TCP_DIRECT_WIDTH 1
-#define LICENSED_V3_APPS_LOW_LATENCY_OFST 0
-#define LICENSED_V3_APPS_LOW_LATENCY_LBN 9
-#define LICENSED_V3_APPS_LOW_LATENCY_WIDTH 1
-#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_OFST 0
-#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_LBN 10
-#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_WIDTH 1
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_OFST 0
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_LBN 11
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_WIDTH 1
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_OFST 0
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_LBN 12
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_WIDTH 1
-#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_OFST 0
-#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_LBN 13
-#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_WIDTH 1
-#define LICENSED_V3_APPS_DSHBRD_OFST 0
-#define LICENSED_V3_APPS_DSHBRD_LBN 14
-#define LICENSED_V3_APPS_DSHBRD_WIDTH 1
-#define LICENSED_V3_APPS_SCATRD_OFST 0
-#define LICENSED_V3_APPS_SCATRD_LBN 15
-#define LICENSED_V3_APPS_SCATRD_WIDTH 1
-#define LICENSED_V3_APPS_MASK_LBN 0
-#define LICENSED_V3_APPS_MASK_WIDTH 64
-
/* LICENSED_V3_FEATURES structuredef */
#define LICENSED_V3_FEATURES_LEN 8
/* Bitmask of licensed firmware features */
@@ -10199,44 +10930,6 @@
#define RSS_MODE_HASH_SELECTOR_LBN 0
#define RSS_MODE_HASH_SELECTOR_WIDTH 8
-/* CTPIO_STATS_MAP structuredef */
-#define CTPIO_STATS_MAP_LEN 4
-/* The (function relative) VI number */
-#define CTPIO_STATS_MAP_VI_OFST 0
-#define CTPIO_STATS_MAP_VI_LEN 2
-#define CTPIO_STATS_MAP_VI_LBN 0
-#define CTPIO_STATS_MAP_VI_WIDTH 16
-/* The target bucket for the VI */
-#define CTPIO_STATS_MAP_BUCKET_OFST 2
-#define CTPIO_STATS_MAP_BUCKET_LEN 2
-#define CTPIO_STATS_MAP_BUCKET_LBN 16
-#define CTPIO_STATS_MAP_BUCKET_WIDTH 16
-
-
-/***********************************/
-/* MC_CMD_READ_REGS
- * Get a dump of the MCPU registers
- */
-#define MC_CMD_READ_REGS 0x50
-#undef MC_CMD_0x50_PRIVILEGE_CTG
-
-#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_READ_REGS_IN msgrequest */
-#define MC_CMD_READ_REGS_IN_LEN 0
-
-/* MC_CMD_READ_REGS_OUT msgresponse */
-#define MC_CMD_READ_REGS_OUT_LEN 308
-/* Whether the corresponding register entry contains a valid value */
-#define MC_CMD_READ_REGS_OUT_MASK_OFST 0
-#define MC_CMD_READ_REGS_OUT_MASK_LEN 16
-/* Same order as MIPS GDB (r0-r31, sr, lo, hi, bad, cause, 32 x float, fsr,
- * fir, fp)
- */
-#define MC_CMD_READ_REGS_OUT_REGS_OFST 16
-#define MC_CMD_READ_REGS_OUT_REGS_LEN 4
-#define MC_CMD_READ_REGS_OUT_REGS_NUM 73
-
/***********************************/
/* MC_CMD_INIT_EVQ
@@ -10640,25 +11333,6 @@
#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_RXQ_FORCE_EV_MERGING_LBN 3
#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_RXQ_FORCE_EV_MERGING_WIDTH 1
-/* QUEUE_CRC_MODE structuredef */
-#define QUEUE_CRC_MODE_LEN 1
-#define QUEUE_CRC_MODE_MODE_LBN 0
-#define QUEUE_CRC_MODE_MODE_WIDTH 4
-/* enum: No CRC. */
-#define QUEUE_CRC_MODE_NONE 0x0
-/* enum: CRC Fiber channel over ethernet. */
-#define QUEUE_CRC_MODE_FCOE 0x1
-/* enum: CRC (digest) iSCSI header only. */
-#define QUEUE_CRC_MODE_ISCSI_HDR 0x2
-/* enum: CRC (digest) iSCSI header and payload. */
-#define QUEUE_CRC_MODE_ISCSI 0x3
-/* enum: CRC Fiber channel over IP over ethernet. */
-#define QUEUE_CRC_MODE_FCOIPOE 0x4
-/* enum: CRC MPA. */
-#define QUEUE_CRC_MODE_MPA 0x5
-#define QUEUE_CRC_MODE_SPARE_LBN 4
-#define QUEUE_CRC_MODE_SPARE_WIDTH 4
-
/***********************************/
/* MC_CMD_INIT_RXQ
@@ -10827,6 +11501,9 @@
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_OFST 16
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_LBN 20
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SUPPRESS_RX_EVENTS_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SUPPRESS_RX_EVENTS_LBN 21
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SUPPRESS_RX_EVENTS_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20
#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_LEN 4
@@ -10933,6 +11610,9 @@
#define MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_OFST 16
#define MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_LBN 20
#define MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SUPPRESS_RX_EVENTS_OFST 16
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SUPPRESS_RX_EVENTS_LBN 21
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SUPPRESS_RX_EVENTS_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_V3_IN_OWNER_ID_OFST 20
#define MC_CMD_INIT_RXQ_V3_IN_OWNER_ID_LEN 4
@@ -11068,6 +11748,9 @@
#define MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_OFST 16
#define MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_LBN 20
#define MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_WIDTH 1
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_SUPPRESS_RX_EVENTS_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_SUPPRESS_RX_EVENTS_LBN 21
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_SUPPRESS_RX_EVENTS_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_V4_IN_OWNER_ID_OFST 20
#define MC_CMD_INIT_RXQ_V4_IN_OWNER_ID_LEN 4
@@ -11216,6 +11899,9 @@
#define MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_OFST 16
#define MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_LBN 20
#define MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_WIDTH 1
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_SUPPRESS_RX_EVENTS_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_SUPPRESS_RX_EVENTS_LBN 21
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_SUPPRESS_RX_EVENTS_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_V5_IN_OWNER_ID_OFST 20
#define MC_CMD_INIT_RXQ_V5_IN_OWNER_ID_LEN 4
@@ -11610,320 +12296,6 @@
/* MC_CMD_PROXY_CMD_OUT msgresponse */
#define MC_CMD_PROXY_CMD_OUT_LEN 0
-/* MC_PROXY_STATUS_BUFFER structuredef: Host memory status buffer used to
- * manage proxied requests
- */
-#define MC_PROXY_STATUS_BUFFER_LEN 16
-/* Handle allocated by the firmware for this proxy transaction */
-#define MC_PROXY_STATUS_BUFFER_HANDLE_OFST 0
-#define MC_PROXY_STATUS_BUFFER_HANDLE_LEN 4
-/* enum: An invalid handle. */
-#define MC_PROXY_STATUS_BUFFER_HANDLE_INVALID 0x0
-#define MC_PROXY_STATUS_BUFFER_HANDLE_LBN 0
-#define MC_PROXY_STATUS_BUFFER_HANDLE_WIDTH 32
-/* The requesting physical function number */
-#define MC_PROXY_STATUS_BUFFER_PF_OFST 4
-#define MC_PROXY_STATUS_BUFFER_PF_LEN 2
-#define MC_PROXY_STATUS_BUFFER_PF_LBN 32
-#define MC_PROXY_STATUS_BUFFER_PF_WIDTH 16
-/* The requesting virtual function number. Set to VF_NULL if the target is a
- * PF.
- */
-#define MC_PROXY_STATUS_BUFFER_VF_OFST 6
-#define MC_PROXY_STATUS_BUFFER_VF_LEN 2
-#define MC_PROXY_STATUS_BUFFER_VF_LBN 48
-#define MC_PROXY_STATUS_BUFFER_VF_WIDTH 16
-/* The target function RID. */
-#define MC_PROXY_STATUS_BUFFER_RID_OFST 8
-#define MC_PROXY_STATUS_BUFFER_RID_LEN 2
-#define MC_PROXY_STATUS_BUFFER_RID_LBN 64
-#define MC_PROXY_STATUS_BUFFER_RID_WIDTH 16
-/* The status of the proxy as described in MC_CMD_PROXY_COMPLETE. */
-#define MC_PROXY_STATUS_BUFFER_STATUS_OFST 10
-#define MC_PROXY_STATUS_BUFFER_STATUS_LEN 2
-#define MC_PROXY_STATUS_BUFFER_STATUS_LBN 80
-#define MC_PROXY_STATUS_BUFFER_STATUS_WIDTH 16
-/* If a request is authorized rather than carried out by the host, this is the
- * elevated privilege mask granted to the requesting function.
- */
-#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_OFST 12
-#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LEN 4
-#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LBN 96
-#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_WIDTH 32
-
-
-/***********************************/
-/* MC_CMD_PROXY_CONFIGURE
- * Enable/disable authorization of MCDI requests from unprivileged functions by
- * a designated admin function
- */
-#define MC_CMD_PROXY_CONFIGURE 0x58
-#undef MC_CMD_0x58_PRIVILEGE_CTG
-
-#define MC_CMD_0x58_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_PROXY_CONFIGURE_IN msgrequest */
-#define MC_CMD_PROXY_CONFIGURE_IN_LEN 108
-#define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_OFST 0
-#define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_OFST 0
-#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_LBN 0
-#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_WIDTH 1
-/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
- * of blocks, each of the size REQUEST_BLOCK_SIZE.
- */
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_OFST 4
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LEN 8
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_OFST 4
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_LBN 32
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_WIDTH 32
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_OFST 8
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_LBN 64
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_WIDTH 32
-/* Must be a power of 2 */
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST 12
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_LEN 4
-/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
- * of blocks, each of the size REPLY_BLOCK_SIZE.
- */
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_OFST 16
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LEN 8
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_OFST 16
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_LBN 128
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_WIDTH 32
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_OFST 20
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_LBN 160
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_WIDTH 32
-/* Must be a power of 2 */
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_OFST 24
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_LEN 4
-/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
- * of blocks, each of the size STATUS_BLOCK_SIZE. This buffer is only needed if
- * host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
- */
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_OFST 28
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LEN 8
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_OFST 28
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_LBN 224
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_WIDTH 32
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_OFST 32
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_LBN 256
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_WIDTH 32
-/* Must be a power of 2, or zero if this buffer is not provided */
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_OFST 36
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_LEN 4
-/* Applies to all three buffers */
-#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_OFST 40
-#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_LEN 4
-/* A bit mask defining which MCDI operations may be proxied */
-#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44
-#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64
-
-/* MC_CMD_PROXY_CONFIGURE_EXT_IN msgrequest */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_LEN 112
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_OFST 0
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_OFST 0
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_LBN 0
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_WIDTH 1
-/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
- * of blocks, each of the size REQUEST_BLOCK_SIZE.
- */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_OFST 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LEN 8
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_OFST 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_LBN 32
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_WIDTH 32
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_OFST 8
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_LBN 64
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_WIDTH 32
-/* Must be a power of 2 */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_OFST 12
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_LEN 4
-/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
- * of blocks, each of the size REPLY_BLOCK_SIZE.
- */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_OFST 16
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LEN 8
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_OFST 16
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_LBN 128
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_WIDTH 32
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_OFST 20
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_LBN 160
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_WIDTH 32
-/* Must be a power of 2 */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_OFST 24
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_LEN 4
-/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
- * of blocks, each of the size STATUS_BLOCK_SIZE. This buffer is only needed if
- * host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
- */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_OFST 28
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LEN 8
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_OFST 28
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_LBN 224
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_WIDTH 32
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_OFST 32
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_LBN 256
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_WIDTH 32
-/* Must be a power of 2, or zero if this buffer is not provided */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_OFST 36
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_LEN 4
-/* Applies to all three buffers */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_OFST 40
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_LEN 4
-/* A bit mask defining which MCDI operations may be proxied */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_OFST 44
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_LEN 64
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_OFST 108
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_LEN 4
-
-/* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */
-#define MC_CMD_PROXY_CONFIGURE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_PROXY_COMPLETE
- * Tells FW that a requested proxy operation has either been completed (by
- * using MC_CMD_PROXY_CMD) or authorized/declined. May only be sent by the
- * function that enabled proxying/authorization (by using
- * MC_CMD_PROXY_CONFIGURE).
- */
-#define MC_CMD_PROXY_COMPLETE 0x5f
-#undef MC_CMD_0x5f_PRIVILEGE_CTG
-
-#define MC_CMD_0x5f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_PROXY_COMPLETE_IN msgrequest */
-#define MC_CMD_PROXY_COMPLETE_IN_LEN 12
-#define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_OFST 0
-#define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_LEN 4
-#define MC_CMD_PROXY_COMPLETE_IN_STATUS_OFST 4
-#define MC_CMD_PROXY_COMPLETE_IN_STATUS_LEN 4
-/* enum: The operation has been completed by using MC_CMD_PROXY_CMD, the reply
- * is stored in the REPLY_BUFF.
- */
-#define MC_CMD_PROXY_COMPLETE_IN_COMPLETE 0x0
-/* enum: The operation has been authorized. The originating function may now
- * try again.
- */
-#define MC_CMD_PROXY_COMPLETE_IN_AUTHORIZED 0x1
-/* enum: The operation has been declined. */
-#define MC_CMD_PROXY_COMPLETE_IN_DECLINED 0x2
-/* enum: The authorization failed because the relevant application did not
- * respond in time.
- */
-#define MC_CMD_PROXY_COMPLETE_IN_TIMEDOUT 0x3
-#define MC_CMD_PROXY_COMPLETE_IN_HANDLE_OFST 8
-#define MC_CMD_PROXY_COMPLETE_IN_HANDLE_LEN 4
-
-/* MC_CMD_PROXY_COMPLETE_OUT msgresponse */
-#define MC_CMD_PROXY_COMPLETE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_ALLOC_BUFTBL_CHUNK
- * Allocate a set of buffer table entries using the specified owner ID. This
- * operation allocates the required buffer table entries (and fails if it
- * cannot do so). The buffer table entries will initially be zeroed.
- */
-#define MC_CMD_ALLOC_BUFTBL_CHUNK 0x87
-#undef MC_CMD_0x87_PRIVILEGE_CTG
-
-#define MC_CMD_0x87_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
-
-/* MC_CMD_ALLOC_BUFTBL_CHUNK_IN msgrequest */
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8
-/* Owner ID to use */
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_OFST 0
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_LEN 4
-/* Size of buffer table pages to use, in bytes (note that only a few values are
- * legal on any specific hardware).
- */
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_OFST 4
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_LEN 4
-
-/* MC_CMD_ALLOC_BUFTBL_CHUNK_OUT msgresponse */
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN 12
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_OFST 0
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_LEN 4
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_OFST 4
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_LEN 4
-/* Buffer table IDs for use in DMA descriptors. */
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_OFST 8
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_LEN 4
-
-
-/***********************************/
-/* MC_CMD_PROGRAM_BUFTBL_ENTRIES
- * Reprogram a set of buffer table entries in the specified chunk.
- */
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES 0x88
-#undef MC_CMD_0x88_PRIVILEGE_CTG
-
-#define MC_CMD_0x88_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
-
-/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX_MCDI2 268
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num))
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_NUM(len) (((len)-12)/8)
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_LEN 4
-/* ID */
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST 4
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4
-/* Num entries */
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 8
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4
-/* Buffer table entry address */
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST 12
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN 8
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_OFST 12
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_LEN 4
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_LBN 96
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_WIDTH 32
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_OFST 16
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_LEN 4
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_LBN 128
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_WIDTH 32
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MINNUM 1
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 32
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM_MCDI2 32
-
-/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT msgresponse */
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT_LEN 0
-
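The LENMIN/LENMAX/LEN(num)/ENTRY_NUM(len) group above is the standard MCDI pattern for variable-length messages: a fixed header plus a repeated array element. A quick illustrative check of the arithmetic (the entry count is made up):

/* Illustrative: a request carrying 4 entries is 12 fixed bytes plus
 * 8 bytes per entry, and ENTRY_NUM() inverts that computation.
 */
unsigned int len = MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(4);         /* 44 */
unsigned int num = MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_NUM(len); /* 4 */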
-
-/***********************************/
-/* MC_CMD_FREE_BUFTBL_CHUNK
- */
-#define MC_CMD_FREE_BUFTBL_CHUNK 0x89
-#undef MC_CMD_0x89_PRIVILEGE_CTG
-
-#define MC_CMD_0x89_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
-
-/* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */
-#define MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4
-#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0
-#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_LEN 4
-
-/* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */
-#define MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0
-
/***********************************/
/* MC_CMD_FILTER_OP
@@ -12822,6 +13194,10 @@
#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_VNIC_ENCAP_MATCHES 0x5
/* enum: read the supported encapsulation types for the VNIC */
#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_VNIC_ENCAP_TYPES 0x6
+/* enum: read the supported RX filter matches for low-latency queues (as
+ * allocated by MC_CMD_ALLOC_LL_QUEUES)
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_LL_RX_MATCHES 0x7
/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8
@@ -12860,6 +13236,48 @@
#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0
#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1
+/* MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT msgresponse:
+ * GET_PARSER_DISP_INFO response format for OP_GET_SECURITY_RULE_INFO.
+ * (Medford-only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_LEN 36
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_OP_OFST 0
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_OP_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* a version number representing the set of rule lookups that are implemented
+ * by the currently running firmware
+ */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_OFST 4
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_LEN 4
+/* enum: implements lookup sequences described in SF-114946-SW draft C */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_SF_114946_SW_C 0x0
+/* the number of nodes in the subnet map */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_NODES_OFST 8
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_NODES_LEN 4
+/* the number of entries in one subnet map node */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_ENTRIES_PER_NODE_OFST 12
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_ENTRIES_PER_NODE_LEN 4
+/* minimum valid value for a subnet ID in a subnet map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MIN_OFST 16
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MIN_LEN 4
+/* maximum valid value for a subnet ID in a subnet map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MAX_OFST 20
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MAX_LEN 4
+/* the number of entries in the local and remote port range maps */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_TREE_NUM_ENTRIES_OFST 24
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_TREE_NUM_ENTRIES_LEN 4
+/* minimum valid value for a portrange ID in a port range map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MIN_OFST 28
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MIN_LEN 4
+/* maximum valid value for a portrange ID in a port range map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MAX_OFST 32
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MAX_LEN 4
+
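All the fields above are little-endian dwords at fixed offsets, so a driver can pull them straight out of the response payload with the OFST/LEN constants. A hedged sketch, assuming resp points at the raw 36-byte response (the variable names are illustrative):

/* Illustrative: extract RULES_VERSION from a raw response buffer. */
__le32 tmp;
memcpy(&tmp,
       resp + MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_OFST,
       MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_LEN);
u32 rules_version = le32_to_cpu(tmp);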
/* MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT msgresponse: This response is
* returned if a MC_CMD_GET_PARSER_DISP_INFO_IN request is sent with OP value
* OP_GET_SUPPORTED_VNIC_ENCAP_MATCHES. It contains information about the
@@ -12914,136 +13332,6 @@
/***********************************/
-/* MC_CMD_PARSER_DISP_RW
- * Direct read/write of parser-dispatcher state (DICPUs and LUE) for debugging.
- * Please note that this interface is only of use to debug tools which have
- * knowledge of firmware and hardware data structures; nothing here is intended
- * for use by normal driver code. Note that although this command is in the
- * Admin privilege group, in tamperproof adapters, only read operations are
- * permitted.
- */
-#define MC_CMD_PARSER_DISP_RW 0xe5
-#undef MC_CMD_0xe5_PRIVILEGE_CTG
-
-#define MC_CMD_0xe5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_PARSER_DISP_RW_IN msgrequest */
-#define MC_CMD_PARSER_DISP_RW_IN_LEN 32
-/* identifies the target of the operation */
-#define MC_CMD_PARSER_DISP_RW_IN_TARGET_OFST 0
-#define MC_CMD_PARSER_DISP_RW_IN_TARGET_LEN 4
-/* enum: RX dispatcher CPU */
-#define MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0
-/* enum: TX dispatcher CPU */
-#define MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1
-/* enum: Lookup engine (with original metadata format). Deprecated; used only
- * by cmdclient as a fallback for very old Huntington firmware, and not
- * supported in firmware beyond v6.4.0.1005. Use LUE_VERSIONED_METADATA
- * instead.
- */
-#define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2
-/* enum: Lookup engine (with requested metadata format) */
-#define MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA 0x3
-/* enum: RX0 dispatcher CPU (alias for RX_DICPU; Medford has 2 RX DICPUs) */
-#define MC_CMD_PARSER_DISP_RW_IN_RX0_DICPU 0x0
-/* enum: RX1 dispatcher CPU (only valid for Medford) */
-#define MC_CMD_PARSER_DISP_RW_IN_RX1_DICPU 0x4
-/* enum: Miscellaneous other state (only valid for Medford) */
-#define MC_CMD_PARSER_DISP_RW_IN_MISC_STATE 0x5
-/* identifies the type of operation requested */
-#define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4
-#define MC_CMD_PARSER_DISP_RW_IN_OP_LEN 4
-/* enum: Read a word of DICPU DMEM or a LUE entry */
-#define MC_CMD_PARSER_DISP_RW_IN_READ 0x0
-/* enum: Write a word of DICPU DMEM or a LUE entry. Not permitted on
- * tamperproof adapters.
- */
-#define MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1
-/* enum: Read-modify-write a word of DICPU DMEM (not valid for LUE). Not
- * permitted on tamperproof adapters.
- */
-#define MC_CMD_PARSER_DISP_RW_IN_RMW 0x2
-/* data memory address (DICPU targets) or LUE index (LUE targets) */
-#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST 8
-#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_LEN 4
-/* selector (for MISC_STATE target) */
-#define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_OFST 8
-#define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_LEN 4
-/* enum: Port to datapath mapping */
-#define MC_CMD_PARSER_DISP_RW_IN_PORT_DP_MAPPING 0x1
-/* value to write (for DMEM writes) */
-#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_OFST 12
-#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_LEN 4
-/* XOR value (for DMEM read-modify-writes: new = (old & mask) ^ value) */
-#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12
-#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_LEN 4
-/* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */
-#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16
-#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_LEN 4
-/* metadata format (for LUE reads using LUE_VERSIONED_METADATA) */
-#define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_OFST 12
-#define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_LEN 4
-/* value to write (for LUE writes) */
-#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12
-#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20
-
-/* MC_CMD_PARSER_DISP_RW_OUT msgresponse */
-#define MC_CMD_PARSER_DISP_RW_OUT_LEN 52
-/* value read (for DMEM reads) */
-#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_OFST 0
-#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_LEN 4
-/* value read (for LUE reads) */
-#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_OFST 0
-#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_LEN 20
-/* up to 8 32-bit words of additional soft state from the LUE manager (the
- * exact content is firmware-dependent and intended only for debug use)
- */
-#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_OFST 20
-#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_LEN 32
-/* datapath(s) used for each port (for MISC_STATE PORT_DP_MAPPING selector) */
-#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_OFST 0
-#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_LEN 4
-#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_NUM 4
-#define MC_CMD_PARSER_DISP_RW_OUT_DP0 0x1 /* enum */
-#define MC_CMD_PARSER_DISP_RW_OUT_DP1 0x2 /* enum */
-
-
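The read-modify-write rule quoted above, new = (old & mask) ^ value, can clear, set, or toggle individual bits in a single operation. A small worked example with illustrative values:

/* Illustrative: set bit 3 and clear bit 0 of a DMEM word using
 * new = (old & mask) ^ value. Per bit: mask=0/value=0 clears,
 * mask=0/value=1 sets, mask=1/value=1 toggles, mask=1/value=0 preserves.
 */
u32 mask  = ~((1u << 3) | (1u << 0)); /* preserve all other bits */
u32 value = (1u << 3);                /* force bit 3 to 1, bit 0 to 0 */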
-/***********************************/
-/* MC_CMD_GET_PF_COUNT
- * Get number of PFs on the device.
- */
-#define MC_CMD_GET_PF_COUNT 0xb6
-#undef MC_CMD_0xb6_PRIVILEGE_CTG
-
-#define MC_CMD_0xb6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_PF_COUNT_IN msgrequest */
-#define MC_CMD_GET_PF_COUNT_IN_LEN 0
-
-/* MC_CMD_GET_PF_COUNT_OUT msgresponse */
-#define MC_CMD_GET_PF_COUNT_OUT_LEN 1
-/* Identifies the number of PFs on the device. */
-#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST 0
-#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_LEN 1
-
-
-/***********************************/
-/* MC_CMD_SET_PF_COUNT
- * Set number of PFs on the device.
- */
-#define MC_CMD_SET_PF_COUNT 0xb7
-
-/* MC_CMD_SET_PF_COUNT_IN msgrequest */
-#define MC_CMD_SET_PF_COUNT_IN_LEN 4
-/* New number of PFs on the device. */
-#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_OFST 0
-#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_LEN 4
-
-/* MC_CMD_SET_PF_COUNT_OUT msgresponse */
-#define MC_CMD_SET_PF_COUNT_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_GET_PORT_ASSIGNMENT
* Get port assignment for current PCI function.
*/
@@ -13069,25 +13357,6 @@
/***********************************/
-/* MC_CMD_SET_PORT_ASSIGNMENT
- * Set port assignment for current PCI function.
- */
-#define MC_CMD_SET_PORT_ASSIGNMENT 0xb9
-#undef MC_CMD_0xb9_PRIVILEGE_CTG
-
-#define MC_CMD_0xb9_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SET_PORT_ASSIGNMENT_IN msgrequest */
-#define MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4
-/* Identifies the port assignment for this function. */
-#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_OFST 0
-#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_LEN 4
-
-/* MC_CMD_SET_PORT_ASSIGNMENT_OUT msgresponse */
-#define MC_CMD_SET_PORT_ASSIGNMENT_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_ALLOC_VIS
* Allocate VIs for current PCI function.
*/
@@ -13184,263 +13453,6 @@
/***********************************/
-/* MC_CMD_SET_SRIOV_CFG
- * Set SRIOV config for this PF.
- */
-#define MC_CMD_SET_SRIOV_CFG 0xbb
-#undef MC_CMD_0xbb_PRIVILEGE_CTG
-
-#define MC_CMD_0xbb_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SET_SRIOV_CFG_IN msgrequest */
-#define MC_CMD_SET_SRIOV_CFG_IN_LEN 20
-/* Number of VFs currently enabled. */
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_OFST 0
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_LEN 4
-/* Max number of VFs before SRIOV stride and offset may need to be changed. */
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_OFST 4
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_LEN 4
-#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_OFST 8
-#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_LEN 4
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_OFST 8
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN 0
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_WIDTH 1
-/* RID offset of first VF from PF, or 0 for no change, or
- * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate an offset.
- */
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_OFST 12
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_LEN 4
-/* RID offset of each subsequent VF from the previous, 0 for no change, or
- * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate a stride.
- */
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_OFST 16
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_LEN 4
-
-/* MC_CMD_SET_SRIOV_CFG_OUT msgresponse */
-#define MC_CMD_SET_SRIOV_CFG_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_GET_VI_ALLOC_INFO
- * Get information about the number of VIs and the base VI number allocated to
- * this function. This message is not available to dynamic clients created by
- * MC_CMD_CLIENT_ALLOC.
- */
-#define MC_CMD_GET_VI_ALLOC_INFO 0x8d
-#undef MC_CMD_0x8d_PRIVILEGE_CTG
-
-#define MC_CMD_0x8d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_VI_ALLOC_INFO_IN msgrequest */
-#define MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0
-
-/* MC_CMD_GET_VI_ALLOC_INFO_OUT msgresponse */
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 12
-/* The number of VIs allocated on this function */
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_LEN 4
-/* The base absolute VI number allocated to this function. Required to
- * correctly interpret wakeup events.
- */
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_LEN 4
-/* Function's port vi_shift value (always 0 on Huntington) */
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_OFST 8
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_LEN 4
-
-
-/***********************************/
-/* MC_CMD_DUMP_VI_STATE
- * For CmdClient use. Dump pertinent information on a specific absolute VI. The
- * VI must be owned by the calling client or one of its ancestors; usership of
- * the VI (as set by MC_CMD_SET_VI_USER) is not sufficient.
- */
-#define MC_CMD_DUMP_VI_STATE 0x8e
-#undef MC_CMD_0x8e_PRIVILEGE_CTG
-
-#define MC_CMD_0x8e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_DUMP_VI_STATE_IN msgrequest */
-#define MC_CMD_DUMP_VI_STATE_IN_LEN 4
-/* The VI number to query. */
-#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_OFST 0
-#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_LEN 4
-
-/* MC_CMD_DUMP_VI_STATE_OUT msgresponse */
-#define MC_CMD_DUMP_VI_STATE_OUT_LEN 100
-/* The PF part of the function owning this VI. */
-#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_OFST 0
-#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_LEN 2
-/* The VF part of the function owning this VI. */
-#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_OFST 2
-#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_LEN 2
-/* Base of VIs allocated to this function. */
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_OFST 4
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_LEN 2
-/* Count of VIs allocated to the owner function. */
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_OFST 6
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_LEN 2
-/* Base interrupt vector allocated to this function. */
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_OFST 8
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_LEN 2
-/* Number of interrupt vectors allocated to this function. */
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_OFST 10
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_LEN 2
-/* Raw evq ptr table data. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_OFST 12
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_OFST 12
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_LBN 96
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_OFST 16
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_LBN 128
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_WIDTH 32
-/* Raw evq timer table data. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_OFST 20
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_OFST 20
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_LBN 160
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_OFST 24
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_LBN 192
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_WIDTH 32
-/* Combined metadata field. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_OFST 28
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_LBN 0
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_WIDTH 16
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_OFST 28
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_LBN 16
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_WIDTH 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_OFST 28
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_LBN 24
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_WIDTH 8
-/* TXDPCPU raw table data for queue. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_OFST 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_OFST 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_LBN 256
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_OFST 36
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_LBN 288
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_WIDTH 32
-/* TXDPCPU raw table data for queue. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_OFST 40
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_OFST 40
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_LBN 320
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_OFST 44
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_LBN 352
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_WIDTH 32
-/* TXDPCPU raw table data for queue. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_OFST 48
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_OFST 48
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_LBN 384
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_OFST 52
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_LBN 416
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_WIDTH 32
-/* Combined metadata field. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_OFST 56
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_OFST 56
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_LBN 448
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_OFST 60
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_LBN 480
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_OFST 56
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_LBN 0
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_WIDTH 16
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_OFST 56
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_LBN 16
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_WIDTH 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_OFST 56
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_LBN 24
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_WIDTH 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_OFST 56
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_LBN 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_WIDTH 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_OFST 56
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_LBN 40
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_WIDTH 24
-/* RXDPCPU raw table data for queue. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_OFST 64
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_OFST 64
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_LBN 512
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_OFST 68
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_LBN 544
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_WIDTH 32
-/* RXDPCPU raw table data for queue. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_OFST 72
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_OFST 72
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_LBN 576
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_OFST 76
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_LBN 608
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_WIDTH 32
-/* Reserved, currently 0. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_OFST 80
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_OFST 80
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_LBN 640
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_OFST 84
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_LBN 672
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_WIDTH 32
-/* Combined metadata field. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_OFST 88
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_OFST 88
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_LBN 704
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_OFST 92
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_LBN 736
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_OFST 88
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_LBN 0
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_WIDTH 16
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_OFST 88
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_LBN 16
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_WIDTH 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_OFST 88
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_LBN 24
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_WIDTH 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_OFST 88
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_LBN 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_WIDTH 8
-/* Current user, as assigned by MC_CMD_SET_VI_USER. */
-#define MC_CMD_DUMP_VI_STATE_OUT_USER_CLIENT_ID_OFST 96
-#define MC_CMD_DUMP_VI_STATE_OUT_USER_CLIENT_ID_LEN 4
-
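The _LO/_HI pairs above follow the usual MCDI convention for 64-bit fields: two little-endian dwords at consecutive offsets. A hedged reassembly sketch, assuming resp points at the raw response buffer:

/* Illustrative: reassemble the 64-bit raw EVQ pointer from its LO/HI
 * halves; each half is a little-endian dword.
 */
__le32 lo, hi;
memcpy(&lo, resp + MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_OFST, sizeof(lo));
memcpy(&hi, resp + MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_OFST, sizeof(hi));
u64 evq_ptr = ((u64)le32_to_cpu(hi) << 32) | le32_to_cpu(lo);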
-
-/***********************************/
/* MC_CMD_ALLOC_PIOBUF
* Allocate a push I/O buffer for later use with a tx queue.
*/
@@ -13491,354 +13503,102 @@
/* MC_CMD_GET_VI_TLP_PROCESSING_IN msgrequest */
#define MC_CMD_GET_VI_TLP_PROCESSING_IN_LEN 4
-/* VI number to get information for. */
+/* Queue handle, encodes queue type and VI number to get information for. */
#define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
#define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4
-/* MC_CMD_GET_VI_TLP_PROCESSING_OUT msgresponse */
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_LEN 4
-/* Transaction processing steering hint 1 for use with the Rx Queue. */
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_OFST 0
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_LEN 1
-/* Transaction processing steering hint 2 for use with the Ev Queue. */
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_OFST 1
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_LEN 1
-/* Use Relaxed ordering model for TLPs on this VI. */
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_LBN 16
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_WIDTH 1
-/* Use ID based ordering for TLPs on this VI. */
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_LBN 17
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_WIDTH 1
-/* Set no snoop bit for TLPs on this VI. */
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_LBN 18
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_WIDTH 1
-/* Enable TPH for TLPs on this VI. */
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_LBN 19
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_WIDTH 1
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_OFST 0
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_LEN 4
+/* MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT msgresponse: This message has the same
+ * layout as GET_VI_TLP_PROCESSING_OUT, but with corrected field ordering to
+ * simplify use in drivers.
+ */
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_LEN 4
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_DATA_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_DATA_LEN 4
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG1_RX_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG1_RX_LBN 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG1_RX_WIDTH 8
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG2_EV_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG2_EV_LBN 8
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG2_EV_WIDTH 8
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_LBN 16
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_WIDTH 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_PACKET_DATA_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_PACKET_DATA_LBN 16
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_PACKET_DATA_WIDTH 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_ID_BASED_ORDERING_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_ID_BASED_ORDERING_LBN 17
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_ID_BASED_ORDERING_WIDTH 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_NO_SNOOP_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_NO_SNOOP_LBN 18
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_NO_SNOOP_WIDTH 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_ON_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_ON_LBN 19
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_ON_WIDTH 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_SYNC_DATA_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_SYNC_DATA_LBN 20
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_SYNC_DATA_WIDTH 1
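Every field of the V2 response is carried in the single DATA dword, located by its LBN (least-significant bit number) and WIDTH. A minimal decode sketch; the helper is illustrative and assumes width < 32:

/* Illustrative: extract an LBN/WIDTH bitfield from the DATA word. */
static inline unsigned int mcdi_field(u32 data, unsigned int lbn,
				      unsigned int width)
{
	return (data >> lbn) & ((1u << width) - 1);
}
/* e.g. tag1 = mcdi_field(data,
 *                        MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG1_RX_LBN,
 *                        MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG1_RX_WIDTH);
 */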
/***********************************/
/* MC_CMD_SET_VI_TLP_PROCESSING
* Set TLP steering and ordering information for a VI. The caller must have the
* GRP_FUNC_DMA privilege and must be the currently-assigned user of this VI or
- * an ancestor of the current user (see MC_CMD_SET_VI_USER).
+ * an ancestor of the current user (see MC_CMD_SET_VI_USER). Note that LL
+ * queues require this to be called after allocation but before initialisation
+ * of the queue. TLP options of a queue are fixed once the queue is
+ * initialised: the values default to the current global values, but can be
+ * overridden beforehand using this command. At LL queue allocation, all
+ * overrides are cleared.
*/
#define MC_CMD_SET_VI_TLP_PROCESSING 0xb1
#undef MC_CMD_0xb1_PRIVILEGE_CTG
#define MC_CMD_0xb1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-/* MC_CMD_SET_VI_TLP_PROCESSING_IN msgrequest */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN 8
-/* VI number to set information for. */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4
-/* Transaction processing steering hint 1 for use with the Rx Queue. */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_OFST 4
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_LEN 1
-/* Transaction processing steering hint 2 for use with the Ev Queue. */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_OFST 5
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_LEN 1
-/* Use Relaxed ordering model for TLPs on this VI. */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_LBN 48
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_WIDTH 1
-/* Use ID based ordering for TLPs on this VI. */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_LBN 49
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_WIDTH 1
-/* Set the no snoop bit for TLPs on this VI. */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_LBN 50
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_WIDTH 1
-/* Enable TPH for TLPs on this VI. */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_LBN 51
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_WIDTH 1
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_OFST 4
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_LEN 4
+/* MC_CMD_SET_VI_TLP_PROCESSING_V2_IN msgrequest: This message has the same
+ * layout as SET_VI_TLP_PROCESSING_IN, but with corrected field ordering to
+ * simplify use in drivers.
+ */
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_LEN 8
+/* Queue handle, encodes queue type and VI number to set information for. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_INSTANCE_OFST 0
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_INSTANCE_LEN 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_DATA_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_DATA_LEN 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG1_RX_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG1_RX_LBN 0
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG1_RX_WIDTH 8
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG2_EV_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG2_EV_LBN 8
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG2_EV_WIDTH 8
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_LBN 16
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_WIDTH 1
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_PACKET_DATA_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_PACKET_DATA_LBN 16
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_PACKET_DATA_WIDTH 1
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_ID_BASED_ORDERING_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_ID_BASED_ORDERING_LBN 17
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_ID_BASED_ORDERING_WIDTH 1
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_NO_SNOOP_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_NO_SNOOP_LBN 18
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_NO_SNOOP_WIDTH 1
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_ON_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_ON_LBN 19
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_ON_WIDTH 1
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_SYNC_DATA_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_SYNC_DATA_LBN 20
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_SYNC_DATA_WIDTH 1
/* MC_CMD_SET_VI_TLP_PROCESSING_OUT msgresponse */
#define MC_CMD_SET_VI_TLP_PROCESSING_OUT_LEN 0
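Building the V2 request is the mirror image of decoding: each field is shifted into the DATA dword by its LBN. A hedged sketch (the steering-tag value is made up):

/* Illustrative: enable relaxed ordering for packet data and set an RX
 * steering tag of 0x5 in the V2 DATA word.
 */
u32 data = 0;
data |= 0x5u << MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG1_RX_LBN;
data |= 1u << MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_PACKET_DATA_LBN;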
/***********************************/
-/* MC_CMD_GET_TLP_PROCESSING_GLOBALS
- * Get global PCIe steering and transaction processing configuration.
- */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS 0xbc
-#undef MC_CMD_0xbc_PRIVILEGE_CTG
-
-#define MC_CMD_0xbc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN msgrequest */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_LEN 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4
-/* enum: MISC. */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_MISC 0x0
-/* enum: IDO. */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_IDO 0x1
-/* enum: RO. */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_RO 0x2
-/* enum: TPH Type. */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_TPH_TYPE 0x3
-
-/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_LEN 8
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_OFST 0
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
-/* Amalgamated TLP info word. */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_LEN 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_LBN 0
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_LBN 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_WIDTH 31
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_LBN 0
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_LBN 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_LBN 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_LBN 3
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_LBN 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_WIDTH 28
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_LBN 0
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_LBN 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_LBN 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_LBN 3
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_WIDTH 29
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_LBN 0
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_LBN 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_WIDTH 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_LBN 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_WIDTH 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_LBN 6
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_WIDTH 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_LBN 8
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_WIDTH 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_LBN 9
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_WIDTH 23
-
-
-/***********************************/
-/* MC_CMD_SET_TLP_PROCESSING_GLOBALS
- * Set global PCIe steering and transaction processing configuration.
- */
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS 0xbd
-#undef MC_CMD_0xbd_PRIVILEGE_CTG
-
-#define MC_CMD_0xbd_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN msgrequest */
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN 8
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_GET_TLP_PROCESSING_GLOBALS/MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
-/* Amalgamated TLP info word. */
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_LEN 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_LBN 0
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_LBN 0
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_LBN 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_LBN 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_LBN 3
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_LBN 0
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_LBN 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_LBN 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_LBN 0
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_LBN 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_WIDTH 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_LBN 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_WIDTH 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_LBN 6
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_WIDTH 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_LBN 8
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_WIDTH 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_LBN 10
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_WIDTH 22
-
-/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_SATELLITE_DOWNLOAD
- * Download a new set of images to the satellite CPUs from the host.
- */
-#define MC_CMD_SATELLITE_DOWNLOAD 0x91
-#undef MC_CMD_0x91_PRIVILEGE_CTG
-
-#define MC_CMD_0x91_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SATELLITE_DOWNLOAD_IN msgrequest: The reset requirements for the CPUs
- * are subtle, and so downloads must proceed in a number of phases.
- *
- * 1) PHASE_RESET with a target of TARGET_ALL and chunk ID/length of 0.
- *
- * 2) PHASE_IMEMS for each of the IMEM targets (target IDs 0-11). Each download
- * may consist of multiple chunks. The final chunk (with CHUNK_ID_LAST) should
- * be a checksum (a simple 32-bit sum) of the transferred data. An individual
- * download may be aborted using CHUNK_ID_ABORT.
- *
- * 3) PHASE_VECTORS for each of the vector table targets (target IDs 12-15),
- * similar to PHASE_IMEMS.
- *
- * 4) PHASE_READY with a target of TARGET_ALL and chunk ID/length of 0.
- *
- * After any error (a requested abort is not considered to be an error) the
- * sequence must be restarted from PHASE_RESET.
- */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMIN 20
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMAX 252
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMAX_MCDI2 1020
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_LEN(num) (16+4*(num))
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_NUM(len) (((len)-16)/4)
-/* Download phase. (Note: the IDLE phase is used internally and is never valid
- * in a command from the host.)
- */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_OFST 0
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_LEN 4
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IDLE 0x0 /* enum */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_RESET 0x1 /* enum */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS 0x2 /* enum */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_VECTORS 0x3 /* enum */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_READY 0x4 /* enum */
-/* Target for download. (These match the blob numbers defined in
- * mc_flash_layout.h.)
- */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_OFST 4
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_LEN 4
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_TEXT 0x0
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_TEXT 0x1
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDP_TEXT 0x2
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDP_TEXT 0x3
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT 0x4
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT_CFG 0x5
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT 0x6
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT_CFG 0x7
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_PGM 0x8
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_SL_PGM 0x9
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_PGM 0xa
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_SL_PGM 0xb
-/* enum: Valid in phase 3 (PHASE_VECTORS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL0 0xc
-/* enum: Valid in phase 3 (PHASE_VECTORS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL0 0xd
-/* enum: Valid in phase 3 (PHASE_VECTORS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL1 0xe
-/* enum: Valid in phase 3 (PHASE_VECTORS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL1 0xf
-/* enum: Valid in phases 1 (PHASE_RESET) and 4 (PHASE_READY) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL 0xffffffff
-/* Chunk ID, or CHUNK_ID_LAST or CHUNK_ID_ABORT */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_OFST 8
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LEN 4
-/* enum: Last chunk, containing checksum rather than data */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST 0xffffffff
-/* enum: Abort download of this item */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_ABORT 0xfffffffe
-/* Length of this chunk in bytes */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_OFST 12
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_LEN 4
-/* Data for this chunk */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_OFST 16
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_LEN 4
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MINNUM 1
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MAXNUM 59
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MAXNUM_MCDI2 251
-
-/* MC_CMD_SATELLITE_DOWNLOAD_OUT msgresponse */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_LEN 8
-/* Same as MC_CMD_ERR field, but included as 0 in success cases */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_OFST 0
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_LEN 4
-/* Extra status information */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_OFST 4
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_LEN 4
-/* enum: Code download OK, completed. */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_COMPLETE 0x0
-/* enum: Code download aborted as requested. */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_ABORTED 0x1
-/* enum: Code download OK so far, send next chunk. */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_NEXT_CHUNK 0x2
-/* enum: Download phases out of sequence */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_PHASE 0x100
-/* enum: Bad target for this phase */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_TARGET 0x101
-/* enum: Chunk ID out of sequence */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_ID 0x200
-/* enum: Chunk length zero or too large */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_LEN 0x201
-/* enum: Checksum was incorrect */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHECKSUM 0x300
-
-
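The final CHUNK_ID_LAST chunk is described above only as "a simple 32-bit sum" of the transferred data, so the dword granularity and modular wraparound in this sketch are assumptions, not a confirmed algorithm:

/* Illustrative only: assumed checksum for the CHUNK_ID_LAST chunk, a
 * 32-bit sum (mod 2^32) over the little-endian data words sent so far.
 */
static u32 satellite_csum(const __le32 *words, size_t n)
{
	u32 sum = 0;

	while (n--)
		sum += le32_to_cpu(*words++);
	return sum;
}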
-/***********************************/
/* MC_CMD_GET_CAPABILITIES
- * Get device capabilities.
- *
- * This is supplementary to the MC_CMD_GET_BOARD_CFG command, and intended to
- * reference inherent device capabilities as opposed to current NVRAM config.
+ * Get device capabilities. This is supplementary to the MC_CMD_GET_BOARD_CFG
+ * command, and intended to reference inherent device capabilities as opposed
+ * to current NVRAM config.
*/
#define MC_CMD_GET_CAPABILITIES 0xbe
#undef MC_CMD_0xbe_PRIVILEGE_CTG
@@ -14490,7 +14250,10 @@
/* MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_NUM 4
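NUM_VIS_PER_PORT is an array field: _NUM entries of _LEN bytes each, starting at _OFST. A hedged indexing sketch, assuming resp points at the raw response:

/* Illustrative: entry i of the 4-element NUM_VIS_PER_PORT array lives
 * at OFST + i * LEN; entries are little-endian 16-bit values.
 */
unsigned int i = 0; /* port index, 0..3 */
__le16 tmp;
memcpy(&tmp,
       resp + MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_OFST +
	      i * MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_LEN,
       sizeof(tmp));
u16 vis = le16_to_cpu(tmp);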
@@ -14900,7 +14663,10 @@
/* MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -15335,7 +15101,10 @@
/* MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -15778,7 +15547,10 @@
/* MC_CMD_GET_CAPABILITIES_V5_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -16226,7 +15998,10 @@
/* MC_CMD_GET_CAPABILITIES_V6_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V6_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -16685,7 +16460,10 @@
/* MC_CMD_GET_CAPABILITIES_V7_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V7_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -16796,9 +16574,21 @@
#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CLIENT_CMD_VF_PROXY_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CLIENT_CMD_VF_PROXY_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CXL_CONFIG_ENABLE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CXL_CONFIG_ENABLE_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CXL_CONFIG_ENABLE_WIDTH 1
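
/* A minimal sketch of testing one of the new FLAGS3 capability bits above,
 * assuming the raw MCDI response is available as a little-endian byte buffer.
 * The _OFST/_LBN/_WIDTH triples locate a field as (byte offset of the
 * containing dword, bit number within it, width in bits); helper names here
 * are illustrative, not the driver's API.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static uint32_t mcdi_le32(const uint8_t *p)
{
	return p[0] | p[1] << 8 | p[2] << 16 | (uint32_t)p[3] << 24;
}

static bool cap_flag(const uint8_t *resp, size_t resp_len,
		     unsigned int ofst, unsigned int lbn, unsigned int width)
{
	if (resp_len < ofst + 4)
		return false;	/* older firmware: flag word absent */
	return (mcdi_le32(resp + ofst) >> lbn) & ((1u << width) - 1);
}

/* Usage, e.g. for the new bit 15:
 * cap_flag(resp, resp_len,
 *	    MC_CMD_GET_CAPABILITIES_V7_OUT_CLIENT_CMD_VF_PROXY_OFST,
 *	    MC_CMD_GET_CAPABILITIES_V7_OUT_CLIENT_CMD_VF_PROXY_LBN,
 *	    MC_CMD_GET_CAPABILITIES_V7_OUT_CLIENT_CMD_VF_PROXY_WIDTH);
 */
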
/* MC_CMD_GET_CAPABILITIES_V8_OUT msgresponse */
#define MC_CMD_GET_CAPABILITIES_V8_OUT_LEN 160
@@ -17189,7 +16979,10 @@
/* MC_CMD_GET_CAPABILITIES_V8_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V8_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -17300,9 +17093,21 @@
#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_CLIENT_CMD_VF_PROXY_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_CLIENT_CMD_VF_PROXY_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_CXL_CONFIG_ENABLE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_CXL_CONFIG_ENABLE_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_CXL_CONFIG_ENABLE_WIDTH 1
/* These bits are reserved for communicating test-specific capabilities to
* host-side test software. All production drivers should treat this field as
* opaque.
@@ -17707,7 +17512,10 @@
/* MC_CMD_GET_CAPABILITIES_V9_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V9_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -17818,9 +17626,21 @@
#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_CLIENT_CMD_VF_PROXY_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_CLIENT_CMD_VF_PROXY_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_CXL_CONFIG_ENABLE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_CXL_CONFIG_ENABLE_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_CXL_CONFIG_ENABLE_WIDTH 1
/* These bits are reserved for communicating test-specific capabilities to
* host-side test software. All production drivers should treat this field as
* opaque.
@@ -18260,7 +18080,10 @@
/* MC_CMD_GET_CAPABILITIES_V10_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V10_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -18371,9 +18194,21 @@
#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CLIENT_CMD_VF_PROXY_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CLIENT_CMD_VF_PROXY_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CXL_CONFIG_ENABLE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CXL_CONFIG_ENABLE_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CXL_CONFIG_ENABLE_WIDTH 1
/* These bits are reserved for communicating test-specific capabilities to
* host-side test software. All production drivers should treat this field as
* opaque.
@@ -18438,6 +18273,1182 @@
#define MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_QUEUE_SIZES_OFST 188
#define MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_QUEUE_SIZES_LEN 4
+/* MC_CMD_GET_CAPABILITIES_V11_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_LEN 196
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VPORT_RECONFIGURE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_STRIPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VADAPTOR_QUERY_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SET_MAC_ENHANCED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_QBB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_RSS_LIMITED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PACKED_STREAM_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_INCLUDE_FCS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_VLAN_INSERTION_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_VLAN_STRIPPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PREFIX_LEN_0_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PREFIX_LEN_14_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_TIMESTAMP_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_BATCHING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DISABLE_SCATTER_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VXLAN_NVGRE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
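
/* A minimal sketch of splitting the 16-bit RXPD_FW_VERSION word into its
 * REV (LBN 0, width 12) and TYPE (LBN 12, width 4) subfields; the TYPE value
 * then selects one of the RXPD_FW_TYPE_* enums above. Helper names are
 * illustrative.
 */
#include <stdint.h>

static uint16_t mcdi_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | p[1] << 8);
}

static void rxpd_fw_version(const uint8_t *resp,
			    unsigned int *rev, unsigned int *type)
{
	uint16_t v = mcdi_le16(resp +
		MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_OFST);

	*rev  = v & 0xfff;	/* REV: bits 0-11 */
	*type = v >> 12;	/* TYPE: bits 12-15, e.g. 0x3 = FULL_FEATURED */
}
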
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVQ_TIMER_CTRL_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVENT_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_EVQ_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TIMESTAMP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCDI_BACKGROUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCDI_DB_RETURN_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CTPIO_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TSA_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TSA_BOUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_FLAG_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_MARK_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_L3XUDP_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_SPREADING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_HLB_IDLE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_BUNDLE_UPDATE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_BUNDLE_UPDATE_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_BUNDLE_UPDATE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V3_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V3_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V3_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DYNAMIC_SENSORS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DYNAMIC_SENSORS_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that the PF is assigned, but the assignment
+ * cannot be expressed in this field. It is intended for a possible future
+ * situation where a more complex PF-to-port mapping scheme is in use. A
+ * future driver should look for a new field supporting the new scheme;
+ * current/old drivers should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
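
/* A minimal sketch of interpreting one PFS_TO_PORTS_ASSIGNMENT byte. Per the
 * enums above, 0xfc-0xff are markers rather than port numbers, and
 * INCOMPATIBLE_ASSIGNMENT must be treated like PF_NOT_ASSIGNED by drivers
 * that predate any future mapping scheme. Function name is illustrative.
 */
#include <stdint.h>

static int pf_external_port(const uint8_t *resp, unsigned int pf)
{
	uint8_t v;

	if (pf >= MC_CMD_GET_CAPABILITIES_V11_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM)
		return -1;
	v = resp[MC_CMD_GET_CAPABILITIES_V11_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST + pf];
	switch (v) {
	case MC_CMD_GET_CAPABILITIES_V11_OUT_ACCESS_NOT_PERMITTED:
	case MC_CMD_GET_CAPABILITIES_V11_OUT_PF_NOT_PRESENT:
	case MC_CMD_GET_CAPABILITIES_V11_OUT_PF_NOT_ASSIGNED:
	case MC_CMD_GET_CAPABILITIES_V11_OUT_INCOMPATIBLE_ASSIGNMENT:
		return -1;	/* no usable external port number */
	default:
		return v;	/* external port index for this PF */
	}
}
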
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V11_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V11_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VIS_PER_PORT_NUM 4
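
/* A minimal sketch of reading the per-port VI counts. Only ports 0-3 live in
 * this array; per the comment above, further ports move to NUM_VIS_PER_PORT2
 * in the V12 response (not decoded here). Reuses mcdi_le16() from the
 * earlier sketch.
 */
static unsigned int num_vis_for_port(const uint8_t *resp, unsigned int port)
{
	if (port >= MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VIS_PER_PORT_NUM)
		return 0;	/* ports >= 4: consult NUM_VIS_PER_PORT2 */
	return mcdi_le16(resp +
		MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VIS_PER_PORT_OFST +
		port * MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VIS_PER_PORT_LEN);
}
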
+/* Size of RX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_DESC_CACHE_SIZE_LEN 1
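
/* A minimal sketch of expanding the logarithmic cache sizes: a stored value
 * of 6, for example, means a 2^6 = 64-entry descriptor cache.
 */
static unsigned int rx_desc_cache_entries(const uint8_t *resp)
{
	return 1u << resp[MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DESC_CACHE_SIZE_OFST];
}
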
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford, the amount of address space assigned to each
+ * VI is configurable. This is a global setting that the driver must query to
+ * discover the VI-to-address mapping. Cut-through PIO (CTPIO) is not
+ * available with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_64K 0x2
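
/* A minimal sketch mapping VI_WINDOW_MODE to a per-VI stride, so that VI n's
 * window starts at n * stride within the BAR. The strides follow directly
 * from the 8k/16k/64k enum descriptions above; function name is
 * illustrative.
 */
#include <stddef.h>

static size_t vi_window_stride(uint8_t mode)
{
	switch (mode) {
	case MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_8K:
		return 8192;	/* PIO at 4k; CTPIO not mapped */
	case MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_16K:
		return 16384;	/* PIO at 4k, CTPIO at 12k */
	case MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_64K:
		return 65536;	/* PIO at 4k, CTPIO at 12k */
	default:
		return 0;	/* unknown mode: treat as unusable */
	}
}
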
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAC_STATS_NUM_STATS_LEN 2
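
/* A minimal sketch of sizing a MAC stats DMA buffer so that the whole array,
 * including the final GENERATION_END entry, fits: each stat is a 64-bit
 * value, hence the multiply by 8. Reuses mcdi_le16() from above.
 */
static size_t mac_stats_buf_len(const uint8_t *resp)
{
	return (size_t)mcdi_le16(resp +
		MC_CMD_GET_CAPABILITIES_V11_OUT_MAC_STATS_NUM_STATS_OFST) * 8;
}
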
+/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field
+ * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_MARK_MAX_OFST 80
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_MARK_MAX_LEN 4
+/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in
+ * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when
+ * they create an RX queue. Due to hardware limitations, only a small number of
+ * different buffer sizes may be available concurrently. Nonzero entries in
+ * this array are the sizes of buffers which the system guarantees will be
+ * available for use. If the list is empty, there are no limitations on
+ * concurrent buffer sizes.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16
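
/* A minimal sketch of checking a candidate RX buffer size against the
 * guaranteed-sizes array; an all-zero array means no restriction. Reuses
 * mcdi_le32() from above; function name is illustrative.
 */
static bool rx_buffer_size_guaranteed(const uint8_t *resp, uint32_t size)
{
	bool any = false;
	unsigned int i;

	for (i = 0; i < MC_CMD_GET_CAPABILITIES_V11_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM; i++) {
		uint32_t s = mcdi_le32(resp +
			MC_CMD_GET_CAPABILITIES_V11_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST +
			i * MC_CMD_GET_CAPABILITIES_V11_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN);

		if (!s)
			continue;
		any = true;
		if (s == size)
			return true;
	}
	return !any;	/* empty list: no limitation on buffer sizes */
}
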
+/* Third word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FLAGS3_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FLAGS3_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_WOL_ETHERWAKE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_WOL_ETHERWAKE_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_WOL_ETHERWAKE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_EVEN_SPREADING_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_EVEN_SPREADING_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_EVEN_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_SUPPORTED_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VDPA_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VDPA_SUPPORTED_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VDPA_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ENCAPSULATED_MCDI_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ENCAPSULATED_MCDI_SUPPORTED_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ENCAPSULATED_MCDI_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EXTERNAL_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EXTERNAL_MAE_SUPPORTED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EXTERNAL_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CLIENT_CMD_VF_PROXY_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CLIENT_CMD_VF_PROXY_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CXL_CONFIG_ENABLE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CXL_CONFIG_ENABLE_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CXL_CONFIG_ENABLE_WIDTH 1
+/* These bits are reserved for communicating test-specific capabilities to
+ * host-side test software. All production drivers should treat this field as
+ * opaque.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_LEN 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_LO_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_LO_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_LO_LBN 1216
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_LO_WIDTH 32
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_HI_OFST 156
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_HI_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_HI_LBN 1248
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_HI_WIDTH 32
+/* The minimum size (in table entries) of an indirection table to be
+ * allocated from the pool for an RSS context. Note that the table size used
+ * must be a power of 2.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_OFST 160
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_LEN 4
+/* The maximum size (in table entries) of an indirection table to be
+ * allocated from the pool for an RSS context. Note that the table size used
+ * must be a power of 2.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_OFST 164
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_LEN 4
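
/* A minimal sketch of picking an RSS indirection table size: round the
 * wanted entry count up to a power of 2 and clamp to the advertised bounds,
 * assuming (as the comments above imply) that the bounds are themselves
 * powers of 2.
 */
static uint32_t rss_table_size(uint32_t want, uint32_t min_sz, uint32_t max_sz)
{
	uint32_t sz = min_sz;

	while (sz < want && sz < max_sz)
		sz <<= 1;	/* stays a power of 2 */
	return sz;
}
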
+/* The maximum number of queues that can be used by an RSS context in exclusive
+ * mode. In exclusive mode the context has a configurable indirection table and
+ * a configurable RSS key.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MAX_INDIRECTION_QUEUES_OFST 168
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MAX_INDIRECTION_QUEUES_LEN 4
+/* The maximum number of queues that can be used by an RSS context in even-
+ * spreading mode. In even-spreading mode the context has no indirection table
+ * but it does have a configurable RSS key.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_OFST 172
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_LEN 4
+/* The total number of RSS contexts supported. Note that the number of
+ * available contexts using indirection tables is also limited by the
+ * availability of indirection table space allocated from a common pool.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_NUM_CONTEXTS_OFST 176
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_NUM_CONTEXTS_LEN 4
+/* The total amount of indirection table space that can be shared between RSS
+ * contexts.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_TABLE_POOL_SIZE_OFST 180
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_TABLE_POOL_SIZE_LEN 4
+/* A bitmap of the queue sizes the device can provide, where bit N being set
+ * indicates that 2**N is a valid size. The device may be limited in the number
+ * of different queue sizes that can exist simultaneously, so a bit being set
+ * here does not guarantee that an attempt to create a queue of that size will
+ * succeed.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SUPPORTED_QUEUE_SIZES_OFST 184
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SUPPORTED_QUEUE_SIZES_LEN 4
+/* A bitmap of queue sizes that are always available, in the same format as
+ * SUPPORTED_QUEUE_SIZES. Attempting to create a queue with one of these sizes
+ * will never fail due to unavailability of the requested size.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_GUARANTEED_QUEUE_SIZES_OFST 188
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_GUARANTEED_QUEUE_SIZES_LEN 4
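
/* A minimal sketch of testing the queue-size bitmaps: bit N set means a
 * queue of 2**N entries is valid; the GUARANTEED bitmap is the subset whose
 * creation never fails for lack of resources. Reuses mcdi_le32() from above.
 */
static bool queue_size_supported(const uint8_t *resp, unsigned int log2_entries)
{
	uint32_t supported = mcdi_le32(resp +
		MC_CMD_GET_CAPABILITIES_V11_OUT_SUPPORTED_QUEUE_SIZES_OFST);

	return log2_entries < 32 && (supported & (1u << log2_entries));
}
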
+/* Number of available indirect memory maps. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INDIRECT_MAP_INDEX_COUNT_OFST 192
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INDIRECT_MAP_INDEX_COUNT_LEN 4
+
+/* MC_CMD_GET_CAPABILITIES_V12_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_LEN 204
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VPORT_RECONFIGURE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_STRIPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VADAPTOR_QUERY_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SET_MAC_ENHANCED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_QBB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_RSS_LIMITED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PACKED_STREAM_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_INCLUDE_FCS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_VLAN_INSERTION_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_VLAN_STRIPPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PREFIX_LEN_0_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PREFIX_LEN_14_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_TIMESTAMP_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_BATCHING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DISABLE_SCATTER_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VXLAN_NVGRE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
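
The REV and TYPE subfields above are packed into a single little-endian 16-bit
word. A minimal sketch (illustration only, not part of this patch) of how a
host driver might unpack the TXPD firmware version from a raw MCDI response
buffer; the helper name is hypothetical:

#include <stdint.h>

/* Little-endian 16-bit load; MCDI response fields are little-endian. */
static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

static void decode_txpd_fw_version(const uint8_t *outbuf)
{
	uint16_t ver = get_le16(outbuf + 10);	/* TXPD_FW_VERSION_OFST */
	unsigned int rev  = ver & 0x0fff;	/* REV:  LBN 0,  WIDTH 12 */
	unsigned int type = (ver >> 12) & 0x0f;	/* TYPE: LBN 12, WIDTH 4 */

	/* "type" can now be compared against the TXPD_FW_TYPE_* enums. */
	(void)rev;
	(void)type;
}
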
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVQ_TIMER_CTRL_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVENT_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_EVQ_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TIMESTAMP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCDI_BACKGROUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCDI_DB_RETURN_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CTPIO_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TSA_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TSA_BOUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_FLAG_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_MARK_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_L3XUDP_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_SPREADING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_HLB_IDLE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_BUNDLE_UPDATE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_BUNDLE_UPDATE_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_BUNDLE_UPDATE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V3_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V3_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V3_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DYNAMIC_SENSORS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DYNAMIC_SENSORS_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
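
Because FLAGS2 is absent on older firmware, a driver must length-check the
response before testing any of the bits above. A sketch under that assumption
(helper names hypothetical; get_le32() is the 32-bit analogue of the accessor
sketched earlier):

#include <stddef.h>
#include <stdint.h>

static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static int supports_tx_tso_v2(const uint8_t *outbuf, size_t outlen)
{
	if (outlen < 20 + 4)	/* FLAGS2_OFST + FLAGS2_LEN */
		return 0;	/* older firmware: treat the flag as clear */
	return (get_le32(outbuf + 20) >> 0) & 1;	/* TX_TSO_V2: LBN 0 */
}
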
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that the PF is assigned, but the assignment
+ * cannot be expressed in this field. It is intended for a possible future
+ * situation where a more complex scheme of mapping PFs to ports is in use. A
+ * future driver should look for a new field supporting the new scheme; a
+ * current/old driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
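
A sketch (illustrative only) of decoding one PFS_TO_PORTS_ASSIGNMENT byte,
folding INCOMPATIBLE_ASSIGNMENT into the PF_NOT_ASSIGNED case as the comment
above recommends:

#include <stdint.h>

/* Returns the external port number, or -1 if the PF has no usable
 * assignment. Offsets follow the V12 layout above.
 */
static int pf_external_port(const uint8_t *outbuf, unsigned int pf)
{
	uint8_t v = outbuf[26 + pf];	/* PFS_TO_PORTS_ASSIGNMENT_OFST */

	switch (v) {
	case 0xff:	/* ACCESS_NOT_PERMITTED */
	case 0xfe:	/* PF_NOT_PRESENT */
	case 0xfd:	/* PF_NOT_ASSIGNED */
	case 0xfc:	/* INCOMPATIBLE_ASSIGNMENT: treat as not assigned */
		return -1;
	default:
		return v;	/* external port number */
	}
}
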
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V12_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V12_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_DESC_CACHE_SIZE_LEN 1
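
Both cache sizes are stored as base-2 logarithms, so the actual entry counts
are recovered with a shift. Sketch only, not part of the patch:

#include <stdint.h>

static void decode_desc_cache_sizes(const uint8_t *outbuf,
				    unsigned int *rx, unsigned int *tx)
{
	*rx = 1u << outbuf[66];	/* 2 ^ RX_DESC_CACHE_SIZE */
	*tx = 1u << outbuf[67];	/* 2 ^ TX_DESC_CACHE_SIZE */
}
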
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI-to-address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_WINDOW_MODE_64K 0x2
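
A sketch of translating VI_WINDOW_MODE into the per-VI stride a driver would
use when mapping VI registers; the strides mirror the enum comments above
(illustration only):

#include <stddef.h>
#include <stdint.h>

static size_t vi_window_stride(uint8_t mode)
{
	switch (mode) {
	case 0x0: return  8 * 1024;	/* VI_WINDOW_MODE_8K, no CTPIO */
	case 0x1: return 16 * 1024;	/* VI_WINDOW_MODE_16K */
	case 0x2: return 64 * 1024;	/* VI_WINDOW_MODE_64K */
	default:  return 0;		/* unknown mode: caller must bail */
	}
}
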
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAC_STATS_NUM_STATS_LEN 2
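
The comment above implies a simple sizing rule for the MAC stats DMA buffer;
a sketch, reusing the hypothetical get_le16() accessor from the earlier
example:

#include <stddef.h>
#include <stdint.h>

static size_t mac_stats_dma_len(const uint8_t *outbuf)
{
	/* MAC_STATS_NUM_STATS at offset 76; each stat is a 64-bit value,
	 * so a full-size buffer needs nstats * 8 bytes.
	 */
	return (size_t)get_le16(outbuf + 76) * 8;
}
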
+/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field
+ * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_MARK_MAX_OFST 80
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_MARK_MAX_LEN 4
+/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in
+ * GET_CAPABILITIES_V2_OUT) is set, drivers have to specify a buffer size when
+ * they create an RX queue. Due to hardware limitations, only a small number of
+ * different buffer sizes may be available concurrently. Nonzero entries in
+ * this array are the sizes of buffers which the system guarantees will be
+ * available for use. If the list is empty, there are no limitations on
+ * concurrent buffer sizes.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16
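
A sketch of the lookup the comment above describes: scan the nonzero entries,
and treat an all-zero list as "no restriction" (hypothetical helper, reusing
get_le32() from the earlier sketch):

#include <stdint.h>

static int rx_buffer_size_guaranteed(const uint8_t *outbuf, uint32_t size)
{
	unsigned int i;
	int any = 0;

	for (i = 0; i < 16; i++) {	/* GUARANTEED_RX_BUFFER_SIZES_NUM */
		uint32_t s = get_le32(outbuf + 84 + 4 * i);

		if (!s)
			continue;	/* zero entries are padding */
		any = 1;
		if (s == size)
			return 1;
	}
	return !any;	/* empty list: no limitation on buffer sizes */
}
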
+/* Third word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FLAGS3_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FLAGS3_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_WOL_ETHERWAKE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_WOL_ETHERWAKE_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_WOL_ETHERWAKE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_EVEN_SPREADING_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_EVEN_SPREADING_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_EVEN_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_SUPPORTED_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VDPA_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VDPA_SUPPORTED_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VDPA_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_ENCAPSULATED_MCDI_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_ENCAPSULATED_MCDI_SUPPORTED_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_ENCAPSULATED_MCDI_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EXTERNAL_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EXTERNAL_MAE_SUPPORTED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EXTERNAL_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CLIENT_CMD_VF_PROXY_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CLIENT_CMD_VF_PROXY_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CXL_CONFIG_ENABLE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CXL_CONFIG_ENABLE_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CXL_CONFIG_ENABLE_WIDTH 1
+/* These bits are reserved for communicating test-specific capabilities to
+ * host-side test software. All production drivers should treat this field as
+ * opaque.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_LEN 8
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_LO_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_LO_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_LO_LBN 1216
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_LO_WIDTH 32
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_HI_OFST 156
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_HI_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_HI_LBN 1248
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_HI_WIDTH 32
+/* The minimum size (in table entries) of an indirection table to be allocated
+ * from the pool for an RSS context. Note that the table size used must be a
+ * power of 2.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_OFST 160
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_LEN 4
+/* The maximum size (in table entries) of an indirection table to be allocated
+ * from the pool for an RSS context. Note that the table size used must be a
+ * power of 2.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_OFST 164
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_LEN 4
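
Given the min/max bounds and the power-of-two requirement stated above, a
driver might pick a table size as sketched below (assumes the reported MIN and
MAX are themselves powers of two, which the power-of-2 rule suggests but the
header does not state explicitly):

#include <stdint.h>

static uint32_t pick_indir_table_size(uint32_t wanted, uint32_t min,
				      uint32_t max)
{
	uint32_t size = min;

	/* Grow in powers of two until the request is covered or MAX hit. */
	while (size < wanted && size < max)
		size <<= 1;
	return size;
}
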
+/* The maximum number of queues that can be used by an RSS context in exclusive
+ * mode. In exclusive mode the context has a configurable indirection table and
+ * a configurable RSS key.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MAX_INDIRECTION_QUEUES_OFST 168
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MAX_INDIRECTION_QUEUES_LEN 4
+/* The maximum number of queues that can be used by an RSS context in even-
+ * spreading mode. In even-spreading mode the context has no indirection table
+ * but it does have a configurable RSS key.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_OFST 172
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_LEN 4
+/* The total number of RSS contexts supported. Note that the number of
+ * available contexts using indirection tables is also limited by the
+ * availability of indirection table space allocated from a common pool.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_NUM_CONTEXTS_OFST 176
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_NUM_CONTEXTS_LEN 4
+/* The total amount of indirection table space that can be shared between RSS
+ * contexts.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_TABLE_POOL_SIZE_OFST 180
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_TABLE_POOL_SIZE_LEN 4
+/* A bitmap of the queue sizes the device can provide, where bit N being set
+ * indicates that 2**N is a valid size. The device may be limited in the number
+ * of different queue sizes that can exist simultaneously, so a bit being set
+ * here does not guarantee that an attempt to create a queue of that size will
+ * succeed.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SUPPORTED_QUEUE_SIZES_OFST 184
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SUPPORTED_QUEUE_SIZES_LEN 4
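
A sketch of testing a candidate queue size against the bitmap semantics
described above (GCC/Clang __builtin_ctz assumed; illustration only):

#include <stdint.h>

static int queue_size_supported(uint32_t bitmap, uint32_t entries)
{
	/* Valid sizes are powers of two: exactly one bit set. */
	if (!entries || (entries & (entries - 1)))
		return 0;
	/* Bit N set means 2**N is a valid size. */
	return (bitmap >> __builtin_ctz(entries)) & 1;
}
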
+/* A bitmap of queue sizes that are always available, in the same format as
+ * SUPPORTED_QUEUE_SIZES. Attempting to create a queue with one of these sizes
+ * will never fail due to unavailability of the requested size.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_GUARANTEED_QUEUE_SIZES_OFST 188
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_GUARANTEED_QUEUE_SIZES_LEN 4
+/* Number of available indirect memory maps. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INDIRECT_MAP_INDEX_COUNT_OFST 192
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INDIRECT_MAP_INDEX_COUNT_LEN 4
+/* Number of VIs available for external ports 4-7. Information for ports 0-3 is
+ * in NUM_VIS_PER_PORT in GET_CAPABILITIES_V2_OUT.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT2_OFST 196
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT2_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT2_NUM 4
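
Ports 0-3 and 4-7 live in two separate arrays, so a per-port lookup has to
select between them; a sketch, reusing the hypothetical get_le16() accessor
from the first example:

#include <stdint.h>

static uint16_t num_vis_for_port(const uint8_t *outbuf, unsigned int port)
{
	if (port < 4)	/* NUM_VIS_PER_PORT: offset 58, 4 entries of 2 bytes */
		return get_le16(outbuf + 58 + 2 * port);
	/* NUM_VIS_PER_PORT2: offset 196, ports 4-7 */
	return get_le16(outbuf + 196 + 2 * (port - 4));
}
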
+
/***********************************/
/* MC_CMD_V2_EXTN
@@ -18468,168 +19479,13 @@
* are not defined.
*/
#define MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_TSA 0x1
-
-
-/***********************************/
-/* MC_CMD_TCM_BUCKET_ALLOC
- * Allocate a pacer bucket (for QAU RP or a snapper test)
+/* enum: MCDI command used for platform management. Typically, these commands
+ * are used for low-level operations directed at the platform as a whole (e.g.
+ * MMIO device enumeration) rather than at individual functions, and they use
+ * a dedicated comms channel (e.g. RPmsg/IPI). They may be handled by the same
+ * CPU as MCDI_MESSAGE_TYPE_MC or by a different one.
+ */
-#define MC_CMD_TCM_BUCKET_ALLOC 0xb2
-#undef MC_CMD_0xb2_PRIVILEGE_CTG
-
-#define MC_CMD_0xb2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_TCM_BUCKET_ALLOC_IN msgrequest */
-#define MC_CMD_TCM_BUCKET_ALLOC_IN_LEN 0
-
-/* MC_CMD_TCM_BUCKET_ALLOC_OUT msgresponse */
-#define MC_CMD_TCM_BUCKET_ALLOC_OUT_LEN 4
-/* the bucket id */
-#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_OFST 0
-#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_LEN 4
-
-
-/***********************************/
-/* MC_CMD_TCM_BUCKET_FREE
- * Free a pacer bucket
- */
-#define MC_CMD_TCM_BUCKET_FREE 0xb3
-#undef MC_CMD_0xb3_PRIVILEGE_CTG
-
-#define MC_CMD_0xb3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_TCM_BUCKET_FREE_IN msgrequest */
-#define MC_CMD_TCM_BUCKET_FREE_IN_LEN 4
-/* the bucket id */
-#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_OFST 0
-#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_LEN 4
-
-/* MC_CMD_TCM_BUCKET_FREE_OUT msgresponse */
-#define MC_CMD_TCM_BUCKET_FREE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_TCM_BUCKET_INIT
- * Initialise pacer bucket with a given rate
- */
-#define MC_CMD_TCM_BUCKET_INIT 0xb4
-#undef MC_CMD_0xb4_PRIVILEGE_CTG
-
-#define MC_CMD_0xb4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_TCM_BUCKET_INIT_IN msgrequest */
-#define MC_CMD_TCM_BUCKET_INIT_IN_LEN 8
-/* the bucket id */
-#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_OFST 0
-#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_LEN 4
-/* the rate in mbps */
-#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4
-#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_LEN 4
-
-/* MC_CMD_TCM_BUCKET_INIT_EXT_IN msgrequest */
-#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_LEN 12
-/* the bucket id */
-#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_OFST 0
-#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_LEN 4
-/* the rate in mbps */
-#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_OFST 4
-#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_LEN 4
-/* the desired maximum fill level */
-#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_OFST 8
-#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_LEN 4
-
-/* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */
-#define MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_TCM_TXQ_INIT
- * Initialise txq in pacer with given options or set options
- */
-#define MC_CMD_TCM_TXQ_INIT 0xb5
-#undef MC_CMD_0xb5_PRIVILEGE_CTG
-
-#define MC_CMD_0xb5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_TCM_TXQ_INIT_IN msgrequest */
-#define MC_CMD_TCM_TXQ_INIT_IN_LEN 28
-/* the txq id */
-#define MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0
-#define MC_CMD_TCM_TXQ_INIT_IN_QID_LEN 4
-/* the static priority associated with the txq */
-#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4
-#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_LEN 4
-/* Bitmask of the priority queues this txq is added to when it is inserted. */
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_LEN 4
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_LBN 0
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_LBN 1
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_WIDTH 1
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_LBN 2
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_WIDTH 1
-/* the reaction point (RP) bucket */
-#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12
-#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_LEN 4
-/* an already reserved bucket (typically set to bucket associated with outer
- * vswitch)
- */
-#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_OFST 16
-#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_LEN 4
-/* an already reserved bucket (typically set to bucket associated with inner
- * vswitch)
- */
-#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_OFST 20
-#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_LEN 4
-/* the min bucket (typically for ETS/minimum bandwidth) */
-#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24
-#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_LEN 4
-
-/* MC_CMD_TCM_TXQ_INIT_EXT_IN msgrequest */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LEN 32
-/* the txq id */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_OFST 0
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_LEN 4
-/* the static priority associated with the txq */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_OFST 4
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_LEN 4
-/* Bitmask of the priority queues this txq is added to when it is inserted. */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_LEN 4
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_LBN 0
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_LBN 1
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_WIDTH 1
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_LBN 2
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_WIDTH 1
-/* the reaction point (RP) bucket */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_OFST 12
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_LEN 4
-/* an already reserved bucket (typically set to bucket associated with outer
- * vswitch)
- */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_OFST 16
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_LEN 4
-/* an already reserved bucket (typically set to bucket associated with inner
- * vswitch)
- */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_OFST 20
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_LEN 4
-/* the min bucket (typically for ETS/minimum bandwidth) */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_OFST 24
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_LEN 4
-/* the static priority associated with the txq */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_OFST 28
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_LEN 4
-
-/* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */
-#define MC_CMD_TCM_TXQ_INIT_OUT_LEN 0
+#define MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_PLATFORM 0x2
/***********************************/
@@ -18740,27 +19596,6 @@
/***********************************/
-/* MC_CMD_VSWITCH_QUERY
- * read some config of v-switch. For now this command is an empty placeholder.
- * It may be used to check if a v-switch is connected to a given EVB port (if
- * not, then the command returns ENOENT).
- */
-#define MC_CMD_VSWITCH_QUERY 0x63
-#undef MC_CMD_0x63_PRIVILEGE_CTG
-
-#define MC_CMD_0x63_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_VSWITCH_QUERY_IN msgrequest */
-#define MC_CMD_VSWITCH_QUERY_IN_LEN 4
-/* The port to which the v-switch is connected. */
-#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
-#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_LEN 4
-
-/* MC_CMD_VSWITCH_QUERY_OUT msgresponse */
-#define MC_CMD_VSWITCH_QUERY_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_VPORT_ALLOC
* allocate a v-port.
*/
@@ -18936,28 +19771,6 @@
/***********************************/
-/* MC_CMD_VADAPTOR_GET_MAC
- * read the MAC address assigned to a v-adaptor.
- */
-#define MC_CMD_VADAPTOR_GET_MAC 0x5e
-#undef MC_CMD_0x5e_PRIVILEGE_CTG
-
-#define MC_CMD_0x5e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_VADAPTOR_GET_MAC_IN msgrequest */
-#define MC_CMD_VADAPTOR_GET_MAC_IN_LEN 4
-/* The port to which the v-adaptor is connected. */
-#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
-#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_LEN 4
-
-/* MC_CMD_VADAPTOR_GET_MAC_OUT msgresponse */
-#define MC_CMD_VADAPTOR_GET_MAC_OUT_LEN 6
-/* The MAC address assigned to this v-adaptor */
-#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_OFST 0
-#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_LEN 6
-
-
-/***********************************/
/* MC_CMD_VADAPTOR_QUERY
* read some config of v-adaptor.
*/
@@ -19014,86 +19827,6 @@
/***********************************/
-/* MC_CMD_RDWR_A64_REGIONS
- * Assign the 64 bit region addresses.
- */
-#define MC_CMD_RDWR_A64_REGIONS 0x9b
-#undef MC_CMD_0x9b_PRIVILEGE_CTG
-
-#define MC_CMD_0x9b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */
-#define MC_CMD_RDWR_A64_REGIONS_IN_LEN 17
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_LEN 4
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_OFST 4
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_LEN 4
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_OFST 8
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_LEN 4
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_OFST 12
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_LEN 4
-/* Write enable bits 0-3, set to write, clear to read. */
-#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_LBN 128
-#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_WIDTH 4
-#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_OFST 16
-#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_LEN 1
-
-/* MC_CMD_RDWR_A64_REGIONS_OUT msgresponse: This data is always included,
- * regardless of the state of the write bits in the request.
- */
-#define MC_CMD_RDWR_A64_REGIONS_OUT_LEN 16
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_OFST 0
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_LEN 4
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_OFST 4
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_LEN 4
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_OFST 8
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_LEN 4
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_OFST 12
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_LEN 4
-
-
-/***********************************/
-/* MC_CMD_ONLOAD_STACK_ALLOC
- * Allocate an Onload stack ID.
- */
-#define MC_CMD_ONLOAD_STACK_ALLOC 0x9c
-#undef MC_CMD_0x9c_PRIVILEGE_CTG
-
-#define MC_CMD_0x9c_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
-
-/* MC_CMD_ONLOAD_STACK_ALLOC_IN msgrequest */
-#define MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4
-/* The handle of the owning upstream port */
-#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
-#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
-
-/* MC_CMD_ONLOAD_STACK_ALLOC_OUT msgresponse */
-#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_LEN 4
-/* The handle of the new Onload stack */
-#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_OFST 0
-#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_LEN 4
-
-
-/***********************************/
-/* MC_CMD_ONLOAD_STACK_FREE
- * Free an Onload stack ID.
- */
-#define MC_CMD_ONLOAD_STACK_FREE 0x9d
-#undef MC_CMD_0x9d_PRIVILEGE_CTG
-
-#define MC_CMD_0x9d_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
-
-/* MC_CMD_ONLOAD_STACK_FREE_IN msgrequest */
-#define MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4
-/* The handle of the Onload stack */
-#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_OFST 0
-#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_LEN 4
-
-/* MC_CMD_ONLOAD_STACK_FREE_OUT msgresponse */
-#define MC_CMD_ONLOAD_STACK_FREE_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_RSS_CONTEXT_ALLOC
* Allocate an RSS context.
*/
@@ -19305,93 +20038,6 @@
/***********************************/
-/* MC_CMD_RSS_CONTEXT_WRITE_TABLE
- * Write a portion of a selectable-size indirection table for an RSS context.
- * This command must be used instead of MC_CMD_RSS_CONTEXT_SET_TABLE if the
- * RSS_SELECTABLE_TABLE_SIZE bit is set in MC_CMD_GET_CAPABILITIES.
- */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE 0x13e
-#undef MC_CMD_0x13e_PRIVILEGE_CTG
-
-#define MC_CMD_0x13e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN msgrequest */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMIN 8
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMAX 252
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMAX_MCDI2 1020
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LEN(num) (4+4*(num))
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_NUM(len) (((len)-4)/4)
-/* The handle of the RSS context */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_RSS_CONTEXT_ID_OFST 0
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_RSS_CONTEXT_ID_LEN 4
-/* An array of index-value pairs to be written to the table. Structure is
- * MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY.
- */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_OFST 4
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_LEN 4
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MINNUM 1
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MAXNUM 62
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MAXNUM_MCDI2 254
-
-/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_OUT msgresponse */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_OUT_LEN 0
-
-/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY structuredef */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_LEN 4
-/* The index of the table entry to be written. */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_OFST 0
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_LEN 2
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_LBN 0
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_WIDTH 16
-/* The value to write into the table entry. */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_OFST 2
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_LEN 2
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_LBN 16
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_WIDTH 16
-
-
-/***********************************/
-/* MC_CMD_RSS_CONTEXT_READ_TABLE
- * Read a portion of a selectable-size indirection table for an RSS context.
- * This command must be used instead of MC_CMD_RSS_CONTEXT_GET_TABLE if the
- * RSS_SELECTABLE_TABLE_SIZE bit is set in MC_CMD_GET_CAPABILITIES.
- */
-#define MC_CMD_RSS_CONTEXT_READ_TABLE 0x13f
-#undef MC_CMD_0x13f_PRIVILEGE_CTG
-
-#define MC_CMD_0x13f_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_RSS_CONTEXT_READ_TABLE_IN msgrequest */
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMIN 6
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMAX 252
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMAX_MCDI2 1020
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LEN(num) (4+2*(num))
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_NUM(len) (((len)-4)/2)
-/* The handle of the RSS context */
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_RSS_CONTEXT_ID_OFST 0
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_RSS_CONTEXT_ID_LEN 4
-/* An array containing the indices of the entries to be read. */
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_OFST 4
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_LEN 2
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MINNUM 1
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MAXNUM 124
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MAXNUM_MCDI2 508
-
-/* MC_CMD_RSS_CONTEXT_READ_TABLE_OUT msgresponse */
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMIN 2
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMAX 252
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LEN(num) (0+2*(num))
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_NUM(len) (((len)-0)/2)
-/* A buffer containing the requested entries read from the table. */
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_OFST 0
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_LEN 2
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MINNUM 1
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MAXNUM 126
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MAXNUM_MCDI2 510
-
-
-/***********************************/
/* MC_CMD_RSS_CONTEXT_SET_FLAGS
* Set various control flags for an RSS context.
*/
@@ -19525,158 +20171,6 @@
/***********************************/
-/* MC_CMD_DOT1P_MAPPING_ALLOC
- * Allocate a .1p mapping.
- */
-#define MC_CMD_DOT1P_MAPPING_ALLOC 0xa4
-#undef MC_CMD_0xa4_PRIVILEGE_CTG
-
-#define MC_CMD_0xa4_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DOT1P_MAPPING_ALLOC_IN msgrequest */
-#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_LEN 8
-/* The handle of the owning upstream port */
-#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
-#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
-/* Number of queues spanned by this mapping, in the range 1-64; valid fixed
- * offsets in the mapping table will be in the range 0 to NUM_QUEUES-1, and
- * referenced RSS contexts must span no more than this number.
- */
-#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_OFST 4
-#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_LEN 4
-
-/* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */
-#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4
-/* The handle of the new .1p mapping. This should be considered opaque to the
- * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid
- * handle.
- */
-#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0
-#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_LEN 4
-/* enum: guaranteed invalid .1p mapping handle value */
-#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_INVALID 0xffffffff
-
-
-/***********************************/
-/* MC_CMD_DOT1P_MAPPING_FREE
- * Free a .1p mapping.
- */
-#define MC_CMD_DOT1P_MAPPING_FREE 0xa5
-#undef MC_CMD_0xa5_PRIVILEGE_CTG
-
-#define MC_CMD_0xa5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DOT1P_MAPPING_FREE_IN msgrequest */
-#define MC_CMD_DOT1P_MAPPING_FREE_IN_LEN 4
-/* The handle of the .1p mapping */
-#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_OFST 0
-#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_LEN 4
-
-/* MC_CMD_DOT1P_MAPPING_FREE_OUT msgresponse */
-#define MC_CMD_DOT1P_MAPPING_FREE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_DOT1P_MAPPING_SET_TABLE
- * Set the mapping table for a .1p mapping.
- */
-#define MC_CMD_DOT1P_MAPPING_SET_TABLE 0xa6
-#undef MC_CMD_0xa6_PRIVILEGE_CTG
-
-#define MC_CMD_0xa6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DOT1P_MAPPING_SET_TABLE_IN msgrequest */
-#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_LEN 36
-/* The handle of the .1p mapping */
-#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
-#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4
-/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
- * handle)
- */
-#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_OFST 4
-#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_LEN 32
-
-/* MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT msgresponse */
-#define MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_DOT1P_MAPPING_GET_TABLE
- * Get the mapping table for a .1p mapping.
- */
-#define MC_CMD_DOT1P_MAPPING_GET_TABLE 0xa7
-#undef MC_CMD_0xa7_PRIVILEGE_CTG
-
-#define MC_CMD_0xa7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DOT1P_MAPPING_GET_TABLE_IN msgrequest */
-#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_LEN 4
-/* The handle of the .1p mapping */
-#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
-#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4
-
-/* MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT msgresponse */
-#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_LEN 36
-/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
- * handle)
- */
-#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_OFST 4
-#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_LEN 32
-
-
-/***********************************/
-/* MC_CMD_GET_VECTOR_CFG
- * Get Interrupt Vector config for this PF.
- */
-#define MC_CMD_GET_VECTOR_CFG 0xbf
-#undef MC_CMD_0xbf_PRIVILEGE_CTG
-
-#define MC_CMD_0xbf_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_VECTOR_CFG_IN msgrequest */
-#define MC_CMD_GET_VECTOR_CFG_IN_LEN 0
-
-/* MC_CMD_GET_VECTOR_CFG_OUT msgresponse */
-#define MC_CMD_GET_VECTOR_CFG_OUT_LEN 12
-/* Base absolute interrupt vector number. */
-#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_OFST 0
-#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_LEN 4
-/* Number of interrupt vectors allocated to this PF. */
-#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_OFST 4
-#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_LEN 4
-/* Number of interrupt vectors to allocate per VF. */
-#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_OFST 8
-#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_LEN 4
-
-
-/***********************************/
-/* MC_CMD_SET_VECTOR_CFG
- * Set Interrupt Vector config for this PF.
- */
-#define MC_CMD_SET_VECTOR_CFG 0xc0
-#undef MC_CMD_0xc0_PRIVILEGE_CTG
-
-#define MC_CMD_0xc0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_SET_VECTOR_CFG_IN msgrequest */
-#define MC_CMD_SET_VECTOR_CFG_IN_LEN 12
-/* Base absolute interrupt vector number, or MC_CMD_RESOURCE_INSTANCE_ANY to
- * let the system find a suitable base.
- */
-#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_OFST 0
-#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_LEN 4
-/* Number of interrupt vectors allocated to this PF. */
-#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_OFST 4
-#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_LEN 4
-/* Number of interrupt vectors to allocate per VF. */
-#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_OFST 8
-#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_LEN 4
-
-/* MC_CMD_SET_VECTOR_CFG_OUT msgresponse */
-#define MC_CMD_SET_VECTOR_CFG_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_VPORT_ADD_MAC_ADDRESS
* Add a MAC address to a v-port
*/
@@ -19810,124 +20304,6 @@
/***********************************/
-/* MC_CMD_EVB_PORT_QUERY
- * read some config of v-port.
- */
-#define MC_CMD_EVB_PORT_QUERY 0x62
-#undef MC_CMD_0x62_PRIVILEGE_CTG
-
-#define MC_CMD_0x62_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_EVB_PORT_QUERY_IN msgrequest */
-#define MC_CMD_EVB_PORT_QUERY_IN_LEN 4
-/* The handle of the v-port */
-#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_OFST 0
-#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_LEN 4
-
-/* MC_CMD_EVB_PORT_QUERY_OUT msgresponse */
-#define MC_CMD_EVB_PORT_QUERY_OUT_LEN 8
-/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
-#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_OFST 0
-#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_LEN 4
-/* The number of VLAN tags that may be used on a v-adaptor connected to this
- * EVB port.
- */
-#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 4
-#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4
-
-
-/***********************************/
-/* MC_CMD_DUMP_BUFTBL_ENTRIES
- * Dump buffer table entries, mainly for command client debug use. Dumps
- * absolute entries, and does not use chunk handles. All entries must be in
- * range, and used for q page mapping, Although the latter restriction may be
- * lifted in future.
- */
-#define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab
-#undef MC_CMD_0xab_PRIVILEGE_CTG
-
-#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_DUMP_BUFTBL_ENTRIES_IN msgrequest */
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN 8
-/* Index of the first buffer table entry. */
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_OFST 0
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4
-/* Number of buffer table entries to dump. */
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 4
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4
-
-/* MC_CMD_DUMP_BUFTBL_ENTRIES_OUT msgresponse */
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX 252
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LEN(num) (0+12*(num))
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_NUM(len) (((len)-0)/12)
-/* Raw buffer table entries, laid out as BUFTBL_ENTRY. */
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_OFST 0
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_LEN 12
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MINNUM 1
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MAXNUM 21
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MAXNUM_MCDI2 85
-
-
-/***********************************/
-/* MC_CMD_SET_RXDP_CONFIG
- * Set global RXDP configuration settings
- */
-#define MC_CMD_SET_RXDP_CONFIG 0xc1
-#undef MC_CMD_0xc1_PRIVILEGE_CTG
-
-#define MC_CMD_0xc1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SET_RXDP_CONFIG_IN msgrequest */
-#define MC_CMD_SET_RXDP_CONFIG_IN_LEN 4
-#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0
-#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_LEN 4
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_OFST 0
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_OFST 0
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_LBN 1
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_WIDTH 2
-/* enum: pad to 64 bytes */
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64 0x0
-/* enum: pad to 128 bytes (Medford only) */
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128 0x1
-/* enum: pad to 256 bytes (Medford only) */
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256 0x2
-
-/* MC_CMD_SET_RXDP_CONFIG_OUT msgresponse */
-#define MC_CMD_SET_RXDP_CONFIG_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_GET_RXDP_CONFIG
- * Get global RXDP configuration settings
- */
-#define MC_CMD_GET_RXDP_CONFIG 0xc2
-#undef MC_CMD_0xc2_PRIVILEGE_CTG
-
-#define MC_CMD_0xc2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_RXDP_CONFIG_IN msgrequest */
-#define MC_CMD_GET_RXDP_CONFIG_IN_LEN 0
-
-/* MC_CMD_GET_RXDP_CONFIG_OUT msgresponse */
-#define MC_CMD_GET_RXDP_CONFIG_OUT_LEN 4
-#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0
-#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_LEN 4
-#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_OFST 0
-#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0
-#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1
-#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_OFST 0
-#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_LBN 1
-#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_WIDTH 2
-/* Enum values, see field(s): */
-/* MC_CMD_SET_RXDP_CONFIG/MC_CMD_SET_RXDP_CONFIG_IN/PAD_HOST_LEN */
-
-
-/***********************************/
/* MC_CMD_GET_CLOCK
 * Return the system and DPCPU clock frequencies.
*/
@@ -19950,210 +20326,6 @@
/***********************************/
-/* MC_CMD_SET_CLOCK
- * Control the system and DPCPU clock frequencies. Changes are lost on reboot.
- */
-#define MC_CMD_SET_CLOCK 0xad
-#undef MC_CMD_0xad_PRIVILEGE_CTG
-
-#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_SET_CLOCK_IN msgrequest */
-#define MC_CMD_SET_CLOCK_IN_LEN 28
-/* Requested frequency in MHz for system clock domain */
-#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0
-#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_LEN 4
-/* enum: Leave the system clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE 0x0
-/* Requested frequency in MHz for inter-core clock domain */
-#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4
-#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_LEN 4
-/* enum: Leave the inter-core clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE 0x0
-/* Requested frequency in MHz for DPCPU clock domain */
-#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8
-#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_LEN 4
-/* enum: Leave the DPCPU clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE 0x0
-/* Requested frequency in MHz for PCS clock domain */
-#define MC_CMD_SET_CLOCK_IN_PCS_FREQ_OFST 12
-#define MC_CMD_SET_CLOCK_IN_PCS_FREQ_LEN 4
-/* enum: Leave the PCS clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE 0x0
-/* Requested frequency in MHz for MC clock domain */
-#define MC_CMD_SET_CLOCK_IN_MC_FREQ_OFST 16
-#define MC_CMD_SET_CLOCK_IN_MC_FREQ_LEN 4
-/* enum: Leave the MC clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE 0x0
-/* Requested frequency in MHz for rmon clock domain */
-#define MC_CMD_SET_CLOCK_IN_RMON_FREQ_OFST 20
-#define MC_CMD_SET_CLOCK_IN_RMON_FREQ_LEN 4
-/* enum: Leave the rmon clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE 0x0
-/* Requested frequency in MHz for vswitch clock domain */
-#define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_OFST 24
-#define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_LEN 4
-/* enum: Leave the vswitch clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE 0x0
-
-/* MC_CMD_SET_CLOCK_OUT msgresponse */
-#define MC_CMD_SET_CLOCK_OUT_LEN 28
-/* Resulting system frequency in MHz */
-#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0
-#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_LEN 4
-/* enum: The system clock domain doesn't exist */
-#define MC_CMD_SET_CLOCK_OUT_SYS_DOMAIN_UNSUPPORTED 0x0
-/* Resulting inter-core frequency in MHz */
-#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4
-#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_LEN 4
-/* enum: The inter-core clock domain doesn't exist / isn't used */
-#define MC_CMD_SET_CLOCK_OUT_ICORE_DOMAIN_UNSUPPORTED 0x0
-/* Resulting DPCPU frequency in MHz */
-#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8
-#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_LEN 4
-/* enum: The dpcpu clock domain doesn't exist */
-#define MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED 0x0
-/* Resulting PCS frequency in MHz */
-#define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_OFST 12
-#define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_LEN 4
-/* enum: The PCS clock domain doesn't exist / isn't controlled */
-#define MC_CMD_SET_CLOCK_OUT_PCS_DOMAIN_UNSUPPORTED 0x0
-/* Resulting MC frequency in MHz */
-#define MC_CMD_SET_CLOCK_OUT_MC_FREQ_OFST 16
-#define MC_CMD_SET_CLOCK_OUT_MC_FREQ_LEN 4
-/* enum: The MC clock domain doesn't exist / isn't controlled */
-#define MC_CMD_SET_CLOCK_OUT_MC_DOMAIN_UNSUPPORTED 0x0
-/* Resulting rmon frequency in MHz */
-#define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_OFST 20
-#define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_LEN 4
-/* enum: The rmon clock domain doesn't exist / isn't controlled */
-#define MC_CMD_SET_CLOCK_OUT_RMON_DOMAIN_UNSUPPORTED 0x0
-/* Resulting vswitch frequency in MHz */
-#define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_OFST 24
-#define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_LEN 4
-/* enum: The vswitch clock domain doesn't exist / isn't controlled */
-#define MC_CMD_SET_CLOCK_OUT_VSWITCH_DOMAIN_UNSUPPORTED 0x0
-
-
-/***********************************/
-/* MC_CMD_DPCPU_RPC
- * Send an arbitrary DPCPU message.
- */
-#define MC_CMD_DPCPU_RPC 0xae
-#undef MC_CMD_0xae_PRIVILEGE_CTG
-
-#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_DPCPU_RPC_IN msgrequest */
-#define MC_CMD_DPCPU_RPC_IN_LEN 36
-#define MC_CMD_DPCPU_RPC_IN_CPU_OFST 0
-#define MC_CMD_DPCPU_RPC_IN_CPU_LEN 4
-/* enum: RxDPCPU0 */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX0 0x0
-/* enum: TxDPCPU0 */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX0 0x1
-/* enum: TxDPCPU1 */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX1 0x2
-/* enum: RxDPCPU1 (Medford only) */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX1 0x3
-/* enum: RxDPCPU (will be for the calling function; for now, just an alias of
- * DPCPU_RX0)
- */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x80
-/* enum: TxDPCPU (will be for the calling function; for now, just an alias of
- * DPCPU_TX0)
- */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX 0x81
-/* First 8 bits [39:32] of DATA are consumed by MC-DPCPU protocol and must be
- * initialised to zero
- */
-#define MC_CMD_DPCPU_RPC_IN_DATA_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_DATA_LEN 32
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_LBN 8
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_WIDTH 8
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_READ 0x6 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_WRITE 0x7 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_SELF_TEST 0xc /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_CSR_ACCESS 0xe /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_READ 0x46 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_WRITE 0x47 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SELF_TEST 0x4a /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_CSR_ACCESS 0x4c /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SET_MC_REPLAY_CNTXT 0x4d /* enum */
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_LBN 16
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_LBN 16
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_LBN 48
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_LBN 16
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_WIDTH 240
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_LBN 16
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_STOP_RETURN_RESULT 0x0 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_READ 0x1 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE 0x2 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE_READ 0x3 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_PIPELINED_READ 0x4 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_LBN 48
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_LBN 64
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_LBN 80
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_LBN 16
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_CUT_THROUGH 0x1 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD 0x2 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD_FIRST 0x3 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_LBN 64
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_WDATA_OFST 12
-#define MC_CMD_DPCPU_RPC_IN_WDATA_LEN 24
-/* Register data to write. Only valid in write/write-read. */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_OFST 16
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_LEN 4
-/* Register address. */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_OFST 20
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_LEN 4
-
-/* MC_CMD_DPCPU_RPC_OUT msgresponse */
-#define MC_CMD_DPCPU_RPC_OUT_LEN 36
-#define MC_CMD_DPCPU_RPC_OUT_RC_OFST 0
-#define MC_CMD_DPCPU_RPC_OUT_RC_LEN 4
-/* DATA */
-#define MC_CMD_DPCPU_RPC_OUT_DATA_OFST 4
-#define MC_CMD_DPCPU_RPC_OUT_DATA_LEN 32
-#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_OFST 4
-#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_LBN 32
-#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_WIDTH 16
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_OFST 4
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_LBN 48
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_WIDTH 16
-#define MC_CMD_DPCPU_RPC_OUT_RDATA_OFST 12
-#define MC_CMD_DPCPU_RPC_OUT_RDATA_LEN 24
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_OFST 12
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_LEN 4
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_OFST 16
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_LEN 4
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_OFST 20
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_LEN 4
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_OFST 24
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_LEN 4
-
-
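The OFST/LBN/WIDTH triples above describe sub-fields packed inside the 32-byte DATA payload rather than ordinary struct members, so a caller assembles a request bit by bit. Below is a minimal host-side sketch of that packing, assuming only the macros in this header; mcdi_set_field() is a hypothetical helper, not a driver API.

#include <stdint.h>
#include <string.h>

/* Write a WIDTH-bit value at little-endian bit offset LBN in a zeroed buffer */
static void mcdi_set_field(uint8_t *buf, unsigned int lbn,
			   unsigned int width, uint32_t value)
{
	unsigned int bit;

	for (bit = 0; bit < width; bit++)
		if (value & (1u << bit))
			buf[(lbn + bit) / 8] |= 1u << ((lbn + bit) % 8);
}

/* Build a TXDPCPU CSR-access "start read" request. The memset also clears
 * the first 8 bits of DATA reserved for the MC-DPCPU protocol; LBN values
 * here are relative to the DATA field at byte offset 4.
 */
static void build_dpcpu_csr_read(uint8_t *req)
{
	uint8_t *data = req + MC_CMD_DPCPU_RPC_IN_DATA_OFST;

	memset(req, 0, MC_CMD_DPCPU_RPC_IN_DATA_OFST +
	       MC_CMD_DPCPU_RPC_IN_DATA_LEN);
	mcdi_set_field(data, MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_LBN,
		       MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_WIDTH,
		       MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_CSR_ACCESS);
	mcdi_set_field(data, MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_LBN,
		       MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_WIDTH,
		       MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_READ);
}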
-/***********************************/
/* MC_CMD_TRIGGER_INTERRUPT
* Trigger an interrupt by prodding the BIU.
*/
@@ -20173,66 +20345,6 @@
/***********************************/
-/* MC_CMD_SHMBOOT_OP
- * Special operations to support (for now) shmboot.
- */
-#define MC_CMD_SHMBOOT_OP 0xe6
-#undef MC_CMD_0xe6_PRIVILEGE_CTG
-
-#define MC_CMD_0xe6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SHMBOOT_OP_IN msgrequest */
-#define MC_CMD_SHMBOOT_OP_IN_LEN 4
-/* Identifies the operation to perform */
-#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_OFST 0
-#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_LEN 4
-/* enum: Copy slave_data section to the slave core. (Greenport only) */
-#define MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA 0x0
-
-/* MC_CMD_SHMBOOT_OP_OUT msgresponse */
-#define MC_CMD_SHMBOOT_OP_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_CAP_BLK_READ
- * Read multiple 64bit words from capture block memory
- */
-#define MC_CMD_CAP_BLK_READ 0xe7
-#undef MC_CMD_0xe7_PRIVILEGE_CTG
-
-#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_CAP_BLK_READ_IN msgrequest */
-#define MC_CMD_CAP_BLK_READ_IN_LEN 12
-#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_OFST 0
-#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_LEN 4
-#define MC_CMD_CAP_BLK_READ_IN_ADDR_OFST 4
-#define MC_CMD_CAP_BLK_READ_IN_ADDR_LEN 4
-#define MC_CMD_CAP_BLK_READ_IN_COUNT_OFST 8
-#define MC_CMD_CAP_BLK_READ_IN_COUNT_LEN 4
-
-/* MC_CMD_CAP_BLK_READ_OUT msgresponse */
-#define MC_CMD_CAP_BLK_READ_OUT_LENMIN 8
-#define MC_CMD_CAP_BLK_READ_OUT_LENMAX 248
-#define MC_CMD_CAP_BLK_READ_OUT_LENMAX_MCDI2 1016
-#define MC_CMD_CAP_BLK_READ_OUT_LEN(num) (0+8*(num))
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_NUM(len) (((len)-0)/8)
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_OFST 0
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LEN 8
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_OFST 0
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_LEN 4
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_LBN 0
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_WIDTH 32
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_OFST 4
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_LEN 4
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_LBN 32
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_WIDTH 32
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MINNUM 1
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM 31
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM_MCDI2 127
-
-
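The LENMIN/LENMAX/_LEN(num)/_NUM(len) quartet is how variable-length MCDI responses are sized: _NUM() recovers the element count from the byte length the firmware actually returned, and each 64-bit BUFFER word is carried as little-endian LO/HI dword halves. A hedged decode sketch follows; mcdi_dword() is an illustrative little-endian load, not an existing driver helper.

#include <stddef.h>
#include <stdint.h>

static uint32_t mcdi_dword(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static void decode_cap_blk_read(const uint8_t *resp, size_t resp_len,
				uint64_t *out)
{
	size_t i, num = MC_CMD_CAP_BLK_READ_OUT_BUFFER_NUM(resp_len);

	for (i = 0; i < num; i++) {
		const uint8_t *w = resp + MC_CMD_CAP_BLK_READ_OUT_BUFFER_OFST +
				   i * MC_CMD_CAP_BLK_READ_OUT_BUFFER_LEN;

		out[i] = (uint64_t)mcdi_dword(w +
				MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_OFST) |
			 ((uint64_t)mcdi_dword(w +
				MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_OFST) << 32);
	}
}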
-/***********************************/
/* MC_CMD_DUMP_DO
* Take a dump of the DUT state
*/
@@ -20380,34 +20492,6 @@
/***********************************/
-/* MC_CMD_SET_PSU
- * Adjusts power supply parameters. This is a warranty-voiding operation.
- * Returns: ENOENT if the parameter or rail specified does not exist, EINVAL if
- * the parameter is out of range.
- */
-#define MC_CMD_SET_PSU 0xea
-#undef MC_CMD_0xea_PRIVILEGE_CTG
-
-#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_SET_PSU_IN msgrequest */
-#define MC_CMD_SET_PSU_IN_LEN 12
-#define MC_CMD_SET_PSU_IN_PARAM_OFST 0
-#define MC_CMD_SET_PSU_IN_PARAM_LEN 4
-#define MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE 0x0 /* enum */
-#define MC_CMD_SET_PSU_IN_RAIL_OFST 4
-#define MC_CMD_SET_PSU_IN_RAIL_LEN 4
-#define MC_CMD_SET_PSU_IN_RAIL_0V9 0x0 /* enum */
-#define MC_CMD_SET_PSU_IN_RAIL_1V2 0x1 /* enum */
-/* desired value, eg voltage in mV */
-#define MC_CMD_SET_PSU_IN_VALUE_OFST 8
-#define MC_CMD_SET_PSU_IN_VALUE_LEN 4
-
-/* MC_CMD_SET_PSU_OUT msgresponse */
-#define MC_CMD_SET_PSU_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_GET_FUNCTION_INFO
* Get function information. PF and VF number.
*/
@@ -20448,7 +20532,7 @@
#define MC_CMD_ENABLE_OFFLINE_BIST 0xed
#undef MC_CMD_0xed_PRIVILEGE_CTG
-#define MC_CMD_0xed_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xed_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_ENABLE_OFFLINE_BIST_IN msgrequest */
#define MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN 0
@@ -20458,137 +20542,13 @@
/***********************************/
-/* MC_CMD_UART_SEND_DATA
- * Send checksummed[sic] block of data over the uart. Response is a placeholder
- * should we wish to make this reliable; currently requests are fire-and-
- * forget.
- */
-#define MC_CMD_UART_SEND_DATA 0xee
-#undef MC_CMD_0xee_PRIVILEGE_CTG
-
-#define MC_CMD_0xee_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_UART_SEND_DATA_OUT msgrequest */
-#define MC_CMD_UART_SEND_DATA_OUT_LENMIN 16
-#define MC_CMD_UART_SEND_DATA_OUT_LENMAX 252
-#define MC_CMD_UART_SEND_DATA_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_UART_SEND_DATA_OUT_LEN(num) (16+1*(num))
-#define MC_CMD_UART_SEND_DATA_OUT_DATA_NUM(len) (((len)-16)/1)
-/* CRC32 over OFFSET, LENGTH, RESERVED, DATA */
-#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_OFST 0
-#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_LEN 4
-/* Offset at which to write the data */
-#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST 4
-#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_LEN 4
-/* Length of data */
-#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_OFST 8
-#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_LEN 4
-/* Reserved for future use */
-#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_OFST 12
-#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_LEN 4
-#define MC_CMD_UART_SEND_DATA_OUT_DATA_OFST 16
-#define MC_CMD_UART_SEND_DATA_OUT_DATA_LEN 1
-#define MC_CMD_UART_SEND_DATA_OUT_DATA_MINNUM 0
-#define MC_CMD_UART_SEND_DATA_OUT_DATA_MAXNUM 236
-#define MC_CMD_UART_SEND_DATA_OUT_DATA_MAXNUM_MCDI2 1004
-
-/* MC_CMD_UART_SEND_DATA_IN msgresponse */
-#define MC_CMD_UART_SEND_DATA_IN_LEN 0
-
-
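Since the CHECKSUM dword covers everything that follows it (OFFSET, LENGTH, RESERVED, DATA), a sender fills it in last. The sketch below assumes the common IEEE 802.3 CRC32 polynomial; the header does not pin down the variant, so treat that choice as an assumption.

#include <stddef.h>
#include <stdint.h>

/* Bitwise CRC32 (reflected, polynomial 0xEDB88320) - assumed variant */
static uint32_t crc32_ieee(const uint8_t *p, size_t len)
{
	uint32_t crc = 0xffffffffu;

	while (len--) {
		unsigned int i;

		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0xedb88320u & -(crc & 1u));
	}
	return ~crc;
}

static void uart_send_fill_checksum(uint8_t *req, size_t req_len)
{
	/* CRC over OFFSET..end of DATA, i.e. everything after CHECKSUM */
	uint32_t crc = crc32_ieee(req + MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST,
				  req_len -
				  MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST);

	req[0] = crc & 0xff;		/* little-endian store at offset 0 */
	req[1] = (crc >> 8) & 0xff;
	req[2] = (crc >> 16) & 0xff;
	req[3] = (crc >> 24) & 0xff;
}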
-/***********************************/
-/* MC_CMD_UART_RECV_DATA
- * Request checksummed[sic] block of data over the uart. Only a placeholder,
- * subject to change and not currently implemented.
- */
-#define MC_CMD_UART_RECV_DATA 0xef
-#undef MC_CMD_0xef_PRIVILEGE_CTG
-
-#define MC_CMD_0xef_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_UART_RECV_DATA_OUT msgrequest */
-#define MC_CMD_UART_RECV_DATA_OUT_LEN 16
-/* CRC32 over OFFSET, LENGTH, RESERVED */
-#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_OFST 0
-#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_LEN 4
-/* Offset from which to read the data */
-#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_OFST 4
-#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_LEN 4
-/* Length of data */
-#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_OFST 8
-#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_LEN 4
-/* Reserved for future use */
-#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_OFST 12
-#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_LEN 4
-
-/* MC_CMD_UART_RECV_DATA_IN msgresponse */
-#define MC_CMD_UART_RECV_DATA_IN_LENMIN 16
-#define MC_CMD_UART_RECV_DATA_IN_LENMAX 252
-#define MC_CMD_UART_RECV_DATA_IN_LENMAX_MCDI2 1020
-#define MC_CMD_UART_RECV_DATA_IN_LEN(num) (16+1*(num))
-#define MC_CMD_UART_RECV_DATA_IN_DATA_NUM(len) (((len)-16)/1)
-/* CRC32 over RESERVED1, RESERVED2, RESERVED3, DATA */
-#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_OFST 0
-#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_LEN 4
-/* Offset at which to write the data */
-#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_OFST 4
-#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_LEN 4
-/* Length of data */
-#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_OFST 8
-#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_LEN 4
-/* Reserved for future use */
-#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_OFST 12
-#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_LEN 4
-#define MC_CMD_UART_RECV_DATA_IN_DATA_OFST 16
-#define MC_CMD_UART_RECV_DATA_IN_DATA_LEN 1
-#define MC_CMD_UART_RECV_DATA_IN_DATA_MINNUM 0
-#define MC_CMD_UART_RECV_DATA_IN_DATA_MAXNUM 236
-#define MC_CMD_UART_RECV_DATA_IN_DATA_MAXNUM_MCDI2 1004
-
-
-/***********************************/
-/* MC_CMD_READ_FUSES
- * Read data programmed into the device's One-Time-Programmable (OTP) fuses
- */
-#define MC_CMD_READ_FUSES 0xf0
-#undef MC_CMD_0xf0_PRIVILEGE_CTG
-
-#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_READ_FUSES_IN msgrequest */
-#define MC_CMD_READ_FUSES_IN_LEN 8
-/* Offset in OTP to read */
-#define MC_CMD_READ_FUSES_IN_OFFSET_OFST 0
-#define MC_CMD_READ_FUSES_IN_OFFSET_LEN 4
-/* Length of data to read in bytes */
-#define MC_CMD_READ_FUSES_IN_LENGTH_OFST 4
-#define MC_CMD_READ_FUSES_IN_LENGTH_LEN 4
-
-/* MC_CMD_READ_FUSES_OUT msgresponse */
-#define MC_CMD_READ_FUSES_OUT_LENMIN 4
-#define MC_CMD_READ_FUSES_OUT_LENMAX 252
-#define MC_CMD_READ_FUSES_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_READ_FUSES_OUT_LEN(num) (4+1*(num))
-#define MC_CMD_READ_FUSES_OUT_DATA_NUM(len) (((len)-4)/1)
-/* Length of returned OTP data in bytes */
-#define MC_CMD_READ_FUSES_OUT_LENGTH_OFST 0
-#define MC_CMD_READ_FUSES_OUT_LENGTH_LEN 4
-/* Returned data */
-#define MC_CMD_READ_FUSES_OUT_DATA_OFST 4
-#define MC_CMD_READ_FUSES_OUT_DATA_LEN 1
-#define MC_CMD_READ_FUSES_OUT_DATA_MINNUM 0
-#define MC_CMD_READ_FUSES_OUT_DATA_MAXNUM 248
-#define MC_CMD_READ_FUSES_OUT_DATA_MAXNUM_MCDI2 1016
-
-
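Because one classic MCDI response carries at most MC_CMD_READ_FUSES_OUT_DATA_MAXNUM (248) bytes, reading a larger OTP region has to be chunked by advancing OFFSET. In the sketch below, mcdi_rpc() and put_le32() are placeholders standing in for whatever transport and byte-order helpers the caller already has; they are not real driver entry points.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

extern int mcdi_rpc(unsigned int cmd, const uint8_t *in, size_t in_len,
		    uint8_t *out, size_t out_size, size_t *out_len);

static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = v & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = (v >> 24) & 0xff;
}

static int read_fuses(uint32_t offset, uint8_t *buf, uint32_t total)
{
	while (total) {
		uint8_t in[MC_CMD_READ_FUSES_IN_LEN];
		uint8_t out[MC_CMD_READ_FUSES_OUT_LENMAX];
		uint32_t chunk = total < MC_CMD_READ_FUSES_OUT_DATA_MAXNUM ?
				 total : MC_CMD_READ_FUSES_OUT_DATA_MAXNUM;
		size_t out_len;
		int rc;

		put_le32(in + MC_CMD_READ_FUSES_IN_OFFSET_OFST, offset);
		put_le32(in + MC_CMD_READ_FUSES_IN_LENGTH_OFST, chunk);
		rc = mcdi_rpc(MC_CMD_READ_FUSES, in, sizeof(in),
			      out, sizeof(out), &out_len);
		if (rc)
			return rc;
		memcpy(buf, out + MC_CMD_READ_FUSES_OUT_DATA_OFST, chunk);
		buf += chunk;
		offset += chunk;
		total -= chunk;
	}
	return 0;
}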
-/***********************************/
/* MC_CMD_KR_TUNE
* Get or set KR Serdes RXEQ and TX Driver settings
*/
#define MC_CMD_KR_TUNE 0xf1
#undef MC_CMD_0xf1_PRIVILEGE_CTG
-#define MC_CMD_0xf1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xf1_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_KR_TUNE_IN msgrequest */
#define MC_CMD_KR_TUNE_IN_LENMIN 4
@@ -21138,262 +21098,6 @@
/***********************************/
-/* MC_CMD_PCIE_TUNE
- * Get or set PCIE Serdes RXEQ and TX Driver settings
- */
-#define MC_CMD_PCIE_TUNE 0xf2
-#undef MC_CMD_0xf2_PRIVILEGE_CTG
-
-#define MC_CMD_0xf2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_PCIE_TUNE_IN msgrequest */
-#define MC_CMD_PCIE_TUNE_IN_LENMIN 4
-#define MC_CMD_PCIE_TUNE_IN_LENMAX 252
-#define MC_CMD_PCIE_TUNE_IN_LENMAX_MCDI2 1020
-#define MC_CMD_PCIE_TUNE_IN_LEN(num) (4+4*(num))
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_NUM(len) (((len)-4)/4)
-/* Requested operation */
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_OFST 0
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_LEN 1
-/* enum: Get current RXEQ settings */
-#define MC_CMD_PCIE_TUNE_IN_RXEQ_GET 0x0
-/* enum: Override RXEQ settings */
-#define MC_CMD_PCIE_TUNE_IN_RXEQ_SET 0x1
-/* enum: Get current TX Driver settings */
-#define MC_CMD_PCIE_TUNE_IN_TXEQ_GET 0x2
-/* enum: Override TX Driver settings */
-#define MC_CMD_PCIE_TUNE_IN_TXEQ_SET 0x3
-/* enum: Start PCIe Serdes Eye diagram plot on a given lane. */
-#define MC_CMD_PCIE_TUNE_IN_START_EYE_PLOT 0x5
-/* enum: Poll PCIe Serdes Eye diagram plot. Returns one row of BER data. The
- * caller should call this command repeatedly after starting the eye plot,
- * until no more data is returned.
- */
-#define MC_CMD_PCIE_TUNE_IN_POLL_EYE_PLOT 0x6
-/* enum: Enable the SERDES BIST and set it to generate a 200MHz square wave */
-#define MC_CMD_PCIE_TUNE_IN_BIST_SQUARE_WAVE 0x7
-/* Align the arguments to 32 bits */
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_OFST 1
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_LEN 3
-/* Arguments specific to the operation */
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_OFST 4
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_LEN 4
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MINNUM 0
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MAXNUM 62
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MAXNUM_MCDI2 254
-
-/* MC_CMD_PCIE_TUNE_OUT msgresponse */
-#define MC_CMD_PCIE_TUNE_OUT_LEN 0
-
-/* MC_CMD_PCIE_TUNE_RXEQ_GET_IN msgrequest */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_LEN 4
-/* Requested operation */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_LEN 1
-/* Align the arguments to 32 bits */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3
-
-/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT msgresponse */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMIN 4
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMAX 252
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_NUM(len) (((len)-0)/4)
-/* RXEQ Parameter */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LEN 4
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM_MCDI2 255
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
-/* enum: Attenuation (0-15) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_ATT 0x0
-/* enum: CTLE Boost (0-15) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_BOOST 0x1
-/* enum: DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP1 0x2
-/* enum: DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x3
-/* enum: DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x4
-/* enum: DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x5
-/* enum: DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x6
-/* enum: DFE DLev */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_DLEV 0x7
-/* enum: Figure of Merit */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_FOM 0x8
-/* enum: CTLE EQ Capacitor (HF Gain) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9
-/* enum: CTLE EQ Resistor (DC Gain) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 5
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_4 0x4 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_5 0x5 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_6 0x6 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_7 0x7 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_8 0x8 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_9 0x9 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_10 0xa /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_11 0xb /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_12 0xc /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_13 0xd /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_14 0xe /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_15 0xf /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x10 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 13
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_LBN 14
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 10
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
-
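Each PARAM dword in the RXEQ_GET response packs an ID, a lane number, the autocalibration flag and the current value into the bit positions defined above. A small decode sketch, with masks derived from the WIDTH values and assuming the dword has already been loaded from the little-endian response:

#include <stdint.h>
#include <stdio.h>

static void print_pcie_rxeq_param(uint32_t param)
{
	unsigned int id = (param >>
		MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN) & 0xff;
	unsigned int lane = (param >>
		MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN) & 0x1f;
	unsigned int autocal = (param >>
		MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN) & 0x1;
	unsigned int value = (param >>
		MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN) & 0xff;

	printf("param id=%u lane=%u autocal=%u current=%u\n",
	       id, lane, autocal, value);
}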
-/* MC_CMD_PCIE_TUNE_RXEQ_SET_IN msgrequest */
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMIN 8
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMAX 252
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMAX_MCDI2 1020
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LEN(num) (4+4*(num))
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_NUM(len) (((len)-4)/4)
-/* Requested operation */
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_LEN 1
-/* Align the arguments to 32 bits */
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_RSVD_OFST 1
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_RSVD_LEN 3
-/* RXEQ Parameter */
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_OFST 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LEN 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MINNUM 1
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MAXNUM 62
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MAXNUM_MCDI2 254
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_OFST 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_LBN 0
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_WIDTH 8
-/* Enum values, see field(s): */
-/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_ID */
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_OFST 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN 8
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_WIDTH 5
-/* Enum values, see field(s): */
-/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_OFST 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN 13
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_WIDTH 1
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_OFST 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_LBN 14
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_WIDTH 2
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_OFST 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN 16
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_OFST 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_LBN 24
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_WIDTH 8
-
-/* MC_CMD_PCIE_TUNE_RXEQ_SET_OUT msgresponse */
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_OUT_LEN 0
-
-/* MC_CMD_PCIE_TUNE_TXEQ_GET_IN msgrequest */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_LEN 4
-/* Requested operation */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_OFST 0
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_LEN 1
-/* Align the arguments to 32 bits */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3
-
-/* MC_CMD_PCIE_TUNE_TXEQ_GET_OUT msgresponse */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMIN 4
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMAX 252
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_NUM(len) (((len)-0)/4)
-/* RXEQ Parameter */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_OFST 0
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LEN 4
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM_MCDI2 255
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_OFST 0
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
-/* enum: TxMargin (PIPE) */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXMARGIN 0x0
-/* enum: TxSwing (PIPE) */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXSWING 0x1
-/* enum: De-emphasis coefficient C(-1) (PIPE) */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CM1 0x2
-/* enum: De-emphasis coefficient C(0) (PIPE) */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_C0 0x3
-/* enum: De-emphasis coefficient C(+1) (PIPE) */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CP1 0x4
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_OFST 0
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 4
-/* Enum values, see field(s): */
-/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_OFST 0
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_LBN 12
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 12
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_OFST 0
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_LBN 24
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
-
-/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN msgrequest */
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LEN 8
-/* Requested operation */
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1
-/* Align the arguments to 32 bits */
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_LEN 4
-
-/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT msgresponse */
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT_LEN 0
-
-/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN msgrequest */
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_LEN 4
-/* Requested operation */
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1
-/* Align the arguments to 32 bits */
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
-
-/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT msgresponse */
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LEN(num) (0+2*(num))
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_NUM(len) (((len)-0)/2)
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM_MCDI2 510
-
-/* MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_IN msgrequest */
-#define MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_IN_LEN 0
-
-/* MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_OUT msgresponse */
-#define MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_LICENSING
* Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition
* - not used for V3 licensing
@@ -21532,56 +21236,6 @@
/***********************************/
-/* MC_CMD_LICENSING_GET_ID_V3
- * Get ID and type from the NVRAM_PARTITION_TYPE_LICENSE application license
- * partition - V3 licensing (Medford)
- */
-#define MC_CMD_LICENSING_GET_ID_V3 0xd1
-#undef MC_CMD_0xd1_PRIVILEGE_CTG
-
-#define MC_CMD_0xd1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_LICENSING_GET_ID_V3_IN msgrequest */
-#define MC_CMD_LICENSING_GET_ID_V3_IN_LEN 0
-
-/* MC_CMD_LICENSING_GET_ID_V3_OUT msgresponse */
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN 8
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX 252
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LEN(num) (8+1*(num))
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_NUM(len) (((len)-8)/1)
-/* type of license (eg 3) */
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_OFST 0
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_LEN 4
-/* length of the license ID (in bytes) */
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_OFST 4
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_LEN 4
-/* the unique license ID of the adapter */
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST 8
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LEN 1
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MINNUM 0
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MAXNUM 244
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MAXNUM_MCDI2 1012
-
-
-/***********************************/
-/* MC_CMD_MC2MC_PROXY
- * Execute an arbitrary MCDI command on the slave MC of a dual-core device.
- * This will fail on a single-core system.
- */
-#define MC_CMD_MC2MC_PROXY 0xf4
-#undef MC_CMD_0xf4_PRIVILEGE_CTG
-
-#define MC_CMD_0xf4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_MC2MC_PROXY_IN msgrequest */
-#define MC_CMD_MC2MC_PROXY_IN_LEN 0
-
-/* MC_CMD_MC2MC_PROXY_OUT msgresponse */
-#define MC_CMD_MC2MC_PROXY_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_GET_LICENSED_APP_STATE
* Query the state of an individual licensed application. (Note that the actual
* state may be invalidated by the MC_CMD_LICENSING OP_UPDATE_LICENSE operation
@@ -21610,424 +21264,6 @@
/***********************************/
-/* MC_CMD_GET_LICENSED_V3_APP_STATE
- * Query the state of an individual licensed application. (Note that the actual
- * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE
- * operation or a reboot of the MC.) Used for V3 licensing (Medford)
- */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE 0xd2
-#undef MC_CMD_0xd2_PRIVILEGE_CTG
-
-#define MC_CMD_0xd2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_LICENSED_V3_APP_STATE_IN msgrequest */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN 8
-/* application ID to query (LICENSED_V3_APPS_xxx) expressed as a single bit
- * mask
- */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_OFST 0
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LEN 8
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_OFST 0
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_LEN 4
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_LBN 0
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_WIDTH 32
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_OFST 4
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_LEN 4
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_LBN 32
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_WIDTH 32
-
-/* MC_CMD_GET_LICENSED_V3_APP_STATE_OUT msgresponse */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN 4
-/* state of this application */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_OFST 0
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_LEN 4
-/* enum: no (or invalid) license is present for the application */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED 0x0
-/* enum: a valid license is present for the application */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LICENSED 0x1
-
-
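The 64-bit APP_ID travels as two little-endian dwords (LO then HI), so a caller splits its single-bit mask accordingly. A sketch, reusing the hypothetical put_le32() helper from the MC_CMD_READ_FUSES example above:

#include <stdint.h>

static void fill_v3_app_state_req(uint8_t *req, unsigned int app_bit)
{
	uint64_t mask = (uint64_t)1 << app_bit;	/* single-bit app mask */

	put_le32(req + MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_OFST,
		 (uint32_t)mask);
	put_le32(req + MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_OFST,
		 (uint32_t)(mask >> 32));
}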
-/***********************************/
-/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES
- * Query the state of one or more licensed features. (Note that the actual
- * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE
- * operation or a reboot of the MC.) Used for V3 licensing (Medford)
- */
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES 0xd3
-#undef MC_CMD_0xd3_PRIVILEGE_CTG
-
-#define MC_CMD_0xd3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN msgrequest */
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_LEN 8
-/* features to query (LICENSED_V3_FEATURES_xxx) expressed as a mask with one or
- * more bits set
- */
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_OFST 0
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LEN 8
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_OFST 0
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_LEN 4
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_LBN 0
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_WIDTH 32
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_OFST 4
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_LEN 4
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_LBN 32
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_WIDTH 32
-
-/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT msgresponse */
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_LEN 8
-/* states of these features - bit set for licensed, clear for not licensed */
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_OFST 0
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LEN 8
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_OFST 0
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_LEN 4
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_LBN 0
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_WIDTH 32
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_OFST 4
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_LEN 4
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_LBN 32
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_WIDTH 32
-
-
-/***********************************/
-/* MC_CMD_LICENSED_APP_OP
- * Perform an action for an individual licensed application - not used for V3
- * licensing.
- */
-#define MC_CMD_LICENSED_APP_OP 0xf6
-#undef MC_CMD_0xf6_PRIVILEGE_CTG
-
-#define MC_CMD_0xf6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_LICENSED_APP_OP_IN msgrequest */
-#define MC_CMD_LICENSED_APP_OP_IN_LENMIN 8
-#define MC_CMD_LICENSED_APP_OP_IN_LENMAX 252
-#define MC_CMD_LICENSED_APP_OP_IN_LENMAX_MCDI2 1020
-#define MC_CMD_LICENSED_APP_OP_IN_LEN(num) (8+4*(num))
-#define MC_CMD_LICENSED_APP_OP_IN_ARGS_NUM(len) (((len)-8)/4)
-/* application ID */
-#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_OFST 0
-#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_LEN 4
-/* the type of operation requested */
-#define MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4
-#define MC_CMD_LICENSED_APP_OP_IN_OP_LEN 4
-/* enum: validate application */
-#define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0
-/* enum: mask application */
-#define MC_CMD_LICENSED_APP_OP_IN_OP_MASK 0x1
-/* arguments specific to this particular operation */
-#define MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST 8
-#define MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4
-#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MINNUM 0
-#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM 61
-#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM_MCDI2 253
-
-/* MC_CMD_LICENSED_APP_OP_OUT msgresponse */
-#define MC_CMD_LICENSED_APP_OP_OUT_LENMIN 0
-#define MC_CMD_LICENSED_APP_OP_OUT_LENMAX 252
-#define MC_CMD_LICENSED_APP_OP_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_LICENSED_APP_OP_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_NUM(len) (((len)-0)/4)
-/* result specific to this particular operation */
-#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_OFST 0
-#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_LEN 4
-#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MINNUM 0
-#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM 63
-#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM_MCDI2 255
-
-/* MC_CMD_LICENSED_APP_OP_VALIDATE_IN msgrequest */
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_LEN 72
-/* application ID */
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_OFST 0
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_LEN 4
-/* the type of operation requested */
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_OFST 4
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_LEN 4
-/* validation challenge */
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_OFST 8
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_LEN 64
-
-/* MC_CMD_LICENSED_APP_OP_VALIDATE_OUT msgresponse */
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_LEN 68
-/* feature expiry (time_t) */
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_OFST 0
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_LEN 4
-/* validation response */
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64
-
-/* MC_CMD_LICENSED_APP_OP_MASK_IN msgrequest */
-#define MC_CMD_LICENSED_APP_OP_MASK_IN_LEN 12
-/* application ID */
-#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_OFST 0
-#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_LEN 4
-/* the type of operation requested */
-#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_OFST 4
-#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_LEN 4
-/* flag */
-#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_OFST 8
-#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_LEN 4
-
-/* MC_CMD_LICENSED_APP_OP_MASK_OUT msgresponse */
-#define MC_CMD_LICENSED_APP_OP_MASK_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_LICENSED_V3_VALIDATE_APP
- * Perform validation for an individual licensed application - V3 licensing
- * (Medford)
- */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP 0xd4
-#undef MC_CMD_0xd4_PRIVILEGE_CTG
-
-#define MC_CMD_0xd4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_LICENSED_V3_VALIDATE_APP_IN msgrequest */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_LEN 56
-/* challenge for validation (384 bits) */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_OFST 0
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_LEN 48
-/* application ID expressed as a single bit mask */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_OFST 48
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LEN 8
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_OFST 48
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_LEN 4
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_LBN 384
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_WIDTH 32
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_OFST 52
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_LEN 4
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_LBN 416
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_WIDTH 32
-
-/* MC_CMD_LICENSED_V3_VALIDATE_APP_OUT msgresponse */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_LEN 116
-/* validation response to challenge in the form of ECDSA signature consisting
- * of two 384-bit integers, r and s, in big-endian order. The signature signs a
- * SHA-384 digest of a message constructed from the concatenation of the input
- * message and the remaining fields of this output message, e.g. challenge[48
- * bytes] ... expiry_time[4 bytes] ...
- */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_OFST 0
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_LEN 96
-/* application expiry time */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_OFST 96
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_LEN 4
-/* application expiry units */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_OFST 100
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_LEN 4
-/* enum: expiry units are accounting units */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_ACC 0x0
-/* enum: expiry units are calendar days */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_DAYS 0x1
-/* base MAC address of the NIC stored in NVRAM (note that this is a constant
- * value for a given NIC regardless of which function is calling; effectively
- * this is the PF0 base MAC address)
- */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_BASE_MACADDR_OFST 104
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_BASE_MACADDR_LEN 6
-/* MAC address of v-adaptor associated with the client. If no such v-adaptor
- * exists, then the field is filled with 0xFF.
- */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_VADAPTOR_MACADDR_OFST 110
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_VADAPTOR_MACADDR_LEN 6
-
-
-/***********************************/
-/* MC_CMD_LICENSED_V3_MASK_FEATURES
- * Mask features - V3 licensing (Medford)
- */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES 0xd5
-#undef MC_CMD_0xd5_PRIVILEGE_CTG
-
-#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_LICENSED_V3_MASK_FEATURES_IN msgrequest */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_LEN 12
-/* mask to be applied to features to be changed */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_OFST 0
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LEN 8
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_OFST 0
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_LEN 4
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_LBN 0
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_WIDTH 32
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_OFST 4
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_LEN 4
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_LBN 32
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_WIDTH 32
-/* whether to turn on or turn off the masked features */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_OFST 8
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_LEN 4
-/* enum: turn the features off */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF 0x0
-/* enum: turn the features back on */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_ON 0x1
-
-/* MC_CMD_LICENSED_V3_MASK_FEATURES_OUT msgresponse */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_LICENSING_V3_TEMPORARY
- * Perform operations to support installation of a single temporary license in
- * the adapter, in addition to those found in the licensing partition. See
- * SF-116124-SW for an overview of how this could be used. The license is
- * stored in MC persistent data and so will survive an MC reboot, but will be
- * erased when the adapter is power-cycled.
- */
-#define MC_CMD_LICENSING_V3_TEMPORARY 0xd6
-#undef MC_CMD_0xd6_PRIVILEGE_CTG
-
-#define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_LICENSING_V3_TEMPORARY_IN msgrequest */
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_LEN 4
-/* operation code */
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_OFST 0
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_LEN 4
-/* enum: install a new license, overwriting any existing temporary license.
- * This is an asynchronous operation owing to the time taken to validate an
- * ECDSA license
- */
-#define MC_CMD_LICENSING_V3_TEMPORARY_SET 0x0
-/* enum: clear the license immediately rather than waiting for the next power
- * cycle
- */
-#define MC_CMD_LICENSING_V3_TEMPORARY_CLEAR 0x1
-/* enum: get the status of the asynchronous MC_CMD_LICENSING_V3_TEMPORARY_SET
- * operation
- */
-#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS 0x2
-
-/* MC_CMD_LICENSING_V3_TEMPORARY_IN_SET msgrequest */
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LEN 164
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_OFST 0
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_LEN 4
-/* ECDSA license and signature */
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_OFST 4
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_LEN 160
-
-/* MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR msgrequest */
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_LEN 4
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_OFST 0
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_LEN 4
-
-/* MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS msgrequest */
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_LEN 4
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_OFST 0
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_LEN 4
-
-/* MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS msgresponse */
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LEN 12
-/* status code */
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_OFST 0
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_LEN 4
-/* enum: finished validating and installing license */
-#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_OK 0x0
-/* enum: license validation and installation in progress */
-#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_IN_PROGRESS 0x1
-/* enum: licensing error. More specific error messages are not provided to
- * avoid exposing details of the licensing system to the client
- */
-#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_ERROR 0x2
-/* bitmask of licensed features */
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_OFST 4
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LEN 8
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_OFST 4
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_LEN 4
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_LBN 32
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_WIDTH 32
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_OFST 8
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_LEN 4
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_LBN 64
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_WIDTH 32
-
-
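Since the SET operation validates an ECDSA license asynchronously, a caller issues STATUS requests until the state leaves IN_PROGRESS. A polling sketch, reusing the hypothetical mcdi_rpc(), put_le32() and mcdi_dword() helpers from earlier examples (delay/backoff between polls is left out):

#include <stddef.h>
#include <stdint.h>

static int wait_temporary_license(void)
{
	for (;;) {
		uint8_t in[MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_LEN];
		uint8_t out[MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LEN];
		size_t out_len;
		uint32_t status;
		int rc;

		put_le32(in + MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_OFST,
			 MC_CMD_LICENSING_V3_TEMPORARY_STATUS);
		rc = mcdi_rpc(MC_CMD_LICENSING_V3_TEMPORARY, in, sizeof(in),
			      out, sizeof(out), &out_len);
		if (rc)
			return rc;
		status = mcdi_dword(out +
			MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_OFST);
		if (status == MC_CMD_LICENSING_V3_TEMPORARY_STATUS_OK)
			return 0;
		if (status != MC_CMD_LICENSING_V3_TEMPORARY_STATUS_IN_PROGRESS)
			return -1;	/* STATUS_ERROR: details not exposed */
	}
}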
-/***********************************/
-/* MC_CMD_SET_PORT_SNIFF_CONFIG
- * Configure RX port sniffing for the physical port associated with the calling
- * function. Only a privileged function may change the port sniffing
- * configuration. A copy of all traffic delivered to the host (non-promiscuous
- * mode) or all traffic arriving at the port (promiscuous mode) may be
- * delivered to a specific queue, or a set of queues with RSS.
- */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG 0xf7
-#undef MC_CMD_0xf7_PRIVILEGE_CTG
-
-#define MC_CMD_0xf7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SET_PORT_SNIFF_CONFIG_IN msgrequest */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN 16
-/* configuration flags */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_OFST 0
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_OFST 0
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN 1
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_WIDTH 1
-/* receive queue handle (for RSS mode, this is the base queue) */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4
-/* receive mode */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4
-/* enum: receive to just the specified queue */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
-/* enum: receive to multiple queues using RSS context */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
-/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note
- * that these handles should be considered opaque to the host, although a value
- * of 0xFFFFFFFF is guaranteed never to be a valid handle.
- */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4
-
-/* MC_CMD_SET_PORT_SNIFF_CONFIG_OUT msgresponse */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_OUT_LEN 0
-
-
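Putting the flag bits and mode together, a request enabling a simple (single-queue, non-RSS) RX sniff might be built as below; put_le32() is the same hypothetical helper as before, and 0xffffffff is used for the unused RSS context since it is documented never to be a valid handle.

#include <stdint.h>

static void fill_port_sniff_req(uint8_t *req, uint32_t rxq, int promiscuous)
{
	uint32_t flags = (1u << MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN) |
			 ((promiscuous ? 1u : 0u) <<
			  MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN);

	put_le32(req + MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST, flags);
	put_le32(req + MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST, rxq);
	put_le32(req + MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST,
		 MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE);
	put_le32(req + MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST,
		 0xffffffffu);
}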
-/***********************************/
-/* MC_CMD_GET_PORT_SNIFF_CONFIG
- * Obtain the current RX port sniffing configuration for the physical port
- * associated with the calling function. Only a privileged function may read
- * the configuration.
- */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG 0xf8
-#undef MC_CMD_0xf8_PRIVILEGE_CTG
-
-#define MC_CMD_0xf8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_PORT_SNIFF_CONFIG_IN msgrequest */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_IN_LEN 0
-
-/* MC_CMD_GET_PORT_SNIFF_CONFIG_OUT msgresponse */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_LEN 16
-/* configuration flags */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_OFST 0
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_OFST 0
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_LBN 1
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_WIDTH 1
-/* receiving queue handle (for RSS mode, this is the base queue) */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4
-/* receive mode */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4
-/* enum: receiving to just the specified queue */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
-/* enum: receiving to multiple queues using RSS context */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
-/* RSS context (for RX_MODE_RSS) */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4
-
-
-/***********************************/
/* MC_CMD_SET_PARSER_DISP_CONFIG
* Change configuration related to the parser-dispatcher subsystem.
*/
@@ -22073,305 +21309,6 @@
/***********************************/
-/* MC_CMD_GET_PARSER_DISP_CONFIG
- * Read configuration related to the parser-dispatcher subsystem.
- */
-#define MC_CMD_GET_PARSER_DISP_CONFIG 0xfa
-#undef MC_CMD_0xfa_PRIVILEGE_CTG
-
-#define MC_CMD_0xfa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_PARSER_DISP_CONFIG_IN msgrequest */
-#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_LEN 8
-/* the type of configuration setting to read */
-#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
-#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_SET_PARSER_DISP_CONFIG/MC_CMD_SET_PARSER_DISP_CONFIG_IN/TYPE */
-/* handle for the entity to query: queue handle, EVB port ID, etc., depending on
- * the type of configuration setting being read
- */
-#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
-#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4
-
-/* MC_CMD_GET_PARSER_DISP_CONFIG_OUT msgresponse */
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMIN 4
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX 252
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_NUM(len) (((len)-0)/4)
-/* current value: the details depend on the type of configuration setting being
- * read
- */
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_OFST 0
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_LEN 4
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MINNUM 1
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM 63
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM_MCDI2 255
-
-
-/***********************************/
-/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG
- * Configure TX port sniffing for the physical port associated with the calling
- * function. Only a privileged function may change the port sniffing
- * configuration. A copy of all traffic transmitted through the port may be
- * delivered to a specific queue, or a set of queues with RSS. Note that these
- * packets are delivered with transmit timestamps in the packet prefix, not
- * receive timestamps, so it is likely that the queue(s) will need to be
- * dedicated as TX sniff receivers.
- */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG 0xfb
-#undef MC_CMD_0xfb_PRIVILEGE_CTG
-
-#define MC_CMD_0xfb_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN msgrequest */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN 16
-/* configuration flags */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_OFST 0
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
-/* receive queue handle (for RSS mode, this is the base queue) */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4
-/* receive mode */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4
-/* enum: receive to just the specified queue */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
-/* enum: receive to multiple queues using RSS context */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
-/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note
- * that these handles should be considered opaque to the host, although a value
- * of 0xFFFFFFFF is guaranteed never to be a valid handle.
- */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4
-
-/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG
- * Obtain the current TX port sniffing configuration for the physical port
- * associated with the calling function. Only a privileged function may read
- * the configuration.
- */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG 0xfc
-#undef MC_CMD_0xfc_PRIVILEGE_CTG
-
-#define MC_CMD_0xfc_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN msgrequest */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN_LEN 0
-
-/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_LEN 16
-/* configuration flags */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_OFST 0
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
-/* receiving queue handle (for RSS mode, this is the base queue) */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4
-/* receive mode */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4
-/* enum: receiving to just the specified queue */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
-/* enum: receiving to multiple queues using RSS context */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
-/* RSS context (for RX_MODE_RSS) */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4
-
-
-/***********************************/
-/* MC_CMD_RMON_STATS_RX_ERRORS
- * Per queue rx error stats.
- */
-#define MC_CMD_RMON_STATS_RX_ERRORS 0xfe
-#undef MC_CMD_0xfe_PRIVILEGE_CTG
-
-#define MC_CMD_0xfe_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_RMON_STATS_RX_ERRORS_IN msgrequest */
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_LEN 8
-/* The rx queue to get stats for. */
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_OFST 0
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_LEN 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_OFST 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_LEN 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_OFST 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_LBN 0
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_STATS_RX_ERRORS_OUT msgresponse */
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_LEN 16
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_OFST 0
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_LEN 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_OFST 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_LEN 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_OFST 8
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_LEN 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_OFST 12
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_LEN 4
-
-
-/***********************************/
-/* MC_CMD_GET_PCIE_RESOURCE_INFO
- * Find out about available PCIE resources
- */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO 0xfd
-#undef MC_CMD_0xfd_PRIVILEGE_CTG
-
-#define MC_CMD_0xfd_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_PCIE_RESOURCE_INFO_IN msgrequest */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_IN_LEN 0
-
-/* MC_CMD_GET_PCIE_RESOURCE_INFO_OUT msgresponse */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN 28
-/* The maximum number of PFs the device can expose */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_OFST 0
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_LEN 4
-/* The maximum number of VFs the device can expose in total */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_OFST 4
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_LEN 4
-/* The maximum number of MSI-X vectors the device can provide in total */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_OFST 8
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_LEN 4
-/* the number of MSI-X vectors the device will allocate by default to each PF
- */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_OFST 12
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_LEN 4
-/* the number of MSI-X vectors the device will allocate by default to each VF
- */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_OFST 16
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_LEN 4
-/* the maximum number of MSI-X vectors the device can allocate to any one PF */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_OFST 20
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_LEN 4
-/* the maximum number of MSI-X vectors the device can allocate to any one VF */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_OFST 24
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_LEN 4
-
-
-/***********************************/
-/* MC_CMD_GET_PORT_MODES
- * Find out about available port modes
- */
-#define MC_CMD_GET_PORT_MODES 0xff
-#undef MC_CMD_0xff_PRIVILEGE_CTG
-
-#define MC_CMD_0xff_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_PORT_MODES_IN msgrequest */
-#define MC_CMD_GET_PORT_MODES_IN_LEN 0
-
-/* MC_CMD_GET_PORT_MODES_OUT msgresponse */
-#define MC_CMD_GET_PORT_MODES_OUT_LEN 12
-/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*)
- * that are supported for customer use in production firmware.
- */
-#define MC_CMD_GET_PORT_MODES_OUT_MODES_OFST 0
-#define MC_CMD_GET_PORT_MODES_OUT_MODES_LEN 4
-/* Default (canonical) board mode */
-#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_OFST 4
-#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_LEN 4
-/* Current board mode */
-#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST 8
-#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_LEN 4
-
-/* MC_CMD_GET_PORT_MODES_OUT_V2 msgresponse */
-#define MC_CMD_GET_PORT_MODES_OUT_V2_LEN 16
-/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*)
- * that are supported for customer use in production firmware.
- */
-#define MC_CMD_GET_PORT_MODES_OUT_V2_MODES_OFST 0
-#define MC_CMD_GET_PORT_MODES_OUT_V2_MODES_LEN 4
-/* Default (canonical) board mode */
-#define MC_CMD_GET_PORT_MODES_OUT_V2_DEFAULT_MODE_OFST 4
-#define MC_CMD_GET_PORT_MODES_OUT_V2_DEFAULT_MODE_LEN 4
-/* Current board mode */
-#define MC_CMD_GET_PORT_MODES_OUT_V2_CURRENT_MODE_OFST 8
-#define MC_CMD_GET_PORT_MODES_OUT_V2_CURRENT_MODE_LEN 4
-/* Bitmask of engineering port modes available on the board (indexed by
- * TLV_PORT_MODE_*). A superset of MC_CMD_GET_PORT_MODES_OUT/MODES that
- * contains all modes implemented in firmware for a particular board. Modes
- * listed in MODES are considered production modes and should be exposed in
- * userland tools. Modes listed in ENGINEERING_MODES but not in MODES
- * should be considered hidden (not to be exposed in userland tools) and for
- * engineering use only. There are no other semantic differences and any mode
- * listed in either MODES or ENGINEERING_MODES can be set on the board.
- */
-#define MC_CMD_GET_PORT_MODES_OUT_V2_ENGINEERING_MODES_OFST 12
-#define MC_CMD_GET_PORT_MODES_OUT_V2_ENGINEERING_MODES_LEN 4
-
-
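Per the semantics above, the engineering-only (hidden) modes are exactly those set in ENGINEERING_MODES but clear in MODES, e.g.:

#include <stdint.h>

/* Modes a userland tool should hide: implemented but not production */
static uint32_t hidden_port_modes(uint32_t modes, uint32_t eng_modes)
{
	return eng_modes & ~modes;
}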
-/***********************************/
-/* MC_CMD_OVERRIDE_PORT_MODE
- * Override flash config port mode for subsequent MC reboot(s). Override data
- * is stored in the persistent data section of DMEM and activated on the next
- * MC warm reboot. A cold reboot resets the override. It is assumed that a
- * sufficient number of PFs are available and that port mapping is valid for
- * the new port mode, as the override does not affect PF configuration.
- */
-#define MC_CMD_OVERRIDE_PORT_MODE 0x137
-#undef MC_CMD_0x137_PRIVILEGE_CTG
-
-#define MC_CMD_0x137_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_OVERRIDE_PORT_MODE_IN msgrequest */
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_LEN 8
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_FLAGS_OFST 0
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_FLAGS_LEN 4
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_OFST 0
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_LBN 0
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_WIDTH 1
-/* New mode (TLV_PORT_MODE_*) to set, if override enabled */
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_MODE_OFST 4
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_MODE_LEN 4
-
-/* MC_CMD_OVERRIDE_PORT_MODE_OUT msgresponse */
-#define MC_CMD_OVERRIDE_PORT_MODE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_READ_ATB
- * Sample voltages on the ATB
- */
-#define MC_CMD_READ_ATB 0x100
-#undef MC_CMD_0x100_PRIVILEGE_CTG
-
-#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_READ_ATB_IN msgrequest */
-#define MC_CMD_READ_ATB_IN_LEN 16
-#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_OFST 0
-#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_LEN 4
-#define MC_CMD_READ_ATB_IN_BUS_CCOM 0x0 /* enum */
-#define MC_CMD_READ_ATB_IN_BUS_CKR 0x1 /* enum */
-#define MC_CMD_READ_ATB_IN_BUS_CPCIE 0x8 /* enum */
-#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_OFST 4
-#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_LEN 4
-#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_OFST 8
-#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_LEN 4
-#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_OFST 12
-#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_LEN 4
-
-/* MC_CMD_READ_ATB_OUT msgresponse */
-#define MC_CMD_READ_ATB_OUT_LEN 4
-#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_OFST 0
-#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_LEN 4
-
-
-/***********************************/
/* MC_CMD_GET_WORKAROUNDS
* Read the list of all implemented and all currently enabled workarounds. The
* enums here must correspond with those in MC_CMD_WORKAROUND.
@@ -22538,447 +21475,6 @@
#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_LEN 4
-
-/***********************************/
-/* MC_CMD_GET_SNAPSHOT_LENGTH
- * Obtain the current range of allowable values for the SNAPSHOT_LENGTH
- * parameter to MC_CMD_INIT_RXQ.
- */
-#define MC_CMD_GET_SNAPSHOT_LENGTH 0x101
-#undef MC_CMD_0x101_PRIVILEGE_CTG
-
-#define MC_CMD_0x101_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_SNAPSHOT_LENGTH_IN msgrequest */
-#define MC_CMD_GET_SNAPSHOT_LENGTH_IN_LEN 0
-
-/* MC_CMD_GET_SNAPSHOT_LENGTH_OUT msgresponse */
-#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_LEN 8
-/* Minimum acceptable snapshot length. */
-#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_OFST 0
-#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_LEN 4
-/* Maximum acceptable snapshot length. */
-#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_OFST 4
-#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_LEN 4
-
-
-/***********************************/
-/* MC_CMD_FUSE_DIAGS
- * Additional fuse diagnostics
- */
-#define MC_CMD_FUSE_DIAGS 0x102
-#undef MC_CMD_0x102_PRIVILEGE_CTG
-
-#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_FUSE_DIAGS_IN msgrequest */
-#define MC_CMD_FUSE_DIAGS_IN_LEN 0
-
-/* MC_CMD_FUSE_DIAGS_OUT msgresponse */
-#define MC_CMD_FUSE_DIAGS_OUT_LEN 48
-/* Total number of mismatched bits between pairs in area 0 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_OFST 0
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_LEN 4
-/* Total number of unexpectedly clear (set in B but not A) bits in area 0 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_OFST 4
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_LEN 4
-/* Total number of unexpectedly clear (set in A but not B) bits in area 0 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_OFST 8
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_LEN 4
-/* Checksum of data after logical OR of pairs in area 0 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_OFST 12
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_LEN 4
-/* Total number of mismatched bits between pairs in area 1 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_OFST 16
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_LEN 4
-/* Total number of unexpectedly clear (set in B but not A) bits in area 1 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_OFST 20
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_LEN 4
-/* Total number of unexpectedly clear (set in A but not B) bits in area 1 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_OFST 24
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_LEN 4
-/* Checksum of data after logical OR of pairs in area 1 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_OFST 28
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_LEN 4
-/* Total number of mismatched bits between pairs in area 2 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_OFST 32
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_LEN 4
-/* Total number of unexpectedly clear (set in B but not A) bits in area 2 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_OFST 36
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_LEN 4
-/* Total number of unexpectedly clear (set in A but not B) bits in area 2 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_OFST 40
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_LEN 4
-/* Checksum of data after logical OR of pairs in area 2 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_OFST 44
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_LEN 4
-
-
-/***********************************/
-/* MC_CMD_PRIVILEGE_MODIFY
- * Modify the privileges of a set of PCIe functions. Note that this operation
- * only affects non-admin functions unless the admin privilege itself is
- * included in one of the masks provided.
- */
-#define MC_CMD_PRIVILEGE_MODIFY 0x60
-#undef MC_CMD_0x60_PRIVILEGE_CTG
-
-#define MC_CMD_0x60_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_PRIVILEGE_MODIFY_IN msgrequest */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_LEN 16
-/* The groups of functions to have their privilege masks modified. */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_OFST 0
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_LEN 4
-#define MC_CMD_PRIVILEGE_MODIFY_IN_NONE 0x0 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_ALL 0x1 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY 0x2 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_ONLY 0x3 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_OF_PF 0x4 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_ONE 0x5 /* enum */
-/* For VFS_OF_PF specify the PF, for ONE specify the target function */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_LEN 4
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_OFST 4
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN 0
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_WIDTH 16
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_OFST 4
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN 16
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_WIDTH 16
-/* Privileges to be added to the target functions. For privilege definitions
- * refer to the command MC_CMD_PRIVILEGE_MASK
- */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_OFST 8
-#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_LEN 4
-/* Privileges to be removed from the target functions. For privilege
- * definitions refer to the command MC_CMD_PRIVILEGE_MASK
- */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_OFST 12
-#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_LEN 4
-
-/* MC_CMD_PRIVILEGE_MODIFY_OUT msgresponse */
-#define MC_CMD_PRIVILEGE_MODIFY_OUT_LEN 0
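
The FUNCTION dword packs two 16-bit sub-fields (PF at LBN 0, VF at LBN 16). A sketch, reusing put_le32() from the first example, that targets the VFs of one PF; the privilege mask values come from MC_CMD_PRIVILEGE_MASK, which is defined elsewhere:

static void build_privilege_modify_vfs_of_pf(uint8_t req[16], uint16_t pf,
                                             uint32_t add_mask,
                                             uint32_t remove_mask)
{
        put_le32(req, 0, 0x4);          /* FN_GROUP: VFS_OF_PF */
        put_le32(req, 4, pf);           /* FUNCTION: PF in bits 0-15; the VF
                                         * sub-field (bits 16-31) is unused */
        put_le32(req, 8, add_mask);     /* privileges to add */
        put_le32(req, 12, remove_mask); /* privileges to remove */
}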
-
-
-/***********************************/
-/* MC_CMD_XPM_READ_BYTES
- * Read XPM memory
- */
-#define MC_CMD_XPM_READ_BYTES 0x103
-#undef MC_CMD_0x103_PRIVILEGE_CTG
-
-#define MC_CMD_0x103_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_XPM_READ_BYTES_IN msgrequest */
-#define MC_CMD_XPM_READ_BYTES_IN_LEN 8
-/* Start address (byte) */
-#define MC_CMD_XPM_READ_BYTES_IN_ADDR_OFST 0
-#define MC_CMD_XPM_READ_BYTES_IN_ADDR_LEN 4
-/* Count (bytes) */
-#define MC_CMD_XPM_READ_BYTES_IN_COUNT_OFST 4
-#define MC_CMD_XPM_READ_BYTES_IN_COUNT_LEN 4
-
-/* MC_CMD_XPM_READ_BYTES_OUT msgresponse */
-#define MC_CMD_XPM_READ_BYTES_OUT_LENMIN 0
-#define MC_CMD_XPM_READ_BYTES_OUT_LENMAX 252
-#define MC_CMD_XPM_READ_BYTES_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_XPM_READ_BYTES_OUT_LEN(num) (0+1*(num))
-#define MC_CMD_XPM_READ_BYTES_OUT_DATA_NUM(len) (((len)-0)/1)
-/* Data */
-#define MC_CMD_XPM_READ_BYTES_OUT_DATA_OFST 0
-#define MC_CMD_XPM_READ_BYTES_OUT_DATA_LEN 1
-#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MINNUM 0
-#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM 252
-#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM_MCDI2 1020
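
One response carries at most 252 data bytes on MCDIv1 (OUT_LENMAX), so a larger read has to be paged. A sketch; mcdi_rpc() is a placeholder transport callback, not a real driver entry point:

static int xpm_read(uint8_t *dst, uint32_t addr, uint32_t count,
                    int (*mcdi_rpc)(const uint8_t *in, size_t inlen,
                                    uint8_t *out, size_t outlen))
{
        while (count) {
                /* OUT_LENMAX caps one response at 252 data bytes */
                uint32_t chunk = count < 252 ? count : 252;
                uint8_t in[8];                  /* IN_LEN */

                put_le32(in, 0, addr);          /* IN_ADDR_OFST */
                put_le32(in, 4, chunk);         /* IN_COUNT_OFST */
                if (mcdi_rpc(in, sizeof(in), dst, chunk))
                        return -1;
                dst += chunk;
                addr += chunk;
                count -= chunk;
        }
        return 0;
}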
-
-
-/***********************************/
-/* MC_CMD_XPM_WRITE_BYTES
- * Write XPM memory
- */
-#define MC_CMD_XPM_WRITE_BYTES 0x104
-#undef MC_CMD_0x104_PRIVILEGE_CTG
-
-#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_WRITE_BYTES_IN msgrequest */
-#define MC_CMD_XPM_WRITE_BYTES_IN_LENMIN 8
-#define MC_CMD_XPM_WRITE_BYTES_IN_LENMAX 252
-#define MC_CMD_XPM_WRITE_BYTES_IN_LENMAX_MCDI2 1020
-#define MC_CMD_XPM_WRITE_BYTES_IN_LEN(num) (8+1*(num))
-#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_NUM(len) (((len)-8)/1)
-/* Start address (byte) */
-#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_OFST 0
-#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_LEN 4
-/* Count (bytes) */
-#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_OFST 4
-#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_LEN 4
-/* Data */
-#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_OFST 8
-#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_LEN 1
-#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MINNUM 0
-#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MAXNUM 244
-#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MAXNUM_MCDI2 1012
-
-/* MC_CMD_XPM_WRITE_BYTES_OUT msgresponse */
-#define MC_CMD_XPM_WRITE_BYTES_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_XPM_READ_SECTOR
- * Read XPM sector
- */
-#define MC_CMD_XPM_READ_SECTOR 0x105
-#undef MC_CMD_0x105_PRIVILEGE_CTG
-
-#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_READ_SECTOR_IN msgrequest */
-#define MC_CMD_XPM_READ_SECTOR_IN_LEN 8
-/* Sector index */
-#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_OFST 0
-#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_LEN 4
-/* Sector size */
-#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_OFST 4
-#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_LEN 4
-
-/* MC_CMD_XPM_READ_SECTOR_OUT msgresponse */
-#define MC_CMD_XPM_READ_SECTOR_OUT_LENMIN 4
-#define MC_CMD_XPM_READ_SECTOR_OUT_LENMAX 36
-#define MC_CMD_XPM_READ_SECTOR_OUT_LENMAX_MCDI2 36
-#define MC_CMD_XPM_READ_SECTOR_OUT_LEN(num) (4+1*(num))
-#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_NUM(len) (((len)-4)/1)
-/* Sector type */
-#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_OFST 0
-#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_LEN 4
-#define MC_CMD_XPM_READ_SECTOR_OUT_BLANK 0x0 /* enum */
-#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128 0x1 /* enum */
-#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256 0x2 /* enum */
-#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_DATA 0x3 /* enum */
-#define MC_CMD_XPM_READ_SECTOR_OUT_INVALID 0xff /* enum */
-/* Sector data */
-#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_OFST 4
-#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_LEN 1
-#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MINNUM 0
-#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MAXNUM 32
-#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MAXNUM_MCDI2 32
-
-
-/***********************************/
-/* MC_CMD_XPM_WRITE_SECTOR
- * Write XPM sector
- */
-#define MC_CMD_XPM_WRITE_SECTOR 0x106
-#undef MC_CMD_0x106_PRIVILEGE_CTG
-
-#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_WRITE_SECTOR_IN msgrequest */
-#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMIN 12
-#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMAX 44
-#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMAX_MCDI2 44
-#define MC_CMD_XPM_WRITE_SECTOR_IN_LEN(num) (12+1*(num))
-#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_NUM(len) (((len)-12)/1)
-/* If writing fails due to an uncorrectable error, try up to RETRIES following
- * sectors (or until no more space available). If 0, only one write attempt is
- * made. Note that uncorrectable errors are unlikely, thanks to the XPM
- * self-repair mechanism.
- */
-#define MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_OFST 0
-#define MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_LEN 1
-#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_OFST 1
-#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_LEN 3
-/* Sector type */
-#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4
-#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_XPM_READ_SECTOR/MC_CMD_XPM_READ_SECTOR_OUT/TYPE */
-/* Sector size */
-#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8
-#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_LEN 4
-/* Sector data */
-#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_OFST 12
-#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_LEN 1
-#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MINNUM 0
-#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MAXNUM 32
-#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MAXNUM_MCDI2 32
-
-/* MC_CMD_XPM_WRITE_SECTOR_OUT msgresponse */
-#define MC_CMD_XPM_WRITE_SECTOR_OUT_LEN 4
-/* New sector index */
-#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_OFST 0
-#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_LEN 4
-
-
-/***********************************/
-/* MC_CMD_XPM_INVALIDATE_SECTOR
- * Invalidate XPM sector
- */
-#define MC_CMD_XPM_INVALIDATE_SECTOR 0x107
-#undef MC_CMD_0x107_PRIVILEGE_CTG
-
-#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_INVALIDATE_SECTOR_IN msgrequest */
-#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_LEN 4
-/* Sector index */
-#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_OFST 0
-#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_LEN 4
-
-/* MC_CMD_XPM_INVALIDATE_SECTOR_OUT msgresponse */
-#define MC_CMD_XPM_INVALIDATE_SECTOR_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_XPM_BLANK_CHECK
- * Blank-check XPM memory and report bad locations
- */
-#define MC_CMD_XPM_BLANK_CHECK 0x108
-#undef MC_CMD_0x108_PRIVILEGE_CTG
-
-#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_BLANK_CHECK_IN msgrequest */
-#define MC_CMD_XPM_BLANK_CHECK_IN_LEN 8
-/* Start address (byte) */
-#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_OFST 0
-#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_LEN 4
-/* Count (bytes) */
-#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_OFST 4
-#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_LEN 4
-
-/* MC_CMD_XPM_BLANK_CHECK_OUT msgresponse */
-#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN 4
-#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX 252
-#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_XPM_BLANK_CHECK_OUT_LEN(num) (4+2*(num))
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_NUM(len) (((len)-4)/2)
-/* Total number of bad (non-blank) locations */
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST 0
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_LEN 4
- * Addresses of bad locations (may be fewer than BAD_COUNT, if they do not all
- * fit into the MCDI response)
- */
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_OFST 4
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_LEN 2
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MINNUM 0
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MAXNUM 124
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MAXNUM_MCDI2 508
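
BAD_ADDR_NUM(len) recovers how many 16-bit addresses are actually present in the response, which may be fewer than BAD_COUNT. A decode sketch, using mcdi_le32() from the first example:

static void show_blank_check(const uint8_t *resp, size_t len)
{
        uint32_t bad_count = mcdi_le32(resp, 0); /* BAD_COUNT_OFST */
        size_t present = (len - 4) / 2;          /* BAD_ADDR_NUM(len) */
        size_t i;

        for (i = 0; i < present; i++)            /* BAD_ADDR entries: 2 bytes */
                printf("bad location 0x%04x\n",
                       (unsigned int)(resp[4 + 2 * i] |
                                      resp[5 + 2 * i] << 8));
        if (present < bad_count)
                printf("%u more did not fit in the response\n",
                       (unsigned int)(bad_count - present));
}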
-
-
-/***********************************/
-/* MC_CMD_XPM_REPAIR
- * Blank-check and repair XPM memory
- */
-#define MC_CMD_XPM_REPAIR 0x109
-#undef MC_CMD_0x109_PRIVILEGE_CTG
-
-#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_REPAIR_IN msgrequest */
-#define MC_CMD_XPM_REPAIR_IN_LEN 8
-/* Start address (byte) */
-#define MC_CMD_XPM_REPAIR_IN_ADDR_OFST 0
-#define MC_CMD_XPM_REPAIR_IN_ADDR_LEN 4
-/* Count (bytes) */
-#define MC_CMD_XPM_REPAIR_IN_COUNT_OFST 4
-#define MC_CMD_XPM_REPAIR_IN_COUNT_LEN 4
-
-/* MC_CMD_XPM_REPAIR_OUT msgresponse */
-#define MC_CMD_XPM_REPAIR_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_XPM_DECODER_TEST
- * Test XPM memory address decoders for gross manufacturing defects. Can only
- * be performed on an unprogrammed part.
- */
-#define MC_CMD_XPM_DECODER_TEST 0x10a
-#undef MC_CMD_0x10a_PRIVILEGE_CTG
-
-#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_DECODER_TEST_IN msgrequest */
-#define MC_CMD_XPM_DECODER_TEST_IN_LEN 0
-
-/* MC_CMD_XPM_DECODER_TEST_OUT msgresponse */
-#define MC_CMD_XPM_DECODER_TEST_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_XPM_WRITE_TEST
- * XPM memory write test. Test XPM write logic for gross manufacturing defects
- * by writing to a dedicated test row. There are 16 locations in the test row
- * and the test can only be performed on locations that have not been
- * previously used (i.e. can be run at most 16 times). The test will pick the
- * first available location to use, or fail with ENOSPC if none are left.
- */
-#define MC_CMD_XPM_WRITE_TEST 0x10b
-#undef MC_CMD_0x10b_PRIVILEGE_CTG
-
-#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_WRITE_TEST_IN msgrequest */
-#define MC_CMD_XPM_WRITE_TEST_IN_LEN 0
-
-/* MC_CMD_XPM_WRITE_TEST_OUT msgresponse */
-#define MC_CMD_XPM_WRITE_TEST_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_EXEC_SIGNED
- * Check the CMAC of the contents of IMEM and DMEM against the value supplied
- * and if correct begin execution from the start of IMEM. The caller supplies a
- * key ID, the length of IMEM and DMEM to validate and the expected CMAC. CMAC
- * computation runs from the start of IMEM, and from the start of DMEM + 16k,
- * to match flash booting. The command will respond with EINVAL if the CMAC
- * does not match; otherwise it will respond with success before it jumps to
- * IMEM.
- */
-#define MC_CMD_EXEC_SIGNED 0x10c
-#undef MC_CMD_0x10c_PRIVILEGE_CTG
-
-#define MC_CMD_0x10c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_EXEC_SIGNED_IN msgrequest */
-#define MC_CMD_EXEC_SIGNED_IN_LEN 28
-/* the length of code to include in the CMAC */
-#define MC_CMD_EXEC_SIGNED_IN_CODELEN_OFST 0
-#define MC_CMD_EXEC_SIGNED_IN_CODELEN_LEN 4
-/* the length of data to include in the CMAC */
-#define MC_CMD_EXEC_SIGNED_IN_DATALEN_OFST 4
-#define MC_CMD_EXEC_SIGNED_IN_DATALEN_LEN 4
-/* the XPM sector containing the key to use */
-#define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_OFST 8
-#define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_LEN 4
-/* the expected CMAC value */
-#define MC_CMD_EXEC_SIGNED_IN_CMAC_OFST 12
-#define MC_CMD_EXEC_SIGNED_IN_CMAC_LEN 16
-
-/* MC_CMD_EXEC_SIGNED_OUT msgresponse */
-#define MC_CMD_EXEC_SIGNED_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_PREPARE_SIGNED
- * Prepare to upload a signed image. This will scrub the specified length of
- * the data region, which must be at least as large as the DATALEN supplied to
- * MC_CMD_EXEC_SIGNED.
- */
-#define MC_CMD_PREPARE_SIGNED 0x10d
-#undef MC_CMD_0x10d_PRIVILEGE_CTG
-
-#define MC_CMD_0x10d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_PREPARE_SIGNED_IN msgrequest */
-#define MC_CMD_PREPARE_SIGNED_IN_LEN 4
-/* the length of data area to clear */
-#define MC_CMD_PREPARE_SIGNED_IN_DATALEN_OFST 0
-#define MC_CMD_PREPARE_SIGNED_IN_DATALEN_LEN 4
-
-/* MC_CMD_PREPARE_SIGNED_OUT msgresponse */
-#define MC_CMD_PREPARE_SIGNED_OUT_LEN 0
-
-
/* TUNNEL_ENCAP_UDP_PORT_ENTRY structuredef */
#define TUNNEL_ENCAP_UDP_PORT_ENTRY_LEN 4
/* UDP port (the standard ports are named below but any port may be used) */
@@ -23049,110 +21545,6 @@
/***********************************/
-/* MC_CMD_RX_BALANCING
- * Configure a port upconverter to distribute the packets on both RX engines.
- * Packets are distributed based on a table with the destination vFIFO. The
- * index of the table is a hash of source and destination of IPV4 and VLAN
- * priority.
- */
-#define MC_CMD_RX_BALANCING 0x118
-#undef MC_CMD_0x118_PRIVILEGE_CTG
-
-#define MC_CMD_0x118_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_RX_BALANCING_IN msgrequest */
-#define MC_CMD_RX_BALANCING_IN_LEN 16
-/* The RX port whose upconverter table will be modified */
-#define MC_CMD_RX_BALANCING_IN_PORT_OFST 0
-#define MC_CMD_RX_BALANCING_IN_PORT_LEN 4
-/* The VLAN priority associated to the table index and vFIFO */
-#define MC_CMD_RX_BALANCING_IN_PRIORITY_OFST 4
-#define MC_CMD_RX_BALANCING_IN_PRIORITY_LEN 4
-/* The resulting bit of SRC^DST for indexing the table */
-#define MC_CMD_RX_BALANCING_IN_SRC_DST_OFST 8
-#define MC_CMD_RX_BALANCING_IN_SRC_DST_LEN 4
-/* The RX engine to which the vFIFO in the table entry will point */
-#define MC_CMD_RX_BALANCING_IN_ENG_OFST 12
-#define MC_CMD_RX_BALANCING_IN_ENG_LEN 4
-
-/* MC_CMD_RX_BALANCING_OUT msgresponse */
-#define MC_CMD_RX_BALANCING_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_NVRAM_PRIVATE_APPEND
- * Append a single TLV to the MC_USAGE_TLV partition. Returns MC_CMD_ERR_EEXIST
- * if the tag is already present.
- */
-#define MC_CMD_NVRAM_PRIVATE_APPEND 0x11c
-#undef MC_CMD_0x11c_PRIVILEGE_CTG
-
-#define MC_CMD_0x11c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_NVRAM_PRIVATE_APPEND_IN msgrequest */
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMIN 9
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMAX 252
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMAX_MCDI2 1020
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LEN(num) (8+1*(num))
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_NUM(len) (((len)-8)/1)
-/* The tag to be appended */
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_OFST 0
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_LEN 4
-/* The length of the data */
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_OFST 4
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_LEN 4
-/* The data to be contained in the TLV structure */
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_OFST 8
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_LEN 1
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MINNUM 1
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MAXNUM 244
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MAXNUM_MCDI2 1012
-
-/* MC_CMD_NVRAM_PRIVATE_APPEND_OUT msgresponse */
-#define MC_CMD_NVRAM_PRIVATE_APPEND_OUT_LEN 0
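
A sketch of assembling the variable-length request: IN_LEN(num) is 8 + num, and DATA_BUFFER_MINNUM 1 means at least one data byte must follow the fixed header (hence LENMIN 9). memcpy() is from <string.h>; put_le32() is from the first example:

static size_t build_private_append(uint8_t *req, uint32_t tag,
                                   const uint8_t *data, uint32_t len)
{
        put_le32(req, 0, tag);          /* IN_TAG_OFST */
        put_le32(req, 4, len);          /* IN_LENGTH_OFST */
        memcpy(req + 8, data, len);     /* IN_DATA_BUFFER_OFST; len >= 1 */
        return 8 + len;                 /* IN_LEN(num) = 8 + 1 * num */
}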
-
-
-/***********************************/
-/* MC_CMD_XPM_VERIFY_CONTENTS
- * Verify that the contents of the XPM memory is correct (Medford only). This
- * is used during manufacture to check that the XPM memory has been programmed
- * correctly at ATE.
- */
-#define MC_CMD_XPM_VERIFY_CONTENTS 0x11b
-#undef MC_CMD_0x11b_PRIVILEGE_CTG
-
-#define MC_CMD_0x11b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_XPM_VERIFY_CONTENTS_IN msgrequest */
-#define MC_CMD_XPM_VERIFY_CONTENTS_IN_LEN 4
-/* Data type to be checked */
-#define MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_OFST 0
-#define MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_LEN 4
-
-/* MC_CMD_XPM_VERIFY_CONTENTS_OUT msgresponse */
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMIN 12
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMAX 252
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LEN(num) (12+1*(num))
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_NUM(len) (((len)-12)/1)
-/* Number of sectors found (test builds only) */
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_OFST 0
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_LEN 4
-/* Number of bytes found (test builds only) */
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_OFST 4
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_LEN 4
-/* Length of signature */
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_OFST 8
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_LEN 4
-/* Signature */
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_OFST 12
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_LEN 1
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MINNUM 0
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MAXNUM 240
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MAXNUM_MCDI2 1008
-
-
-/***********************************/
/* MC_CMD_SET_EVQ_TMR
* Update the timer load, timer reload and timer mode values for a given EVQ.
* The requested timer values (in TMR_LOAD_REQ_NS and TMR_RELOAD_REQ_NS) will
@@ -23262,576 +21654,6 @@
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_OFST 32
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_LEN 4
-
-/***********************************/
-/* MC_CMD_ALLOCATE_TX_VFIFO_CP
- * When we use the TX_vFIFO_ULL mode, we can allocate common pools using the
- * unused switch buffers.
- */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP 0x11d
-#undef MC_CMD_0x11d_PRIVILEGE_CTG
-
-#define MC_CMD_0x11d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN msgrequest */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_LEN 20
-/* Desired instance. Must be set to a specific instance, which is a function
- * local queue index. The calling client must be the currently-assigned user of
- * this VI (see MC_CMD_SET_VI_USER).
- */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_OFST 0
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_LEN 4
-/* Will the common pool be used as TX_vFIFO_ULL (1) */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_OFST 4
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_LEN 4
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_ENABLED 0x1 /* enum */
-/* enum: Using this interface without TX_vFIFO_ULL is not supported for now */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_DISABLED 0x0
-/* Number of buffers to reserve for the common pool */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_OFST 8
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_LEN 4
-/* TX datapath to which the Common Pool is connected. */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_OFST 12
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_LEN 4
-/* enum: Extracts information from function */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1
-/* Network port or RX Engine to which the common pool connects. */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_OFST 16
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_LEN 4
-/* enum: Extracts information from function */
-/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1 */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT0 0x0 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT1 0x1 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT2 0x2 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT3 0x3 /* enum */
-/* enum: To enable Switch loopback with Rx engine 0 */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE0 0x4
-/* enum: To enable Switch loopback with Rx engine 1 */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE1 0x5
-
-/* MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT msgresponse */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_LEN 4
-/* ID of the common pool allocated */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_OFST 0
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_LEN 4
-
-
-/***********************************/
-/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO
- * When we use the TX_vFIFO_ULL mode, we can allocate vFIFOs using the
- * previously allocated common pools.
- */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO 0x11e
-#undef MC_CMD_0x11e_PRIVILEGE_CTG
-
-#define MC_CMD_0x11e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN msgrequest */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LEN 20
-/* Common pool previously allocated to which the new vFIFO will be associated
- */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_OFST 0
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_LEN 4
-/* Port or RX engine to associate the vFIFO egress */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_OFST 4
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_LEN 4
-/* enum: Extracts information from common pool */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_USE_CP_VALUE -0x1
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT0 0x0 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT1 0x1 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT2 0x2 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT3 0x3 /* enum */
-/* enum: To enable Switch loopback with Rx engine 0 */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE0 0x4
-/* enum: To enable Switch loopback with Rx engine 1 */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE1 0x5
-/* Minimum number of buffers that the pool must have */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_OFST 8
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_LEN 4
-/* enum: Do not check the space available */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_NO_MINIMUM 0x0
-/* Will the vFIFO be used as TX_vFIFO_ULL */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_OFST 12
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_LEN 4
-/* Network priority of the vFIFO, if applicable */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_OFST 16
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_LEN 4
-/* enum: Search for the lowest unused priority */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LOWEST_AVAILABLE -0x1
-
-/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT msgresponse */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_LEN 8
-/* Short vFIFO ID */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_OFST 0
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_LEN 4
-/* Network priority of the vFIFO */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_OFST 4
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_LEN 4
-
-
-/***********************************/
-/* MC_CMD_TEARDOWN_TX_VFIFO_VF
- * This interface clears the configuration of the given vFIFO and leaves it
- * ready to be re-used.
- */
-#define MC_CMD_TEARDOWN_TX_VFIFO_VF 0x11f
-#undef MC_CMD_0x11f_PRIVILEGE_CTG
-
-#define MC_CMD_0x11f_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_TEARDOWN_TX_VFIFO_VF_IN msgrequest */
-#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_LEN 4
-/* Short vFIFO ID */
-#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_OFST 0
-#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_LEN 4
-
-/* MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT msgresponse */
-#define MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_DEALLOCATE_TX_VFIFO_CP
- * This interface clears the configuration of the given common pool and leaves
- * it ready to be re-used.
- */
-#define MC_CMD_DEALLOCATE_TX_VFIFO_CP 0x121
-#undef MC_CMD_0x121_PRIVILEGE_CTG
-
-#define MC_CMD_0x121_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN msgrequest */
-#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_LEN 4
-/* Common pool ID given when pool allocated */
-#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_OFST 0
-#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_LEN 4
-
-/* MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT msgresponse */
-#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS
- * This interface allows the host to find out how many common pool buffers are
- * not yet assigned.
- */
-#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS 0x124
-#undef MC_CMD_0x124_PRIVILEGE_CTG
-
-#define MC_CMD_0x124_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_IN msgrequest */
-#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_IN_LEN 0
-
-/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT msgresponse */
-#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_LEN 8
-/* Available buffers for the ENG to NET vFIFOs. */
-#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_OFST 0
-#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_LEN 4
-/* Available buffers for the ENG to ENG and NET to ENG vFIFOs. */
-#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_OFST 4
-#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_LEN 4
-
-
-/***********************************/
-/* MC_CMD_SUC_VERSION
- * Get the version of the SUC
- */
-#define MC_CMD_SUC_VERSION 0x134
-#undef MC_CMD_0x134_PRIVILEGE_CTG
-
-#define MC_CMD_0x134_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_SUC_VERSION_IN msgrequest */
-#define MC_CMD_SUC_VERSION_IN_LEN 0
-
-/* MC_CMD_SUC_VERSION_OUT msgresponse */
-#define MC_CMD_SUC_VERSION_OUT_LEN 24
-/* The SUC firmware version as four numbers - a.b.c.d */
-#define MC_CMD_SUC_VERSION_OUT_VERSION_OFST 0
-#define MC_CMD_SUC_VERSION_OUT_VERSION_LEN 4
-#define MC_CMD_SUC_VERSION_OUT_VERSION_NUM 4
-/* The date, in seconds since the Unix epoch, when the firmware image was
- * built.
- */
-#define MC_CMD_SUC_VERSION_OUT_BUILD_DATE_OFST 16
-#define MC_CMD_SUC_VERSION_OUT_BUILD_DATE_LEN 4
-/* The ID of the SUC chip. This is specific to the platform but typically
- * indicates family, memory sizes etc. See SF-116728-SW for further details.
- */
-#define MC_CMD_SUC_VERSION_OUT_CHIP_ID_OFST 20
-#define MC_CMD_SUC_VERSION_OUT_CHIP_ID_LEN 4
-
-/* MC_CMD_SUC_BOOT_VERSION_IN msgrequest: Get the version of the SUC boot
- * loader.
- */
-#define MC_CMD_SUC_BOOT_VERSION_IN_LEN 4
-#define MC_CMD_SUC_BOOT_VERSION_IN_MAGIC_OFST 0
-#define MC_CMD_SUC_BOOT_VERSION_IN_MAGIC_LEN 4
-/* enum: Requests the SUC boot version. */
-#define MC_CMD_SUC_VERSION_GET_BOOT_VERSION 0xb007700b
-
-/* MC_CMD_SUC_BOOT_VERSION_OUT msgresponse */
-#define MC_CMD_SUC_BOOT_VERSION_OUT_LEN 4
-/* The SUC boot version */
-#define MC_CMD_SUC_BOOT_VERSION_OUT_VERSION_OFST 0
-#define MC_CMD_SUC_BOOT_VERSION_OUT_VERSION_LEN 4
-
-
-/***********************************/
-/* MC_CMD_GET_RX_PREFIX_ID
- * This command is part of the mechanism for configuring the format of the RX
- * packet prefix. It takes as input a bitmask of the fields the host would like
- * to be in the prefix. If the hardware supports RX prefixes with that
- * combination of fields, then this command returns a list of prefix-ids,
- * opaque identifiers suitable for use in the RX_PREFIX_ID field of a
- * MC_CMD_INIT_RXQ_V5_IN message. If the combination of fields is not
- * supported, returns ENOTSUP. If the firmware can't create any new prefix-ids
- * due to resource constraints, returns ENOSPC.
- */
-#define MC_CMD_GET_RX_PREFIX_ID 0x13b
-#undef MC_CMD_0x13b_PRIVILEGE_CTG
-
-#define MC_CMD_0x13b_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_RX_PREFIX_ID_IN msgrequest */
-#define MC_CMD_GET_RX_PREFIX_ID_IN_LEN 8
-/* Field bitmask. */
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LEN 8
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_LEN 4
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_LBN 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_WIDTH 32
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_OFST 4
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_LEN 4
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_LBN 32
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_WIDTH 32
-#define MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_LBN 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_VALID_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_VALID_LBN 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_VALID_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_FLAG_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_FLAG_LBN 2
-#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_FLAG_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_CLASS_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_CLASS_LBN 3
-#define MC_CMD_GET_RX_PREFIX_ID_IN_CLASS_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_PARTIAL_TSTAMP_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_PARTIAL_TSTAMP_LBN 4
-#define MC_CMD_GET_RX_PREFIX_ID_IN_PARTIAL_TSTAMP_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_LBN 5
-#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_LBN 6
-#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_MPORT_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_MPORT_LBN 7
-#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_MPORT_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_VPORT_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_VPORT_LBN 7
-#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_VPORT_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_CSUM_FRAME_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_CSUM_FRAME_LBN 8
-#define MC_CMD_GET_RX_PREFIX_ID_IN_CSUM_FRAME_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIP_TCI_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIP_TCI_LBN 9
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIP_TCI_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIPPED_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIPPED_LBN 10
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIPPED_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VSWITCH_STATUS_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VSWITCH_STATUS_LBN 11
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VSWITCH_STATUS_WIDTH 1
-
-/* MC_CMD_GET_RX_PREFIX_ID_OUT msgresponse */
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_LENMIN 8
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_LENMAX 252
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_LEN(num) (4+4*(num))
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_NUM(len) (((len)-4)/4)
-/* Number of prefix-ids returned */
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_NUM_RX_PREFIX_IDS_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_NUM_RX_PREFIX_IDS_LEN 4
-/* Opaque prefix identifiers which can be passed into MC_CMD_INIT_RXQ_V5 or
- * MC_CMD_QUERY_PREFIX_ID
- */
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_OFST 4
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_LEN 4
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_MINNUM 1
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_MAXNUM 62
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_MAXNUM_MCDI2 254
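
FIELDS is a 64-bit bitmask carried as LO/HI dwords. A sketch requesting prefixes that contain the LENGTH and RSS_HASH fields; the combination is purely illustrative, and the firmware may reject it with ENOTSUP:

static void build_get_rx_prefix_id(uint8_t req[8])      /* IN_LEN 8 */
{
        uint64_t fields = 1ull << 0 |   /* LENGTH_LBN */
                          1ull << 5;    /* RSS_HASH_LBN */

        put_le32(req, 0, (uint32_t)fields);           /* FIELDS_LO_OFST */
        put_le32(req, 4, (uint32_t)(fields >> 32));   /* FIELDS_HI_OFST */
}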
-
-/* RX_PREFIX_FIELD_INFO structuredef: Information about a single RX prefix
- * field
- */
-#define RX_PREFIX_FIELD_INFO_LEN 4
-/* The offset of the field from the start of the prefix, in bits */
-#define RX_PREFIX_FIELD_INFO_OFFSET_BITS_OFST 0
-#define RX_PREFIX_FIELD_INFO_OFFSET_BITS_LEN 2
-#define RX_PREFIX_FIELD_INFO_OFFSET_BITS_LBN 0
-#define RX_PREFIX_FIELD_INFO_OFFSET_BITS_WIDTH 16
-/* The width of the field, in bits */
-#define RX_PREFIX_FIELD_INFO_WIDTH_BITS_OFST 2
-#define RX_PREFIX_FIELD_INFO_WIDTH_BITS_LEN 1
-#define RX_PREFIX_FIELD_INFO_WIDTH_BITS_LBN 16
-#define RX_PREFIX_FIELD_INFO_WIDTH_BITS_WIDTH 8
-/* The type of the field. These enum values are in the same order as the fields
- * in the MC_CMD_GET_RX_PREFIX_ID_IN bitmask
- */
-#define RX_PREFIX_FIELD_INFO_TYPE_OFST 3
-#define RX_PREFIX_FIELD_INFO_TYPE_LEN 1
-#define RX_PREFIX_FIELD_INFO_LENGTH 0x0 /* enum */
-#define RX_PREFIX_FIELD_INFO_RSS_HASH_VALID 0x1 /* enum */
-#define RX_PREFIX_FIELD_INFO_USER_FLAG 0x2 /* enum */
-#define RX_PREFIX_FIELD_INFO_CLASS 0x3 /* enum */
-#define RX_PREFIX_FIELD_INFO_PARTIAL_TSTAMP 0x4 /* enum */
-#define RX_PREFIX_FIELD_INFO_RSS_HASH 0x5 /* enum */
-#define RX_PREFIX_FIELD_INFO_USER_MARK 0x6 /* enum */
-#define RX_PREFIX_FIELD_INFO_INGRESS_MPORT 0x7 /* enum */
-#define RX_PREFIX_FIELD_INFO_INGRESS_VPORT 0x7 /* enum */
-#define RX_PREFIX_FIELD_INFO_CSUM_FRAME 0x8 /* enum */
-#define RX_PREFIX_FIELD_INFO_VLAN_STRIP_TCI 0x9 /* enum */
-#define RX_PREFIX_FIELD_INFO_VLAN_STRIPPED 0xa /* enum */
-#define RX_PREFIX_FIELD_INFO_VSWITCH_STATUS 0xb /* enum */
-#define RX_PREFIX_FIELD_INFO_TYPE_LBN 24
-#define RX_PREFIX_FIELD_INFO_TYPE_WIDTH 8
-
-/* RX_PREFIX_FIXED_RESPONSE structuredef: Information about an RX prefix in
- * which every field has a fixed offset and width
- */
-#define RX_PREFIX_FIXED_RESPONSE_LENMIN 4
-#define RX_PREFIX_FIXED_RESPONSE_LENMAX 252
-#define RX_PREFIX_FIXED_RESPONSE_LENMAX_MCDI2 1020
-#define RX_PREFIX_FIXED_RESPONSE_LEN(num) (4+4*(num))
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_NUM(len) (((len)-4)/4)
-/* Length of the RX prefix in bytes */
-#define RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_OFST 0
-#define RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_LEN 1
-#define RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_LBN 0
-#define RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_WIDTH 8
-/* Number of fields present in the prefix */
-#define RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_OFST 1
-#define RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_LEN 1
-#define RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_LBN 8
-#define RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_WIDTH 8
-#define RX_PREFIX_FIXED_RESPONSE_RESERVED_OFST 2
-#define RX_PREFIX_FIXED_RESPONSE_RESERVED_LEN 2
-#define RX_PREFIX_FIXED_RESPONSE_RESERVED_LBN 16
-#define RX_PREFIX_FIXED_RESPONSE_RESERVED_WIDTH 16
-/* Array of RX_PREFIX_FIELD_INFO structures, of length FIELD_COUNT */
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_OFST 4
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_LEN 4
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_MINNUM 0
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_MAXNUM 62
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_MAXNUM_MCDI2 254
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_LBN 32
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_WIDTH 32
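
Each entry in the FIELDS array is one packed RX_PREFIX_FIELD_INFO dword: a 16-bit bit offset, an 8-bit width and an 8-bit type. A decode sketch for a fixed-format response, reusing mcdi_le32():

static void show_fixed_prefix(const uint8_t *resp, size_t len)
{
        unsigned int nfields = resp[1]; /* FIELD_COUNT (8 bits at byte 1) */
        unsigned int i;

        printf("prefix length %u bytes\n", resp[0]); /* PREFIX_LENGTH_BYTES */
        for (i = 0; i < nfields && 4 + 4 * (i + 1) <= len; i++) {
                uint32_t fi = mcdi_le32(resp, 4 + 4 * i);       /* FIELDS[i] */

                printf("type %u at bit %u, width %u\n",
                       fi >> 24,          /* TYPE_LBN 24, WIDTH 8 */
                       fi & 0xffff,       /* OFFSET_BITS_LBN 0, WIDTH 16 */
                       fi >> 16 & 0xff);  /* WIDTH_BITS_LBN 16, WIDTH 8 */
        }
}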
-
-
-/***********************************/
-/* MC_CMD_QUERY_RX_PREFIX_ID
- * This command takes an RX prefix id (obtained from MC_CMD_GET_RX_PREFIX_ID)
- * and returns a description of the RX prefix of packets delivered to an RXQ
- * created with that prefix id
- */
-#define MC_CMD_QUERY_RX_PREFIX_ID 0x13c
-#undef MC_CMD_0x13c_PRIVILEGE_CTG
-
-#define MC_CMD_0x13c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_QUERY_RX_PREFIX_ID_IN msgrequest */
-#define MC_CMD_QUERY_RX_PREFIX_ID_IN_LEN 4
-/* Prefix id to query */
-#define MC_CMD_QUERY_RX_PREFIX_ID_IN_RX_PREFIX_ID_OFST 0
-#define MC_CMD_QUERY_RX_PREFIX_ID_IN_RX_PREFIX_ID_LEN 4
-
-/* MC_CMD_QUERY_RX_PREFIX_ID_OUT msgresponse */
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_LENMIN 4
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_LENMAX 252
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_LEN(num) (4+1*(num))
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_NUM(len) (((len)-4)/1)
-/* An enum describing the structure of this response. */
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_TYPE_OFST 0
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_TYPE_LEN 1
-/* enum: The response is of format RX_PREFIX_FIXED_RESPONSE */
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_TYPE_FIXED 0x0
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESERVED_OFST 1
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESERVED_LEN 3
-/* The response. Its format is as defined by the RESPONSE_TYPE value */
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_OFST 4
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_LEN 1
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_MINNUM 0
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_MAXNUM 248
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_MAXNUM_MCDI2 1016
-
-
-/***********************************/
-/* MC_CMD_BUNDLE
- * A command to perform various bundle-related operations on insecure cards.
- */
-#define MC_CMD_BUNDLE 0x13d
-#undef MC_CMD_0x13d_PRIVILEGE_CTG
-
-#define MC_CMD_0x13d_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_BUNDLE_IN msgrequest */
-#define MC_CMD_BUNDLE_IN_LEN 4
-/* Sub-command code */
-#define MC_CMD_BUNDLE_IN_OP_OFST 0
-#define MC_CMD_BUNDLE_IN_OP_LEN 4
-/* enum: Get the current host access mode set on component partitions. */
-#define MC_CMD_BUNDLE_IN_OP_COMPONENT_ACCESS_GET 0x0
-/* enum: Set the host access mode on component partitions. */
-#define MC_CMD_BUNDLE_IN_OP_COMPONENT_ACCESS_SET 0x1
-
-/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN msgrequest: Retrieve the current
- * access mode on component partitions such as MC_FIRMWARE, SUC_FIRMWARE and
- * EXPANSION_UEFI. This command only works on engineering (insecure) cards. On
- * secure adapters, this command returns MC_CMD_ERR_EPERM.
- */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN_LEN 4
-/* Sub-command code. Must be OP_COMPONENT_ACCESS_GET. */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN_OP_OFST 0
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN_OP_LEN 4
-
-/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT msgresponse: Returns the access
- * control mode.
- */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT_LEN 4
-/* Access mode of component partitions. */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT_ACCESS_MODE_OFST 0
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT_ACCESS_MODE_LEN 4
-/* enum: Component partitions are read-only from the host. */
-#define MC_CMD_BUNDLE_COMPONENTS_READ_ONLY 0x0
-/* enum: Component partitions can be read from and written to by the host. */
-#define MC_CMD_BUNDLE_COMPONENTS_READ_WRITE 0x1
-
-/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN msgrequest: The component
- * partitions such as MC_FIRMWARE, SUC_FIRMWARE, EXPANSION_UEFI are set as
- * read-only on firmware built with bundle support. This command marks these
- * partitions as read/writeable. The access status set by this command does not
- * persist across MC reboots. This command only works on engineering (insecure)
- * cards. On secure adapters, this command returns MC_CMD_ERR_EPERM.
- */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_LEN 8
-/* Sub-command code. Must be OP_COMPONENT_ACCESS_SET. */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_OP_OFST 0
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_OP_LEN 4
-/* Access mode of component partitions. */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_ACCESS_MODE_OFST 4
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_ACCESS_MODE_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT/ACCESS_MODE */
-
-/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_OUT msgresponse */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_GET_VPD
- * Read all VPD starting from a given address
- */
-#define MC_CMD_GET_VPD 0x165
-#undef MC_CMD_0x165_PRIVILEGE_CTG
-
-#define MC_CMD_0x165_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_VPD_IN msgrequest */
-#define MC_CMD_GET_VPD_IN_LEN 4
-/* VPD address to start from. If the VPD is longer than the MCDI buffer
- * (unlikely), the user can make multiple calls with different starting
- * addresses.
- */
-#define MC_CMD_GET_VPD_IN_ADDR_OFST 0
-#define MC_CMD_GET_VPD_IN_ADDR_LEN 4
-
-/* MC_CMD_GET_VPD_OUT msgresponse */
-#define MC_CMD_GET_VPD_OUT_LENMIN 0
-#define MC_CMD_GET_VPD_OUT_LENMAX 252
-#define MC_CMD_GET_VPD_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_GET_VPD_OUT_LEN(num) (0+1*(num))
-#define MC_CMD_GET_VPD_OUT_DATA_NUM(len) (((len)-0)/1)
-/* VPD data returned. */
-#define MC_CMD_GET_VPD_OUT_DATA_OFST 0
-#define MC_CMD_GET_VPD_OUT_DATA_LEN 1
-#define MC_CMD_GET_VPD_OUT_DATA_MINNUM 0
-#define MC_CMD_GET_VPD_OUT_DATA_MAXNUM 252
-#define MC_CMD_GET_VPD_OUT_DATA_MAXNUM_MCDI2 1020
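
Since one response is capped at OUT_LENMAX (252) bytes, the full VPD is read by advancing ADDR until a short response arrives. A sketch; mcdi_rpc() is again a placeholder transport that returns the actual response length, and dst is assumed large enough:

static size_t read_all_vpd(uint8_t *dst,
                           size_t (*mcdi_rpc)(const uint8_t *in, size_t inlen,
                                              uint8_t *out))
{
        size_t total = 0, got;

        do {
                uint8_t in[4];                          /* IN_LEN */

                put_le32(in, 0, (uint32_t)total);       /* IN_ADDR_OFST */
                got = mcdi_rpc(in, sizeof(in), dst + total);
                total += got;
        } while (got == 252);   /* OUT_LENMAX: a short reply means done */
        return total;
}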
-
-
-/***********************************/
-/* MC_CMD_GET_NCSI_INFO
- * Provide information about the NC-SI stack
- */
-#define MC_CMD_GET_NCSI_INFO 0x167
-#undef MC_CMD_0x167_PRIVILEGE_CTG
-
-#define MC_CMD_0x167_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_NCSI_INFO_IN msgrequest */
-#define MC_CMD_GET_NCSI_INFO_IN_LEN 8
-/* Operation to be performed */
-#define MC_CMD_GET_NCSI_INFO_IN_OP_OFST 0
-#define MC_CMD_GET_NCSI_INFO_IN_OP_LEN 4
-/* enum: Information on the link settings. */
-#define MC_CMD_GET_NCSI_INFO_IN_OP_LINK 0x0
-/* enum: Statistics associated with the channel */
-#define MC_CMD_GET_NCSI_INFO_IN_OP_STATISTICS 0x1
-/* The NC-SI channel on which the operation is to be performed */
-#define MC_CMD_GET_NCSI_INFO_IN_CHANNEL_OFST 4
-#define MC_CMD_GET_NCSI_INFO_IN_CHANNEL_LEN 4
-
-/* MC_CMD_GET_NCSI_INFO_LINK_OUT msgresponse */
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_LEN 12
-/* Settings as received from BMC. */
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_SETTINGS_OFST 0
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_SETTINGS_LEN 4
-/* Advertised capabilities applied to channel. */
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ADV_CAP_OFST 4
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ADV_CAP_LEN 4
-/* General status */
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATUS_OFST 8
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATUS_LEN 4
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATE_OFST 8
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATE_LBN 0
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATE_WIDTH 2
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ENABLE_OFST 8
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ENABLE_LBN 2
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ENABLE_WIDTH 1
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_NETWORK_TX_OFST 8
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_NETWORK_TX_LBN 3
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_NETWORK_TX_WIDTH 1
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ATTACHED_OFST 8
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ATTACHED_LBN 4
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ATTACHED_WIDTH 1
-
-/* MC_CMD_GET_NCSI_INFO_STATISTICS_OUT msgresponse */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_LEN 28
-/* The number of NC-SI commands received. */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMDS_RX_OFST 0
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMDS_RX_LEN 4
-/* The number of NC-SI commands dropped. */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_PKTS_DROPPED_OFST 4
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_PKTS_DROPPED_LEN 4
-/* The number of invalid NC-SI commands received. */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_TYPE_ERRS_OFST 8
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_TYPE_ERRS_LEN 4
-/* The number of checksum errors seen. */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_CSUM_ERRS_OFST 12
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_CSUM_ERRS_LEN 4
-/* The number of NC-SI requests received. */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_RX_PKTS_OFST 16
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_RX_PKTS_LEN 4
-/* The number of NC-SI responses sent (includes AENs) */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_TX_PKTS_OFST 20
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_TX_PKTS_LEN 4
-/* The number of NC-SI AENs sent */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_AENS_SENT_OFST 24
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_AENS_SENT_LEN 4
-
/* CLIENT_HANDLE structuredef: A client is an abstract entity that can make
* requests of the device and that can own resources managed by the device.
* Examples of clients include PCIe functions and dynamic clients. A client
@@ -23848,59 +21670,10 @@
#define CLIENT_HANDLE_OPAQUE_LBN 0
#define CLIENT_HANDLE_OPAQUE_WIDTH 32
-/* CLOCK_INFO structuredef: Information about a single hardware clock */
-#define CLOCK_INFO_LEN 28
-/* Enumeration that uniquely identifies the clock */
-#define CLOCK_INFO_CLOCK_ID_OFST 0
-#define CLOCK_INFO_CLOCK_ID_LEN 2
-/* enum: The Riverhead CMC (card MC) */
-#define CLOCK_INFO_CLOCK_CMC 0x0
-/* enum: The Riverhead NMC (network MC) */
-#define CLOCK_INFO_CLOCK_NMC 0x1
-/* enum: The Riverhead SDNET slice main logic */
-#define CLOCK_INFO_CLOCK_SDNET 0x2
-/* enum: The Riverhead SDNET LUT */
-#define CLOCK_INFO_CLOCK_SDNET_LUT 0x3
-/* enum: The Riverhead SDNET control logic */
-#define CLOCK_INFO_CLOCK_SDNET_CTRL 0x4
-/* enum: The Riverhead Streaming SubSystem */
-#define CLOCK_INFO_CLOCK_SSS 0x5
-/* enum: The Riverhead network MAC and associated CSR registers */
-#define CLOCK_INFO_CLOCK_MAC 0x6
-#define CLOCK_INFO_CLOCK_ID_LBN 0
-#define CLOCK_INFO_CLOCK_ID_WIDTH 16
-/* Assorted flags */
-#define CLOCK_INFO_FLAGS_OFST 2
-#define CLOCK_INFO_FLAGS_LEN 2
-#define CLOCK_INFO_SETTABLE_OFST 2
-#define CLOCK_INFO_SETTABLE_LBN 0
-#define CLOCK_INFO_SETTABLE_WIDTH 1
-#define CLOCK_INFO_FLAGS_LBN 16
-#define CLOCK_INFO_FLAGS_WIDTH 16
-/* The frequency in Hz */
-#define CLOCK_INFO_FREQUENCY_OFST 4
-#define CLOCK_INFO_FREQUENCY_LEN 8
-#define CLOCK_INFO_FREQUENCY_LO_OFST 4
-#define CLOCK_INFO_FREQUENCY_LO_LEN 4
-#define CLOCK_INFO_FREQUENCY_LO_LBN 32
-#define CLOCK_INFO_FREQUENCY_LO_WIDTH 32
-#define CLOCK_INFO_FREQUENCY_HI_OFST 8
-#define CLOCK_INFO_FREQUENCY_HI_LEN 4
-#define CLOCK_INFO_FREQUENCY_HI_LBN 64
-#define CLOCK_INFO_FREQUENCY_HI_WIDTH 32
-#define CLOCK_INFO_FREQUENCY_LBN 32
-#define CLOCK_INFO_FREQUENCY_WIDTH 64
-/* Human-readable ASCII name for clock, with NUL termination */
-#define CLOCK_INFO_NAME_OFST 12
-#define CLOCK_INFO_NAME_LEN 1
-#define CLOCK_INFO_NAME_NUM 16
-#define CLOCK_INFO_NAME_LBN 96
-#define CLOCK_INFO_NAME_WIDTH 8
-
/* SCHED_CREDIT_CHECK_RESULT structuredef */
#define SCHED_CREDIT_CHECK_RESULT_LEN 16
-/* The instance of the scheduler. Refer to XN-200389-AW for the location of
- * these schedulers in the hardware.
+/* The instance of the scheduler. Refer to XN-200389-AW (snic/hnic) and
+ * XN-200425-TC (cdx) for the location of these schedulers in the hardware.
*/
#define SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE_OFST 0
#define SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE_LEN 1
@@ -23914,6 +21687,16 @@
#define SCHED_CREDIT_CHECK_RESULT_DMAC_H2C 0x7 /* enum */
#define SCHED_CREDIT_CHECK_RESULT_HUB_NET_B 0x8 /* enum */
#define SCHED_CREDIT_CHECK_RESULT_HUB_NET_REPLAY 0x9 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_ADAPTER_C2H_C 0xa /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_A2_H2C_C 0xb /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_A3_SOFT_ADAPTOR_C 0xc /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_A4_DPU_WRITE_C 0xd /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_JRC_RRU 0xe /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_CDM_SINK 0xf /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_PCIE_SINK 0x10 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_UPORT_SINK 0x11 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_PSX_SINK 0x12 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_A5_DPU_READ_C 0x13 /* enum */
#define SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE_LBN 0
#define SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE_WIDTH 8
/* The type of node that this result refers to. */
@@ -23923,6 +21706,10 @@
#define SCHED_CREDIT_CHECK_RESULT_DEST 0x0
/* enum: Source node */
#define SCHED_CREDIT_CHECK_RESULT_SOURCE 0x1
+/* enum: Destination node credit type 1 (new to the Keystone schedulers, see
+ * SF-120268-TC)
+ */
+#define SCHED_CREDIT_CHECK_RESULT_DEST_CREDIT1 0x2
#define SCHED_CREDIT_CHECK_RESULT_NODE_TYPE_LBN 8
#define SCHED_CREDIT_CHECK_RESULT_NODE_TYPE_WIDTH 8
/* Level of node in scheduler hierarchy (level 0 is the bottom of the
@@ -23950,592 +21737,6 @@
/***********************************/
-/* MC_CMD_GET_CLOCKS_INFO
- * Get information about the device clocks
- */
-#define MC_CMD_GET_CLOCKS_INFO 0x166
-#undef MC_CMD_0x166_PRIVILEGE_CTG
-
-#define MC_CMD_0x166_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_CLOCKS_INFO_IN msgrequest */
-#define MC_CMD_GET_CLOCKS_INFO_IN_LEN 0
-
-/* MC_CMD_GET_CLOCKS_INFO_OUT msgresponse */
-#define MC_CMD_GET_CLOCKS_INFO_OUT_LENMIN 0
-#define MC_CMD_GET_CLOCKS_INFO_OUT_LENMAX 252
-#define MC_CMD_GET_CLOCKS_INFO_OUT_LENMAX_MCDI2 1008
-#define MC_CMD_GET_CLOCKS_INFO_OUT_LEN(num) (0+28*(num))
-#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_NUM(len) (((len)-0)/28)
-/* An array of CLOCK_INFO structures. */
-#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_OFST 0
-#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_LEN 28
-#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_MINNUM 0
-#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_MAXNUM 9
-#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_MAXNUM_MCDI2 36
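
The response is an array of the 28-byte CLOCK_INFO structures defined above; INFOS_NUM(len) is simply len / 28. A decode sketch that reassembles the 64-bit FREQUENCY from its LO/HI dwords and prints the NUL-terminated NAME:

static void show_clocks(const uint8_t *resp, size_t len)
{
        size_t i, n = len / 28;         /* INFOS_NUM(len); CLOCK_INFO_LEN 28 */

        for (i = 0; i < n; i++) {
                const uint8_t *ci = resp + 28 * i;
                uint64_t freq = (uint64_t)mcdi_le32(ci, 4) |      /* FREQ_LO */
                                (uint64_t)mcdi_le32(ci, 8) << 32; /* FREQ_HI */

                printf("clock %u: %llu Hz \"%.16s\"\n",
                       ci[0] | (unsigned int)ci[1] << 8,   /* CLOCK_ID */
                       (unsigned long long)freq,
                       (const char *)&ci[12]);             /* NAME[16] */
        }
}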
-
-
-/***********************************/
-/* MC_CMD_VNIC_ENCAP_RULE_ADD
- * Add a rule for detecting encapsulations in the VNIC stage. Currently this
- * only affects checksum validation in VNIC RX - on TX the send descriptor
- * explicitly specifies encapsulation. These rules are per-VNIC, i.e. only
- * apply to the current driver. If a rule matches, then the packet is
- * considered to have the corresponding encapsulation type, and the inner
- * packet is parsed. It is up to the driver to ensure that overlapping rules
- * are not inserted. (If a packet would match multiple rules, a random one of
- * them will be used.) A rule with the exact same match criteria may not be
- * inserted twice (EALREADY). Only a limited number of MATCH_FLAGS values are
- * supported, use MC_CMD_GET_PARSER_DISP_INFO with OP
- * OP_GET_SUPPORTED_VNIC_ENCAP_RULE_MATCHES to get a list of supported
- * combinations. Each driver may only have a limited set of active rules -
- * returns ENOSPC if the caller's table is full.
- */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD 0x16d
-#undef MC_CMD_0x16d_PRIVILEGE_CTG
-
-#define MC_CMD_0x16d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_VNIC_ENCAP_RULE_ADD_IN msgrequest */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_LEN 36
-/* Set to MAE_MPORT_SELECTOR_ASSIGNED. In the future this may be relaxed. */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MPORT_SELECTOR_OFST 0
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MPORT_SELECTOR_LEN 4
-/* Any non-zero bits other than the ones named below or an unsupported
- * combination will cause the NIC to return EOPNOTSUPP. In the future more
- * flags may be added.
- */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_FLAGS_OFST 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_FLAGS_LEN 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_OFST 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_LBN 0
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_WIDTH 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_OFST 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_LBN 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_WIDTH 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_OFST 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_LBN 2
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_WIDTH 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_OFST 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_LBN 3
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_WIDTH 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_OFST 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_LBN 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_WIDTH 1
-/* Only if MATCH_ETHER_TYPE is set. Ethertype value as bytes in network order.
- * Currently only IPv4 (0x0800) and IPv6 (0x86DD) ethertypes may be used.
- */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ETHER_TYPE_OFST 8
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ETHER_TYPE_LEN 2
-/* Only if MATCH_OUTER_VLAN is set. VID value as bytes in network order.
- * (Deprecated)
- */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_LBN 80
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WIDTH 12
-/* Only if MATCH_OUTER_VLAN is set. Aligned wrapper for OUTER_VLAN_VID. */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WORD_OFST 10
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WORD_LEN 2
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_OFST 10
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_LBN 0
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_WIDTH 12
-/* Only if MATCH_DST_IP is set. IP address as bytes in network order. In the
- * case of IPv4, the IP should be in the first 4 bytes and all other bytes
- * should be zero.
- */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_IP_OFST 12
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_IP_LEN 16
-/* Only if MATCH_IP_PROTO is set. Currently only UDP proto (17) may be used. */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_IP_PROTO_OFST 28
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_IP_PROTO_LEN 1
-/* Actions that should be applied to packets that match the rule. */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ACTION_FLAGS_OFST 29
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ACTION_FLAGS_LEN 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_OFST 29
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_LBN 0
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_WIDTH 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_RSS_ON_OUTER_OFST 29
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_RSS_ON_OUTER_LBN 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_RSS_ON_OUTER_WIDTH 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STEER_ON_OUTER_OFST 29
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STEER_ON_OUTER_LBN 2
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STEER_ON_OUTER_WIDTH 1
-/* Only if MATCH_DST_PORT is set. Port number as bytes in network order. */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_OFST 30
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_LEN 2
-/* Resulting encapsulation type, as per MAE_MCDI_ENCAP_TYPE enumeration. */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ENCAP_TYPE_OFST 32
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ENCAP_TYPE_LEN 4
-
-/* MC_CMD_VNIC_ENCAP_RULE_ADD_OUT msgresponse */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_LEN 4
-/* Handle to inserted rule. Used for removing the rule. */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_HANDLE_OFST 0
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_HANDLE_LEN 4
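Editorial sketch: composing an ADD request for a VXLAN-style rule (IPv4, UDP destination port 4789) directly from the offsets above. Only the field layout comes from these definitions; the little-endian-host assumption for the FLAGS dword and the omitted MPORT_SELECTOR/ENCAP_TYPE values are placeholders, and dispatch through the driver's MCDI transport is not shown.

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* htons() */

static void vnic_encap_rule_vxlan_example(void)
{
	uint8_t req[MC_CMD_VNIC_ENCAP_RULE_ADD_IN_LEN] = { 0 };
	uint32_t flags =
		(1u << MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_LBN) |
		(1u << MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_LBN) |
		(1u << MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_LBN);
	uint16_t ether_type = htons(0x0800);	/* IPv4, network order */
	uint16_t dst_port = htons(4789);	/* VXLAN UDP port, network order */

	/* MPORT_SELECTOR (OFST 0) must be MAE_MPORT_SELECTOR_ASSIGNED and
	 * ENCAP_TYPE (OFST 32) a MAE_MCDI_ENCAP_TYPE value; both are defined
	 * elsewhere in this header and omitted here. The memcpy of 'flags'
	 * assumes a little-endian host, matching the MCDI wire format.
	 */
	memcpy(req + MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_FLAGS_OFST, &flags, 4);
	memcpy(req + MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ETHER_TYPE_OFST,
	       &ether_type, 2);
	req[MC_CMD_VNIC_ENCAP_RULE_ADD_IN_IP_PROTO_OFST] = 17;	/* UDP */
	memcpy(req + MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_OFST, &dst_port, 2);
	(void)req;	/* dispatch via the driver's MCDI transport */
}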
-
-
-/***********************************/
-/* MC_CMD_VNIC_ENCAP_RULE_REMOVE
- * Remove a VNIC encapsulation rule. Packets which would have previously
- * matched the rule will then be considered as unencapsulated. Returns EALREADY
- * if the input HANDLE doesn't correspond to an existing rule.
- */
-#define MC_CMD_VNIC_ENCAP_RULE_REMOVE 0x16e
-#undef MC_CMD_0x16e_PRIVILEGE_CTG
-
-#define MC_CMD_0x16e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN msgrequest */
-#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_LEN 4
-/* Handle which was returned by MC_CMD_VNIC_ENCAP_RULE_ADD. */
-#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_HANDLE_OFST 0
-#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_HANDLE_LEN 4
-
-/* MC_CMD_VNIC_ENCAP_RULE_REMOVE_OUT msgresponse */
-#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_OUT_LEN 0
-
-/* UUID structuredef: An RFC4122 standard UUID. The values here are stored in
- * the endianness specified by the RFC; users should ignore the broken-out
- * fields and instead do straight memory copies to ensure correct ordering.
- */
-#define UUID_LEN 16
-#define UUID_TIME_LOW_OFST 0
-#define UUID_TIME_LOW_LEN 4
-#define UUID_TIME_LOW_LBN 0
-#define UUID_TIME_LOW_WIDTH 32
-#define UUID_TIME_MID_OFST 4
-#define UUID_TIME_MID_LEN 2
-#define UUID_TIME_MID_LBN 32
-#define UUID_TIME_MID_WIDTH 16
-#define UUID_TIME_HI_LBN 52
-#define UUID_TIME_HI_WIDTH 12
-#define UUID_VERSION_LBN 48
-#define UUID_VERSION_WIDTH 4
-#define UUID_RESERVED_LBN 64
-#define UUID_RESERVED_WIDTH 2
-#define UUID_CLK_SEQ_LBN 66
-#define UUID_CLK_SEQ_WIDTH 14
-#define UUID_NODE_OFST 10
-#define UUID_NODE_LEN 6
-#define UUID_NODE_LBN 80
-#define UUID_NODE_WIDTH 48
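Editorial sketch of the usage the UUID comment above prescribes: copy the 16 bytes wholesale rather than reassembling the broken-out fields. 'msg_buf' and 'uuid_ofst' are hypothetical (any *_UUID_OFST from the plugin commands below would do).

static void read_uuid(const uint8_t *msg_buf, size_t uuid_ofst,
		      uint8_t uuid[UUID_LEN])
{
	/* Copy as one unit; never rebuild from TIME_LOW/TIME_MID/... whose
	 * byte order is the RFC's, not the host's.
	 */
	memcpy(uuid, msg_buf + uuid_ofst, UUID_LEN);
}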
-
-
-/***********************************/
-/* MC_CMD_PLUGIN_ALLOC
- * Create a handle to a datapath plugin's extension. This involves finding a
- * currently-loaded plugin offering the given functionality (as identified by
- * the UUID) and allocating a handle to track the usage of it. Plugin
- * functionality is identified by 'extension' rather than any other identifier
- * so that a single plugin bitfile may offer more than one piece of independent
- * functionality. If two bitfiles are loaded which both offer the same
- * extension, then the metadata is interrogated further to determine which is
- * the newest and that is the one opened. See SF-123625-SW for architectural
- * detail on datapath plugins.
- */
-#define MC_CMD_PLUGIN_ALLOC 0x1ad
-#undef MC_CMD_0x1ad_PRIVILEGE_CTG
-
-#define MC_CMD_0x1ad_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_PLUGIN_ALLOC_IN msgrequest */
-#define MC_CMD_PLUGIN_ALLOC_IN_LEN 24
-/* The functionality requested of the plugin, as a UUID structure */
-#define MC_CMD_PLUGIN_ALLOC_IN_UUID_OFST 0
-#define MC_CMD_PLUGIN_ALLOC_IN_UUID_LEN 16
-/* Additional options for opening the handle */
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAGS_OFST 16
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAGS_LEN 4
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_INFO_ONLY_OFST 16
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_INFO_ONLY_LBN 0
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_INFO_ONLY_WIDTH 1
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_ALLOW_DISABLED_OFST 16
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_ALLOW_DISABLED_LBN 1
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_ALLOW_DISABLED_WIDTH 1
-/* Load the extension only if it is in the specified administrative group.
- * Specify ANY to load the extension wherever it is found (if there are
- * multiple choices then the extension with the highest MINOR_VER/PATCH_VER
- * will be loaded). See MC_CMD_PLUGIN_GET_META_GLOBAL for a description of
- * administrative groups.
- */
-#define MC_CMD_PLUGIN_ALLOC_IN_ADMIN_GROUP_OFST 20
-#define MC_CMD_PLUGIN_ALLOC_IN_ADMIN_GROUP_LEN 2
-/* enum: Load the extension from any ADMIN_GROUP. */
-#define MC_CMD_PLUGIN_ALLOC_IN_ANY 0xffff
-/* Reserved */
-#define MC_CMD_PLUGIN_ALLOC_IN_RESERVED_OFST 22
-#define MC_CMD_PLUGIN_ALLOC_IN_RESERVED_LEN 2
-
-/* MC_CMD_PLUGIN_ALLOC_OUT msgresponse */
-#define MC_CMD_PLUGIN_ALLOC_OUT_LEN 4
-/* Unique identifier of this usage */
-#define MC_CMD_PLUGIN_ALLOC_OUT_HANDLE_OFST 0
-#define MC_CMD_PLUGIN_ALLOC_OUT_HANDLE_LEN 4
-
-
-/***********************************/
-/* MC_CMD_PLUGIN_FREE
- * Delete a handle to a plugin's extension.
- */
-#define MC_CMD_PLUGIN_FREE 0x1ae
-#undef MC_CMD_0x1ae_PRIVILEGE_CTG
-
-#define MC_CMD_0x1ae_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_PLUGIN_FREE_IN msgrequest */
-#define MC_CMD_PLUGIN_FREE_IN_LEN 4
-/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
-#define MC_CMD_PLUGIN_FREE_IN_HANDLE_OFST 0
-#define MC_CMD_PLUGIN_FREE_IN_HANDLE_LEN 4
-
-/* MC_CMD_PLUGIN_FREE_OUT msgresponse */
-#define MC_CMD_PLUGIN_FREE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_PLUGIN_GET_META_GLOBAL
- * Returns the global metadata applying to the whole plugin extension. See the
- * other metadata calls for subtypes of data.
- */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL 0x1af
-#undef MC_CMD_0x1af_PRIVILEGE_CTG
-
-#define MC_CMD_0x1af_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_PLUGIN_GET_META_GLOBAL_IN msgrequest */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_IN_LEN 4
-/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_IN_HANDLE_OFST 0
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_IN_HANDLE_LEN 4
-
-/* MC_CMD_PLUGIN_GET_META_GLOBAL_OUT msgresponse */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_LEN 36
-/* Unique identifier of this plugin extension. This is identical to the value
- * which was requested when the handle was allocated.
- */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_UUID_OFST 0
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_UUID_LEN 16
-/* semver sub-version of this plugin extension */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MINOR_VER_OFST 16
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MINOR_VER_LEN 2
-/* semver micro-version of this plugin extension */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_PATCH_VER_OFST 18
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_PATCH_VER_LEN 2
-/* Number of different messages which can be sent to this extension */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_NUM_MSGS_OFST 20
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_NUM_MSGS_LEN 4
-/* Byte offset within the VI window of the plugin's mapped CSR window. */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_OFFSET_OFST 24
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_OFFSET_LEN 2
-/* Number of bytes mapped through to the plugin's CSRs. 0 if that feature was
- * not requested by the plugin (in which case MAPPED_CSR_OFFSET and
- * MAPPED_CSR_FLAGS are ignored).
- */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_SIZE_OFST 26
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_SIZE_LEN 2
-/* Flags indicating how to perform the CSR window mapping. */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAGS_OFST 28
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAGS_LEN 4
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_READ_OFST 28
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_READ_LBN 0
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_READ_WIDTH 1
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_WRITE_OFST 28
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_WRITE_LBN 1
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_WRITE_WIDTH 1
-/* Identifier of the set of extensions which all change state together.
- * Extensions having the same ADMIN_GROUP will always load and unload at the
- * same time. ADMIN_GROUP values themselves are arbitrary (but they contain a
- * generation number as an implementation detail to ensure that they're not
- * reused rapidly).
- */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_ADMIN_GROUP_OFST 32
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_ADMIN_GROUP_LEN 1
-/* Bitshift in MC_CMD_DEVEL_CLIENT_PRIVILEGE_MODIFY's MASK parameters
- * corresponding to this extension, i.e. set the bit 1<<PRIVILEGE_BIT to permit
- * access to this extension.
- */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_PRIVILEGE_BIT_OFST 33
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_PRIVILEGE_BIT_LEN 1
-/* Reserved */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_RESERVED_OFST 34
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_RESERVED_LEN 2
-
-
-/***********************************/
-/* MC_CMD_PLUGIN_GET_META_PUBLISHER
- * Returns metadata supplied by the plugin author which describes this
- * extension in a human-readable way. Contrast with
- * MC_CMD_PLUGIN_GET_META_GLOBAL, which returns information needed for software
- * to operate.
- */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER 0x1b0
-#undef MC_CMD_0x1b0_PRIVILEGE_CTG
-
-#define MC_CMD_0x1b0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_PLUGIN_GET_META_PUBLISHER_IN msgrequest */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_LEN 12
-/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_HANDLE_OFST 0
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_HANDLE_LEN 4
-/* Category of data to return */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_SUBTYPE_OFST 4
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_SUBTYPE_LEN 4
-/* enum: Top-level information about the extension. The returned data is an
- * array of key/value pairs using the keys in RFC5013 (Dublin Core) to describe
- * the extension. The data is a back-to-back list of zero-terminated strings;
- * the even-numbered fields (0,2,4,...) are keys and their following odd-
- * numbered fields are the corresponding values. Both keys and values are
- * nominally UTF-8. Per RFC5013, the same key may be repeated any number of
- * times. Note that all information (including the key/value structure itself
- * and the UTF-8 encoding) may have been provided by the plugin author, so
- * callers must be cautious about parsing it. Callers should parse only the
- * top-level structure to separate out the keys and values; the contents of the
- * values are not expected to be machine-readable.
- */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_EXTENSION_KVS 0x0
-/* Byte position of the data to be returned within the full data block of the
- * given SUBTYPE.
- */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_OFFSET_OFST 8
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_OFFSET_LEN 4
-
-/* MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT msgresponse */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_LENMIN 4
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_LENMAX 252
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_LEN(num) (4+1*(num))
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_NUM(len) (((len)-4)/1)
-/* Full length of the data block of the requested SUBTYPE, in bytes. */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_TOTAL_SIZE_OFST 0
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_TOTAL_SIZE_LEN 4
-/* The information requested by SUBTYPE. */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_OFST 4
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_LEN 1
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_MINNUM 0
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_MAXNUM 248
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_MAXNUM_MCDI2 1016
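Editorial sketch: defensively walking the EXTENSION_KVS blob described above (key/value pairs as back-to-back NUL-terminated strings), treating it as untrusted per the warning in the SUBTYPE comment. A minimal host-side illustration, not driver code; 'data'/'len' stand for the reassembled data block and its TOTAL_SIZE.

#include <stdint.h>
#include <string.h>

static int plugin_kvs_walk(const uint8_t *data, size_t len)
{
	size_t pos = 0;
	int field = 0;		/* even fields are keys, odd are values */

	while (pos < len) {
		const uint8_t *nul = memchr(data + pos, 0, len - pos);

		if (!nul)
			return -1;	/* unterminated field: reject */
		/* data + pos is now a NUL-terminated but otherwise untrusted,
		 * nominally-UTF-8 string (key if 'field' is even, else value).
		 */
		pos = (size_t)(nul - data) + 1;
		field++;
	}
	return (field & 1) ? -1 : 0;	/* dangling key without a value */
}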
-
-
-/***********************************/
-/* MC_CMD_PLUGIN_GET_META_MSG
- * Returns the simple metadata for a specific plugin request message. This
- * supplies information necessary for the host to know how to build an
- * MC_CMD_PLUGIN_REQ request.
- */
-#define MC_CMD_PLUGIN_GET_META_MSG 0x1b1
-#undef MC_CMD_0x1b1_PRIVILEGE_CTG
-
-#define MC_CMD_0x1b1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_PLUGIN_GET_META_MSG_IN msgrequest */
-#define MC_CMD_PLUGIN_GET_META_MSG_IN_LEN 8
-/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
-#define MC_CMD_PLUGIN_GET_META_MSG_IN_HANDLE_OFST 0
-#define MC_CMD_PLUGIN_GET_META_MSG_IN_HANDLE_LEN 4
-/* Unique message ID to obtain */
-#define MC_CMD_PLUGIN_GET_META_MSG_IN_ID_OFST 4
-#define MC_CMD_PLUGIN_GET_META_MSG_IN_ID_LEN 4
-
-/* MC_CMD_PLUGIN_GET_META_MSG_OUT msgresponse */
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_LEN 44
-/* Unique message ID. This is the same value as the input parameter; it exists
- * to allow future MCDI extensions which enumerate all messages.
- */
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_ID_OFST 0
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_ID_LEN 4
-/* Packed index number of this message, assigned by the MC to give each message
- * a unique ID in an array to allow for more efficient storage/management.
- */
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_INDEX_OFST 4
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_INDEX_LEN 4
-/* Short human-readable codename for this message. This is conventionally
- * formatted as a C identifier in the basic ASCII character set with any spare
- * bytes at the end set to 0; however, this convention is not enforced by the
- * MC,
- * so consumers must check for all potential malformations before using it for
- * a trusted purpose.
- */
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_NAME_OFST 8
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_NAME_LEN 32
-/* Number of bytes of data which must be passed from the host kernel to the MC
- * for this message's payload, and which are passed back again in the response.
- * The MC's plugin metadata loader will have validated that the number of bytes
- * specified here will fit in to MC_CMD_PLUGIN_REQ_IN_DATA in a single MCDI
- * message.
- */
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_DATA_SIZE_OFST 40
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_DATA_SIZE_LEN 4
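Editorial sketch: validating the NAME field before trusting it, as the comment above requires. This checks the conventional form only (NUL-terminated within the 32 bytes, a C identifier in basic ASCII); it is an illustration of the caller's duty, not a normative rule.

#include <ctype.h>
#include <stdbool.h>
#include <string.h>

static bool plugin_msg_name_ok(const char name[MC_CMD_PLUGIN_GET_META_MSG_OUT_NAME_LEN])
{
	size_t n = strnlen(name, MC_CMD_PLUGIN_GET_META_MSG_OUT_NAME_LEN);

	if (n == 0 || n == MC_CMD_PLUGIN_GET_META_MSG_OUT_NAME_LEN)
		return false;	/* empty or not NUL-terminated in-field */
	if (isdigit((unsigned char)name[0]))
		return false;	/* C identifiers cannot start with a digit */
	for (size_t i = 0; i < n; i++)
		if (!isalnum((unsigned char)name[i]) && name[i] != '_')
			return false;
	return true;
}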
-
-/* PLUGIN_EXTENSION structuredef: Used within MC_CMD_PLUGIN_GET_ALL to describe
- * an individual extension.
- */
-#define PLUGIN_EXTENSION_LEN 20
-#define PLUGIN_EXTENSION_UUID_OFST 0
-#define PLUGIN_EXTENSION_UUID_LEN 16
-#define PLUGIN_EXTENSION_UUID_LBN 0
-#define PLUGIN_EXTENSION_UUID_WIDTH 128
-#define PLUGIN_EXTENSION_ADMIN_GROUP_OFST 16
-#define PLUGIN_EXTENSION_ADMIN_GROUP_LEN 1
-#define PLUGIN_EXTENSION_ADMIN_GROUP_LBN 128
-#define PLUGIN_EXTENSION_ADMIN_GROUP_WIDTH 8
-#define PLUGIN_EXTENSION_FLAG_ENABLED_LBN 136
-#define PLUGIN_EXTENSION_FLAG_ENABLED_WIDTH 1
-#define PLUGIN_EXTENSION_RESERVED_LBN 137
-#define PLUGIN_EXTENSION_RESERVED_WIDTH 23
-
-
-/***********************************/
-/* MC_CMD_PLUGIN_GET_ALL
- * Returns a list of all plugin extensions currently loaded and available. The
- * UUIDs returned can be passed to MC_CMD_PLUGIN_ALLOC in order to obtain more
- * detailed metadata via the MC_CMD_PLUGIN_GET_META_* family of requests. The
- * ADMIN_GROUP field indicates how extensions are grouped into units which are
- * loaded/unloaded together; extensions with the same value are in the same
- * group.
- */
-#define MC_CMD_PLUGIN_GET_ALL 0x1b2
-#undef MC_CMD_0x1b2_PRIVILEGE_CTG
-
-#define MC_CMD_0x1b2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_PLUGIN_GET_ALL_IN msgrequest */
-#define MC_CMD_PLUGIN_GET_ALL_IN_LEN 4
-/* Additional options for querying. Note that if neither FLAG_INCLUDE_ENABLED
- * nor FLAG_INCLUDE_DISABLED is specified then the result set will be empty.
- */
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAGS_OFST 0
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAGS_LEN 4
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_ENABLED_OFST 0
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_ENABLED_LBN 0
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_ENABLED_WIDTH 1
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_DISABLED_OFST 0
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_DISABLED_LBN 1
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_DISABLED_WIDTH 1
-
-/* MC_CMD_PLUGIN_GET_ALL_OUT msgresponse */
-#define MC_CMD_PLUGIN_GET_ALL_OUT_LENMIN 0
-#define MC_CMD_PLUGIN_GET_ALL_OUT_LENMAX 240
-#define MC_CMD_PLUGIN_GET_ALL_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_PLUGIN_GET_ALL_OUT_LEN(num) (0+20*(num))
-#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_NUM(len) (((len)-0)/20)
-/* The list of available plugin extensions, as an array of PLUGIN_EXTENSION
- * structs.
- */
-#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_OFST 0
-#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_LEN 20
-#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_MINNUM 0
-#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_MAXNUM 12
-#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_MAXNUM_MCDI2 51
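Editorial sketch: iterating the EXTENSIONS array with the _NUM() helper above. 'outbuf'/'outlen' are a hypothetical response buffer and length; the FLAG_ENABLED extraction assumes MCDI's little-endian bit numbering (LBN counted from byte 0, bit 0).

static void list_extensions(const uint8_t *outbuf, size_t outlen)
{
	size_t num = MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_NUM(outlen);

	for (size_t i = 0; i < num; i++) {
		const uint8_t *ext = outbuf +
			MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_OFST +
			i * MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_LEN;
		/* ext + PLUGIN_EXTENSION_UUID_OFST is the 16-byte UUID to
		 * pass to MC_CMD_PLUGIN_ALLOC.
		 */
		uint8_t group = ext[PLUGIN_EXTENSION_ADMIN_GROUP_OFST];
		int enabled = (ext[PLUGIN_EXTENSION_FLAG_ENABLED_LBN / 8] >>
			       (PLUGIN_EXTENSION_FLAG_ENABLED_LBN % 8)) & 1;

		(void)group;
		(void)enabled;
	}
}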
-
-
-/***********************************/
-/* MC_CMD_PLUGIN_REQ
- * Send a command to a plugin. A plugin may define an arbitrary number of
- * 'messages' which it allows applications on the host system to send, each
- * identified by a 32-bit ID.
- */
-#define MC_CMD_PLUGIN_REQ 0x1b3
-#undef MC_CMD_0x1b3_PRIVILEGE_CTG
-
-#define MC_CMD_0x1b3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_PLUGIN_REQ_IN msgrequest */
-#define MC_CMD_PLUGIN_REQ_IN_LENMIN 8
-#define MC_CMD_PLUGIN_REQ_IN_LENMAX 252
-#define MC_CMD_PLUGIN_REQ_IN_LENMAX_MCDI2 1020
-#define MC_CMD_PLUGIN_REQ_IN_LEN(num) (8+1*(num))
-#define MC_CMD_PLUGIN_REQ_IN_DATA_NUM(len) (((len)-8)/1)
-/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
-#define MC_CMD_PLUGIN_REQ_IN_HANDLE_OFST 0
-#define MC_CMD_PLUGIN_REQ_IN_HANDLE_LEN 4
-/* Message ID defined by the plugin author */
-#define MC_CMD_PLUGIN_REQ_IN_ID_OFST 4
-#define MC_CMD_PLUGIN_REQ_IN_ID_LEN 4
-/* Data blob being the parameter to the message. This must be of the length
- * specified by MC_CMD_PLUGIN_GET_META_MSG_OUT_DATA_SIZE.
- */
-#define MC_CMD_PLUGIN_REQ_IN_DATA_OFST 8
-#define MC_CMD_PLUGIN_REQ_IN_DATA_LEN 1
-#define MC_CMD_PLUGIN_REQ_IN_DATA_MINNUM 0
-#define MC_CMD_PLUGIN_REQ_IN_DATA_MAXNUM 244
-#define MC_CMD_PLUGIN_REQ_IN_DATA_MAXNUM_MCDI2 1012
-
-/* MC_CMD_PLUGIN_REQ_OUT msgresponse */
-#define MC_CMD_PLUGIN_REQ_OUT_LENMIN 0
-#define MC_CMD_PLUGIN_REQ_OUT_LENMAX 252
-#define MC_CMD_PLUGIN_REQ_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_PLUGIN_REQ_OUT_LEN(num) (0+1*(num))
-#define MC_CMD_PLUGIN_REQ_OUT_DATA_NUM(len) (((len)-0)/1)
-/* The input data, as transformed and/or updated by the plugin's eBPF. Will be
- * the same size as the input DATA parameter.
- */
-#define MC_CMD_PLUGIN_REQ_OUT_DATA_OFST 0
-#define MC_CMD_PLUGIN_REQ_OUT_DATA_LEN 1
-#define MC_CMD_PLUGIN_REQ_OUT_DATA_MINNUM 0
-#define MC_CMD_PLUGIN_REQ_OUT_DATA_MAXNUM 252
-#define MC_CMD_PLUGIN_REQ_OUT_DATA_MAXNUM_MCDI2 1020
-
-/* DESC_ADDR_REGION structuredef: Describes a contiguous region of DESC_ADDR
- * space that maps to a contiguous region of TRGT_ADDR space. A DESC_ADDR in
- * the range [DESC_ADDR_BASE, DESC_ADDR_BASE + (1 << WINDOW_SIZE_LOG2)) maps
- * to TRGT_ADDR = DESC_ADDR - DESC_ADDR_BASE + TRGT_ADDR_BASE.
- */
-#define DESC_ADDR_REGION_LEN 32
-/* The start of the region in DESC_ADDR space. */
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_OFST 0
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_LEN 8
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_LO_OFST 0
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_LO_LEN 4
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_LO_LBN 0
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_LO_WIDTH 32
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_HI_OFST 4
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_HI_LEN 4
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_HI_LBN 32
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_HI_WIDTH 32
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_LBN 0
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_WIDTH 64
-/* The start of the region in TRGT_ADDR space. Drivers can set this via
- * MC_CMD_SET_DESC_ADDR_REGIONS.
- */
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_OFST 8
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LEN 8
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LO_OFST 8
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LO_LEN 4
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LO_LBN 64
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LO_WIDTH 32
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_HI_OFST 12
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_HI_LEN 4
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_HI_LBN 96
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_HI_WIDTH 32
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LBN 64
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_WIDTH 64
-/* The size of the region. */
-#define DESC_ADDR_REGION_WINDOW_SIZE_LOG2_OFST 16
-#define DESC_ADDR_REGION_WINDOW_SIZE_LOG2_LEN 4
-#define DESC_ADDR_REGION_WINDOW_SIZE_LOG2_LBN 128
-#define DESC_ADDR_REGION_WINDOW_SIZE_LOG2_WIDTH 32
-/* The alignment restriction on TRGT_ADDR. TRGT_ADDR values set by the driver
- * must be a multiple of 1 << TRGT_ADDR_ALIGN_LOG2.
- */
-#define DESC_ADDR_REGION_TRGT_ADDR_ALIGN_LOG2_OFST 20
-#define DESC_ADDR_REGION_TRGT_ADDR_ALIGN_LOG2_LEN 4
-#define DESC_ADDR_REGION_TRGT_ADDR_ALIGN_LOG2_LBN 160
-#define DESC_ADDR_REGION_TRGT_ADDR_ALIGN_LOG2_WIDTH 32
-#define DESC_ADDR_REGION_RSVD_OFST 24
-#define DESC_ADDR_REGION_RSVD_LEN 8
-#define DESC_ADDR_REGION_RSVD_LO_OFST 24
-#define DESC_ADDR_REGION_RSVD_LO_LEN 4
-#define DESC_ADDR_REGION_RSVD_LO_LBN 192
-#define DESC_ADDR_REGION_RSVD_LO_WIDTH 32
-#define DESC_ADDR_REGION_RSVD_HI_OFST 28
-#define DESC_ADDR_REGION_RSVD_HI_LEN 4
-#define DESC_ADDR_REGION_RSVD_HI_LBN 224
-#define DESC_ADDR_REGION_RSVD_HI_WIDTH 32
-#define DESC_ADDR_REGION_RSVD_LBN 192
-#define DESC_ADDR_REGION_RSVD_WIDTH 64
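Editorial sketch: the translation rule from the structuredef comment above, written out as arithmetic with a range check. A minimal illustration; parameter names are hypothetical.

#include <stdbool.h>
#include <stdint.h>

static bool desc_addr_translate(uint64_t desc_addr, uint64_t desc_base,
				uint64_t trgt_base, uint32_t window_log2,
				uint64_t *trgt_addr)
{
	uint64_t window = 1ULL << window_log2;

	if (desc_addr < desc_base || desc_addr - desc_base >= window)
		return false;	/* outside [base, base + window) */
	*trgt_addr = desc_addr - desc_base + trgt_base;
	return true;
}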
-
-
-/***********************************/
/* MC_CMD_GET_DESC_ADDR_INFO
* Returns a description of the mapping from DESC_ADDR to TRGT_ADDR for the calling function's address space.
*/
@@ -24836,122 +22037,6 @@
/***********************************/
-/* MC_CMD_GET_BOARD_ATTR
- * Retrieve physical build-level board attributes as configured at
- * manufacturing stage. Fields originate from EEPROM and per-platform constants
- * in firmware. Fields are used in development to identify/ differentiate
- * boards based on build levels/parameters, and also in manufacturing to cross
- * check "what was programmed in manufacturing" is same as "what firmware
- * thinks has been programmed" as there are two layers to translation within
- * firmware before the attributes reach this MCDI handler. Some parameters are
- * retrieved as part of other commands and therefore not replicated here. See
- * GET_VERSION_OUT.
- */
-#define MC_CMD_GET_BOARD_ATTR 0x1c6
-#undef MC_CMD_0x1c6_PRIVILEGE_CTG
-
-#define MC_CMD_0x1c6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_BOARD_ATTR_IN msgrequest */
-#define MC_CMD_GET_BOARD_ATTR_IN_LEN 0
-
-/* MC_CMD_GET_BOARD_ATTR_OUT msgresponse */
-#define MC_CMD_GET_BOARD_ATTR_OUT_LEN 16
-/* Defines board capabilities and validity of attributes returned in this
- * response-message.
- */
-#define MC_CMD_GET_BOARD_ATTR_OUT_FLAGS_OFST 0
-#define MC_CMD_GET_BOARD_ATTR_OUT_FLAGS_LEN 4
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_FAN_OFST 0
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_FAN_LBN 0
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_FAN_WIDTH 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_SOC_OFST 0
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_SOC_LBN 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_SOC_WIDTH 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_AUX_POWER_OFST 0
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_AUX_POWER_LBN 2
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_AUX_POWER_WIDTH 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_ATTRIBUTES_OFST 4
-#define MC_CMD_GET_BOARD_ATTR_OUT_ATTRIBUTES_LEN 4
-#define MC_CMD_GET_BOARD_ATTR_OUT_SOC_EE_OFST 4
-#define MC_CMD_GET_BOARD_ATTR_OUT_SOC_EE_LBN 0
-#define MC_CMD_GET_BOARD_ATTR_OUT_SOC_EE_WIDTH 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_SUC_EE_OFST 4
-#define MC_CMD_GET_BOARD_ATTR_OUT_SUC_EE_LBN 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_SUC_EE_WIDTH 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_FPGA_VOLTAGES_SUPPORTED_OFST 4
-#define MC_CMD_GET_BOARD_ATTR_OUT_FPGA_VOLTAGES_SUPPORTED_LBN 16
-#define MC_CMD_GET_BOARD_ATTR_OUT_FPGA_VOLTAGES_SUPPORTED_WIDTH 8
-/* enum: The FPGA voltage on the adapter can be set to low */
-#define MC_CMD_FPGA_VOLTAGE_LOW 0x0
-/* enum: The FPGA voltage on the adapter can be set to regular */
-#define MC_CMD_FPGA_VOLTAGE_REG 0x1
-/* enum: The FPGA voltage on the adapter can be set to high */
-#define MC_CMD_FPGA_VOLTAGE_HIGH 0x2
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_COUNT_OFST 4
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_COUNT_LBN 24
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_COUNT_WIDTH 8
-/* An array of cage types on the board */
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_OFST 8
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_LEN 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_NUM 8
-/* enum: The cages are not known */
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_UNKNOWN 0x0
-/* enum: The cages are SFP/SFP+ */
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_SFP 0x1
-/* enum: The cages are QSFP/QSFP+ */
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_QSFP 0x2
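Editorial sketch: a generic LBN/WIDTH extractor for decoding the FLAGS and ATTRIBUTES dwords above. le32toh() is assumed from glibc's <endian.h>; 'out' is a hypothetical response buffer, and the helper only handles widths below 32 bits.

#include <endian.h>
#include <stdint.h>
#include <string.h>

static uint32_t mcdi_field(const uint8_t *buf, unsigned int ofst,
			   unsigned int lbn, unsigned int width)
{
	uint32_t dword;

	memcpy(&dword, buf + ofst, 4);
	dword = le32toh(dword);		/* MCDI dwords are little-endian */
	return (dword >> lbn) & ((1u << width) - 1);	/* width < 32 */
}

/* e.g.:
 *   has_soc = mcdi_field(out, MC_CMD_GET_BOARD_ATTR_OUT_FLAGS_OFST,
 *                        MC_CMD_GET_BOARD_ATTR_OUT_HAS_SOC_LBN,
 *                        MC_CMD_GET_BOARD_ATTR_OUT_HAS_SOC_WIDTH);
 *   cages = mcdi_field(out, MC_CMD_GET_BOARD_ATTR_OUT_ATTRIBUTES_OFST,
 *                      MC_CMD_GET_BOARD_ATTR_OUT_CAGE_COUNT_LBN,
 *                      MC_CMD_GET_BOARD_ATTR_OUT_CAGE_COUNT_WIDTH);
 */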
-
-
-/***********************************/
-/* MC_CMD_GET_SOC_STATE
- * Retrieve current state of the System-on-Chip. This command is valid when
- * MC_CMD_GET_BOARD_ATTR:HAS_SOC is set.
- */
-#define MC_CMD_GET_SOC_STATE 0x1c7
-#undef MC_CMD_0x1c7_PRIVILEGE_CTG
-
-#define MC_CMD_0x1c7_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_SOC_STATE_IN msgrequest */
-#define MC_CMD_GET_SOC_STATE_IN_LEN 0
-
-/* MC_CMD_GET_SOC_STATE_OUT msgresponse */
-#define MC_CMD_GET_SOC_STATE_OUT_LEN 12
-/* Status flags for the SoC */
-#define MC_CMD_GET_SOC_STATE_OUT_FLAGS_OFST 0
-#define MC_CMD_GET_SOC_STATE_OUT_FLAGS_LEN 4
-#define MC_CMD_GET_SOC_STATE_OUT_SHOULD_THROTTLE_OFST 0
-#define MC_CMD_GET_SOC_STATE_OUT_SHOULD_THROTTLE_LBN 0
-#define MC_CMD_GET_SOC_STATE_OUT_SHOULD_THROTTLE_WIDTH 1
-#define MC_CMD_GET_SOC_STATE_OUT_OS_RECOVERY_REQUIRED_OFST 0
-#define MC_CMD_GET_SOC_STATE_OUT_OS_RECOVERY_REQUIRED_LBN 1
-#define MC_CMD_GET_SOC_STATE_OUT_OS_RECOVERY_REQUIRED_WIDTH 1
-#define MC_CMD_GET_SOC_STATE_OUT_WDT_FIRED_OFST 0
-#define MC_CMD_GET_SOC_STATE_OUT_WDT_FIRED_LBN 2
-#define MC_CMD_GET_SOC_STATE_OUT_WDT_FIRED_WIDTH 1
-/* Status fields for the SoC */
-#define MC_CMD_GET_SOC_STATE_OUT_ATTRIBUTES_OFST 4
-#define MC_CMD_GET_SOC_STATE_OUT_ATTRIBUTES_LEN 4
-#define MC_CMD_GET_SOC_STATE_OUT_RUN_STATE_OFST 4
-#define MC_CMD_GET_SOC_STATE_OUT_RUN_STATE_LBN 0
-#define MC_CMD_GET_SOC_STATE_OUT_RUN_STATE_WIDTH 8
-/* enum: Power on (set by SUC on power up) */
-#define MC_CMD_GET_SOC_STATE_OUT_SOC_BOOT 0x0
-/* enum: Running bootloader */
-#define MC_CMD_GET_SOC_STATE_OUT_SOC_BOOTLOADER 0x1
-/* enum: Bootloader has started OS. OS is booting */
-#define MC_CMD_GET_SOC_STATE_OUT_SOC_OS_START 0x2
-/* enum: OS is running */
-#define MC_CMD_GET_SOC_STATE_OUT_SOC_OS_RUNNING 0x3
-/* enum: Maintenance OS is running */
-#define MC_CMD_GET_SOC_STATE_OUT_SOC_OS_MAINTENANCE 0x4
-/* Number of SoC resets since power on */
-#define MC_CMD_GET_SOC_STATE_OUT_RESET_COUNT_OFST 8
-#define MC_CMD_GET_SOC_STATE_OUT_RESET_COUNT_LEN 4
-
-
-/***********************************/
/* MC_CMD_CHECK_SCHEDULER_CREDITS
* For debugging purposes. For each source and destination node in the hardware
* schedulers, check whether the number of credits is as it should be. This
@@ -25010,76 +22095,6 @@
/***********************************/
-/* MC_CMD_TXQ_STATS
- * Query per-TXQ statistics.
- */
-#define MC_CMD_TXQ_STATS 0x1d5
-#undef MC_CMD_0x1d5_PRIVILEGE_CTG
-
-#define MC_CMD_0x1d5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_TXQ_STATS_IN msgrequest */
-#define MC_CMD_TXQ_STATS_IN_LEN 8
-/* Instance of TXQ to retrieve statistics for */
-#define MC_CMD_TXQ_STATS_IN_INSTANCE_OFST 0
-#define MC_CMD_TXQ_STATS_IN_INSTANCE_LEN 4
-/* Flags for the request */
-#define MC_CMD_TXQ_STATS_IN_FLAGS_OFST 4
-#define MC_CMD_TXQ_STATS_IN_FLAGS_LEN 4
-#define MC_CMD_TXQ_STATS_IN_CLEAR_OFST 4
-#define MC_CMD_TXQ_STATS_IN_CLEAR_LBN 0
-#define MC_CMD_TXQ_STATS_IN_CLEAR_WIDTH 1
-
-/* MC_CMD_TXQ_STATS_OUT msgresponse */
-#define MC_CMD_TXQ_STATS_OUT_LENMIN 0
-#define MC_CMD_TXQ_STATS_OUT_LENMAX 248
-#define MC_CMD_TXQ_STATS_OUT_LENMAX_MCDI2 1016
-#define MC_CMD_TXQ_STATS_OUT_LEN(num) (0+8*(num))
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_NUM(len) (((len)-0)/8)
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_OFST 0
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LEN 8
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LO_OFST 0
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LO_LEN 4
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LO_LBN 0
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LO_WIDTH 32
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_HI_OFST 4
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_HI_LEN 4
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_HI_LBN 32
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_HI_WIDTH 32
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_MINNUM 0
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_MAXNUM 31
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_MAXNUM_MCDI2 127
-#define MC_CMD_TXQ_STATS_CTPIO_MAX_FILL 0x0 /* enum */
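Editorial sketch: reassembling the 64-bit statistics from their LO/HI dwords. 'outbuf'/'outlen' are a hypothetical response buffer and length; le32toh() is assumed from glibc's <endian.h>.

#include <endian.h>
#include <stdint.h>
#include <string.h>

static void read_txq_stats(const uint8_t *outbuf, size_t outlen)
{
	size_t n = MC_CMD_TXQ_STATS_OUT_STATISTICS_NUM(outlen);

	for (size_t i = 0; i < n; i++) {
		const uint8_t *p = outbuf +
			MC_CMD_TXQ_STATS_OUT_STATISTICS_OFST +
			i * MC_CMD_TXQ_STATS_OUT_STATISTICS_LEN;
		uint32_t lo, hi;
		uint64_t stat;

		memcpy(&lo, p, 4);	/* STATISTICS_LO */
		memcpy(&hi, p + 4, 4);	/* STATISTICS_HI */
		stat = ((uint64_t)le32toh(hi) << 32) | le32toh(lo);
		(void)stat;	/* index i selects the counter, e.g.
				 * MC_CMD_TXQ_STATS_CTPIO_MAX_FILL */
	}
}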
-
-/* FUNCTION_PERSONALITY structuredef: The meanings of the personalities are
- * defined in SF-120734-TC with more information in SF-122717-TC.
- */
-#define FUNCTION_PERSONALITY_LEN 4
-#define FUNCTION_PERSONALITY_ID_OFST 0
-#define FUNCTION_PERSONALITY_ID_LEN 4
-/* enum: Function has no assigned personality */
-#define FUNCTION_PERSONALITY_NULL 0x0
-/* enum: Function has an EF100-style function control window and VI windows
- * with both EF100 and vDPA doorbells.
- */
-#define FUNCTION_PERSONALITY_EF100 0x1
-/* enum: Function has virtio net device configuration registers and doorbells
- * for virtio queue pairs.
- */
-#define FUNCTION_PERSONALITY_VIRTIO_NET 0x2
-/* enum: Function has virtio block device configuration registers and a
- * doorbell for a single virtqueue.
- */
-#define FUNCTION_PERSONALITY_VIRTIO_BLK 0x3
-/* enum: Function is a Xilinx acceleration device - management function */
-#define FUNCTION_PERSONALITY_ACCEL_MGMT 0x4
-/* enum: Function is a Xilinx acceleration device - user function */
-#define FUNCTION_PERSONALITY_ACCEL_USR 0x5
-#define FUNCTION_PERSONALITY_ID_LBN 0
-#define FUNCTION_PERSONALITY_ID_WIDTH 32
-
-
-/***********************************/
/* MC_CMD_VIRTIO_GET_FEATURES
* Get a list of the virtio features supported by the device.
*/
@@ -25162,37 +22177,6 @@
/***********************************/
-/* MC_CMD_VIRTIO_GET_CAPABILITIES
- * Get virtio capabilities supported by the device. Returns general virtio
- * capabilities and limitations of the hardware / firmware implementation
- * (hardware device as a whole), rather than that of individual configured
- * virtio devices. At present, only the absolute maximum number of queues
- * allowed on multi-queue devices is returned. Response is expected to be
- * extended as necessary in the future.
- */
-#define MC_CMD_VIRTIO_GET_CAPABILITIES 0x1d3
-#undef MC_CMD_0x1d3_PRIVILEGE_CTG
-
-#define MC_CMD_0x1d3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_VIRTIO_GET_CAPABILITIES_IN msgrequest */
-#define MC_CMD_VIRTIO_GET_CAPABILITIES_IN_LEN 4
-/* Type of device to get capabilities for. Matches the device id as defined by
- * the virtio spec.
- */
-#define MC_CMD_VIRTIO_GET_CAPABILITIES_IN_DEVICE_ID_OFST 0
-#define MC_CMD_VIRTIO_GET_CAPABILITIES_IN_DEVICE_ID_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_VIRTIO_GET_FEATURES/MC_CMD_VIRTIO_GET_FEATURES_IN/DEVICE_ID */
-
-/* MC_CMD_VIRTIO_GET_CAPABILITIES_OUT msgresponse */
-#define MC_CMD_VIRTIO_GET_CAPABILITIES_OUT_LEN 4
-/* Maximum number of queues supported for a single device instance */
-#define MC_CMD_VIRTIO_GET_CAPABILITIES_OUT_MAX_QUEUES_OFST 0
-#define MC_CMD_VIRTIO_GET_CAPABILITIES_OUT_MAX_QUEUES_LEN 4
-
-
-/***********************************/
/* MC_CMD_VIRTIO_INIT_QUEUE
* Create a virtio virtqueue. Fails with EALREADY if the queue already exists.
* Fails with ENOSUP if a feature is requested that isn't supported. Fails with
@@ -25474,866 +22458,6 @@
#define PCIE_FUNCTION_INTF_LBN 32
#define PCIE_FUNCTION_INTF_WIDTH 32
-/* QUEUE_ID structuredef: Structure representing an absolute queue identifier
- * (absolute VI number + VI relative queue number). On Keystone, a VI can
- * contain multiple queues (at present, up to 2), each with separate controls
- * for direction. This structure is required to uniquely identify the absolute
- * source queue for descriptor proxy functions.
- */
-#define QUEUE_ID_LEN 4
-/* Absolute VI number */
-#define QUEUE_ID_ABS_VI_OFST 0
-#define QUEUE_ID_ABS_VI_LEN 2
-#define QUEUE_ID_ABS_VI_LBN 0
-#define QUEUE_ID_ABS_VI_WIDTH 16
-/* Relative queue number within the VI */
-#define QUEUE_ID_REL_QUEUE_LBN 16
-#define QUEUE_ID_REL_QUEUE_WIDTH 1
-#define QUEUE_ID_RESERVED_LBN 17
-#define QUEUE_ID_RESERVED_WIDTH 15
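Editorial sketch: packing an absolute queue identifier (absolute VI plus VI-relative queue number) into the 32-bit QUEUE_ID layout above; 'abs_vi' and 'rel_queue' are hypothetical inputs.

static uint32_t make_queue_id(unsigned int abs_vi, unsigned int rel_queue)
{
	return ((uint32_t)abs_vi & 0xffff) |		/* ABS_VI, bits 0..15 */
	       ((uint32_t)(rel_queue & 1) << QUEUE_ID_REL_QUEUE_LBN);
}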
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_CREATE
- * Descriptor proxy functions are abstract devices that forward all requests
- * submitted to the host PCIe function (descriptors submitted to Virtio or
- * EF100 queues) to be handled on another function (most commonly on the
- * embedded Application Processor), via EF100 descriptor proxy, memory-to-
- * memory and descriptor-to-completion mechanisms. The primary user is the
- * Virtio-blk subsystem; see SF-122927-TC. This function allocates a new
- * descriptor proxy function on the host and assigns a user-defined label.
- * The actual function configuration is not persisted until the caller
- * configures it with
- * MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN and commits with
- * MC_CMD_DESC_PROXY_FUNC_COMMIT_IN.
- */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE 0x172
-#undef MC_CMD_0x172_PRIVILEGE_CTG
-
-#define MC_CMD_0x172_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_CREATE_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_LEN 52
-/* PCIe Function ID to allocate (as struct PCIE_FUNCTION). Set to
- * {PF_ANY,VF_ANY,interface} for "any available function", or to
- * {PF_ANY,VF_NULL,interface} for "any available PF".
- */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LEN 8
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LO_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LO_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LO_LBN 0
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LO_WIDTH 32
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_HI_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_HI_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_HI_LBN 32
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_HI_WIDTH 32
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_PF_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_PF_LEN 2
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_VF_OFST 2
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_VF_LEN 2
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_INTF_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_INTF_LEN 4
-/* The personality to set. The meanings of the personalities are defined in
- * SF-120734-TC with more information in SF-122717-TC. At present, we only
- * support proxying for VIRTIO_BLK
- */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_PERSONALITY_OFST 8
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_PERSONALITY_LEN 4
-/* Enum values, see field(s): */
-/* FUNCTION_PERSONALITY/ID */
-/* User-defined label (zero-terminated ASCII string) to uniquely identify the
- * function
- */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_LABEL_OFST 12
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_LABEL_LEN 40
-
-/* MC_CMD_DESC_PROXY_FUNC_CREATE_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_LEN 12
-/* Handle to the descriptor proxy function */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_HANDLE_LEN 4
-/* Allocated function ID (as struct PCIE_FUNCTION) */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LEN 8
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LO_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LO_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LO_LBN 32
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LO_WIDTH 32
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_HI_OFST 8
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_HI_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_HI_LBN 64
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_HI_WIDTH 32
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_PF_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_PF_LEN 2
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_VF_OFST 6
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_VF_LEN 2
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_INTF_OFST 8
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_INTF_LEN 4
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_DESTROY
- * Remove an existing descriptor proxy function. Underlying function
- * personality and configuration revert to factory defaults. Function
- * configuration is committed immediately to the specified store and any function
- * ownership is released.
- */
-#define MC_CMD_DESC_PROXY_FUNC_DESTROY 0x173
-#undef MC_CMD_0x173_PRIVILEGE_CTG
-
-#define MC_CMD_0x173_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_DESTROY_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_LEN 44
-/* User-defined label (zero-terminated ASCII string) to uniquely identify the
- * function
- */
-#define MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_LABEL_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_LABEL_LEN 40
-/* Store from which to remove function configuration */
-#define MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_STORE_OFST 40
-#define MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_STORE_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_DESC_PROXY_FUNC_COMMIT/MC_CMD_DESC_PROXY_FUNC_COMMIT_IN/STORE */
-
-/* MC_CMD_DESC_PROXY_FUNC_DESTROY_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_DESTROY_OUT_LEN 0
-
-/* VIRTIO_BLK_CONFIG structuredef: Virtio block device configuration. See
- * Virtio specification v1.1, Sections 5.2.3 and 6 for definition of feature
- * bits. See Virtio specification v1.1, Section 5.2.4 (struct
- * virtio_blk_config) for the definition of the remaining configuration fields.
- */
-#define VIRTIO_BLK_CONFIG_LEN 68
-/* Virtio block device features to advertise, per Virtio 1.1, 5.2.3 and 6 */
-#define VIRTIO_BLK_CONFIG_FEATURES_OFST 0
-#define VIRTIO_BLK_CONFIG_FEATURES_LEN 8
-#define VIRTIO_BLK_CONFIG_FEATURES_LO_OFST 0
-#define VIRTIO_BLK_CONFIG_FEATURES_LO_LEN 4
-#define VIRTIO_BLK_CONFIG_FEATURES_LO_LBN 0
-#define VIRTIO_BLK_CONFIG_FEATURES_LO_WIDTH 32
-#define VIRTIO_BLK_CONFIG_FEATURES_HI_OFST 4
-#define VIRTIO_BLK_CONFIG_FEATURES_HI_LEN 4
-#define VIRTIO_BLK_CONFIG_FEATURES_HI_LBN 32
-#define VIRTIO_BLK_CONFIG_FEATURES_HI_WIDTH 32
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BARRIER_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BARRIER_LBN 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BARRIER_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SIZE_MAX_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SIZE_MAX_LBN 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SIZE_MAX_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SEG_MAX_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SEG_MAX_LBN 2
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SEG_MAX_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_GEOMETRY_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_GEOMETRY_LBN 4
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_GEOMETRY_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_RO_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_RO_LBN 5
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_RO_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BLK_SIZE_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BLK_SIZE_LBN 6
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BLK_SIZE_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SCSI_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SCSI_LBN 7
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SCSI_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_FLUSH_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_FLUSH_LBN 9
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_FLUSH_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_TOPOLOGY_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_TOPOLOGY_LBN 10
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_TOPOLOGY_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_CONFIG_WCE_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_CONFIG_WCE_LBN 11
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_CONFIG_WCE_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_MQ_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_MQ_LBN 12
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_MQ_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_DISCARD_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_DISCARD_LBN 13
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_DISCARD_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_WRITE_ZEROES_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_WRITE_ZEROES_LBN 14
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_WRITE_ZEROES_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_INDIRECT_DESC_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_INDIRECT_DESC_LBN 28
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_INDIRECT_DESC_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_EVENT_IDX_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_EVENT_IDX_LBN 29
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_EVENT_IDX_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_VERSION_1_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_VERSION_1_LBN 32
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_VERSION_1_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_ACCESS_PLATFORM_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_ACCESS_PLATFORM_LBN 33
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_ACCESS_PLATFORM_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_PACKED_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_PACKED_LBN 34
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_PACKED_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_IN_ORDER_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_IN_ORDER_LBN 35
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_IN_ORDER_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_ORDER_PLATFORM_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_ORDER_PLATFORM_LBN 36
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_ORDER_PLATFORM_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_SR_IOV_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_SR_IOV_LBN 37
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_SR_IOV_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_NOTIFICATION_DATA_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_NOTIFICATION_DATA_LBN 38
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_NOTIFICATION_DATA_WIDTH 1
-#define VIRTIO_BLK_CONFIG_FEATURES_LBN 0
-#define VIRTIO_BLK_CONFIG_FEATURES_WIDTH 64
-/* The capacity of the device (expressed in 512-byte sectors) */
-#define VIRTIO_BLK_CONFIG_CAPACITY_OFST 8
-#define VIRTIO_BLK_CONFIG_CAPACITY_LEN 8
-#define VIRTIO_BLK_CONFIG_CAPACITY_LO_OFST 8
-#define VIRTIO_BLK_CONFIG_CAPACITY_LO_LEN 4
-#define VIRTIO_BLK_CONFIG_CAPACITY_LO_LBN 64
-#define VIRTIO_BLK_CONFIG_CAPACITY_LO_WIDTH 32
-#define VIRTIO_BLK_CONFIG_CAPACITY_HI_OFST 12
-#define VIRTIO_BLK_CONFIG_CAPACITY_HI_LEN 4
-#define VIRTIO_BLK_CONFIG_CAPACITY_HI_LBN 96
-#define VIRTIO_BLK_CONFIG_CAPACITY_HI_WIDTH 32
-#define VIRTIO_BLK_CONFIG_CAPACITY_LBN 64
-#define VIRTIO_BLK_CONFIG_CAPACITY_WIDTH 64
-/* Maximum size of any single segment. Only valid when VIRTIO_BLK_F_SIZE_MAX is
- * set.
- */
-#define VIRTIO_BLK_CONFIG_SIZE_MAX_OFST 16
-#define VIRTIO_BLK_CONFIG_SIZE_MAX_LEN 4
-#define VIRTIO_BLK_CONFIG_SIZE_MAX_LBN 128
-#define VIRTIO_BLK_CONFIG_SIZE_MAX_WIDTH 32
-/* Maximum number of segments in a request. Only valid when
- * VIRTIO_BLK_F_SEG_MAX is set.
- */
-#define VIRTIO_BLK_CONFIG_SEG_MAX_OFST 20
-#define VIRTIO_BLK_CONFIG_SEG_MAX_LEN 4
-#define VIRTIO_BLK_CONFIG_SEG_MAX_LBN 160
-#define VIRTIO_BLK_CONFIG_SEG_MAX_WIDTH 32
-/* Disk-style geometry - cylinders. Only valid when VIRTIO_BLK_F_GEOMETRY is
- * set.
- */
-#define VIRTIO_BLK_CONFIG_CYLINDERS_OFST 24
-#define VIRTIO_BLK_CONFIG_CYLINDERS_LEN 2
-#define VIRTIO_BLK_CONFIG_CYLINDERS_LBN 192
-#define VIRTIO_BLK_CONFIG_CYLINDERS_WIDTH 16
-/* Disk-style geometry - heads. Only valid when VIRTIO_BLK_F_GEOMETRY is set.
- */
-#define VIRTIO_BLK_CONFIG_HEADS_OFST 26
-#define VIRTIO_BLK_CONFIG_HEADS_LEN 1
-#define VIRTIO_BLK_CONFIG_HEADS_LBN 208
-#define VIRTIO_BLK_CONFIG_HEADS_WIDTH 8
-/* Disk-style geometry - sectors. Only valid when VIRTIO_BLK_F_GEOMETRY is set.
- */
-#define VIRTIO_BLK_CONFIG_SECTORS_OFST 27
-#define VIRTIO_BLK_CONFIG_SECTORS_LEN 1
-#define VIRTIO_BLK_CONFIG_SECTORS_LBN 216
-#define VIRTIO_BLK_CONFIG_SECTORS_WIDTH 8
-/* Block size of disk. Only valid when VIRTIO_BLK_F_BLK_SIZE is set. */
-#define VIRTIO_BLK_CONFIG_BLK_SIZE_OFST 28
-#define VIRTIO_BLK_CONFIG_BLK_SIZE_LEN 4
-#define VIRTIO_BLK_CONFIG_BLK_SIZE_LBN 224
-#define VIRTIO_BLK_CONFIG_BLK_SIZE_WIDTH 32
-/* Block topology - number of logical blocks per physical block (log2). Only
- * valid when VIRTIO_BLK_F_TOPOLOGY is set.
- */
-#define VIRTIO_BLK_CONFIG_PHYSICAL_BLOCK_EXP_OFST 32
-#define VIRTIO_BLK_CONFIG_PHYSICAL_BLOCK_EXP_LEN 1
-#define VIRTIO_BLK_CONFIG_PHYSICAL_BLOCK_EXP_LBN 256
-#define VIRTIO_BLK_CONFIG_PHYSICAL_BLOCK_EXP_WIDTH 8
-/* Block topology - offset of first aligned logical block. Only valid when
- * VIRTIO_BLK_F_TOPOLOGY is set.
- */
-#define VIRTIO_BLK_CONFIG_ALIGNMENT_OFFSET_OFST 33
-#define VIRTIO_BLK_CONFIG_ALIGNMENT_OFFSET_LEN 1
-#define VIRTIO_BLK_CONFIG_ALIGNMENT_OFFSET_LBN 264
-#define VIRTIO_BLK_CONFIG_ALIGNMENT_OFFSET_WIDTH 8
-/* Block topology - suggested minimum I/O size in blocks. Only valid when
- * VIRTIO_BLK_F_TOPOLOGY is set.
- */
-#define VIRTIO_BLK_CONFIG_MIN_IO_SIZE_OFST 34
-#define VIRTIO_BLK_CONFIG_MIN_IO_SIZE_LEN 2
-#define VIRTIO_BLK_CONFIG_MIN_IO_SIZE_LBN 272
-#define VIRTIO_BLK_CONFIG_MIN_IO_SIZE_WIDTH 16
-/* Block topology - optimal (suggested maximum) I/O size in blocks. Only valid
- * when VIRTIO_BLK_F_TOPOLOGY is set.
- */
-#define VIRTIO_BLK_CONFIG_OPT_IO_SIZE_OFST 36
-#define VIRTIO_BLK_CONFIG_OPT_IO_SIZE_LEN 4
-#define VIRTIO_BLK_CONFIG_OPT_IO_SIZE_LBN 288
-#define VIRTIO_BLK_CONFIG_OPT_IO_SIZE_WIDTH 32
-/* Unused, set to zero. Note that virtio_blk_config.writeback is volatile and
- * not carried in config data.
- */
-#define VIRTIO_BLK_CONFIG_UNUSED0_OFST 40
-#define VIRTIO_BLK_CONFIG_UNUSED0_LEN 2
-#define VIRTIO_BLK_CONFIG_UNUSED0_LBN 320
-#define VIRTIO_BLK_CONFIG_UNUSED0_WIDTH 16
-/* Number of queues. Only valid if the VIRTIO_BLK_F_MQ feature is negotiated.
- */
-#define VIRTIO_BLK_CONFIG_NUM_QUEUES_OFST 42
-#define VIRTIO_BLK_CONFIG_NUM_QUEUES_LEN 2
-#define VIRTIO_BLK_CONFIG_NUM_QUEUES_LBN 336
-#define VIRTIO_BLK_CONFIG_NUM_QUEUES_WIDTH 16
-/* Maximum discard sectors size, in 512-byte units. Only valid if
- * VIRTIO_BLK_F_DISCARD is set.
- */
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SECTORS_OFST 44
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SECTORS_LEN 4
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SECTORS_LBN 352
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SECTORS_WIDTH 32
-/* Maximum discard segment number. Only valid if VIRTIO_BLK_F_DISCARD is set.
- */
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SEG_OFST 48
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SEG_LEN 4
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SEG_LBN 384
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SEG_WIDTH 32
-/* Discard sector alignment, in 512-byte units. Only valid if
- * VIRTIO_BLK_F_DISCARD is set.
- */
-#define VIRTIO_BLK_CONFIG_DISCARD_SECTOR_ALIGNMENT_OFST 52
-#define VIRTIO_BLK_CONFIG_DISCARD_SECTOR_ALIGNMENT_LEN 4
-#define VIRTIO_BLK_CONFIG_DISCARD_SECTOR_ALIGNMENT_LBN 416
-#define VIRTIO_BLK_CONFIG_DISCARD_SECTOR_ALIGNMENT_WIDTH 32
-/* Maximum write zeroes sectors size, in 512-byte units. Only valid if
- * VIRTIO_BLK_F_WRITE_ZEROES is set.
- */
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SECTORS_OFST 56
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SECTORS_LEN 4
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SECTORS_LBN 448
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SECTORS_WIDTH 32
-/* Maximum write zeroes segment number. Only valid if VIRTIO_BLK_F_WRITE_ZEROES
- * is set.
- */
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SEG_OFST 60
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SEG_LEN 4
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SEG_LBN 480
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SEG_WIDTH 32
-/* Write zeroes request can result in deallocating one or more sectors. Only
- * valid if VIRTIO_BLK_F_WRITE_ZEROES is set.
- */
-#define VIRTIO_BLK_CONFIG_WRITE_ZEROES_MAY_UNMAP_OFST 64
-#define VIRTIO_BLK_CONFIG_WRITE_ZEROES_MAY_UNMAP_LEN 1
-#define VIRTIO_BLK_CONFIG_WRITE_ZEROES_MAY_UNMAP_LBN 512
-#define VIRTIO_BLK_CONFIG_WRITE_ZEROES_MAY_UNMAP_WIDTH 8
-/* Unused, set to zero. */
-#define VIRTIO_BLK_CONFIG_UNUSED1_OFST 65
-#define VIRTIO_BLK_CONFIG_UNUSED1_LEN 3
-#define VIRTIO_BLK_CONFIG_UNUSED1_LBN 520
-#define VIRTIO_BLK_CONFIG_UNUSED1_WIDTH 24
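Editorial sketch: filling the mandatory fields of a VIRTIO_BLK_CONFIG blob for a disk of 'capacity_sectors' 512-byte sectors, advertising only VIRTIO_F_VERSION_1. The direct memcpy of host integers assumes a little-endian host, matching the MCDI wire format; a real caller would also populate the feature-dependent fields.

#include <stdint.h>
#include <string.h>

static void fill_blk_config(uint8_t cfg[VIRTIO_BLK_CONFIG_LEN],
			    uint64_t capacity_sectors)
{
	uint64_t features = 1ULL << VIRTIO_BLK_CONFIG_VIRTIO_F_VERSION_1_LBN;

	memset(cfg, 0, VIRTIO_BLK_CONFIG_LEN);
	memcpy(cfg + VIRTIO_BLK_CONFIG_FEATURES_OFST, &features, 8);
	memcpy(cfg + VIRTIO_BLK_CONFIG_CAPACITY_OFST, &capacity_sectors, 8);
}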
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_CONFIG_SET
- * Set configuration for an existing descriptor proxy function. Configuration
- * data must match function personality. The actual function configuration is
- * not persisted until the caller commits with MC_CMD_DESC_PROXY_FUNC_COMMIT_IN.
- */
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET 0x174
-#undef MC_CMD_0x174_PRIVILEGE_CTG
-
-#define MC_CMD_0x174_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LENMIN 20
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LENMAX 252
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LENMAX_MCDI2 1020
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LEN(num) (20+1*(num))
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_NUM(len) (((len)-20)/1)
-/* Handle to descriptor proxy function (as returned by
- * MC_CMD_DESC_PROXY_FUNC_OPEN)
- */
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_HANDLE_LEN 4
-/* Reserved for future extension, set to zero. */
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_RESERVED_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_RESERVED_LEN 16
-/* Configuration data. Format of configuration data is determined implicitly
- * from function personality referred to by HANDLE. Currently, only supported
- * format is VIRTIO_BLK_CONFIG.
- */
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_OFST 20
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_LEN 1
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_MINNUM 0
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_MAXNUM 232
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_MAXNUM_MCDI2 1000
-
-/* MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_COMMIT
- * Commit function configuration to non-volatile or volatile store. Once
- * configuration is applied to hardware (which may happen immediately or on
- * next function/device reset), a DESC_PROXY_FUNC_CONFIG_SET MCDI event will be
- * delivered to the caller's MCDI event queue.
- */
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT 0x175
-#undef MC_CMD_0x175_PRIVILEGE_CTG
-
-#define MC_CMD_0x175_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_COMMIT_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_LEN 8
-/* Handle to descriptor proxy function (as returned by
- * MC_CMD_DESC_PROXY_FUNC_OPEN)
- */
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_HANDLE_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_STORE_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_STORE_LEN 4
-/* enum: Store into non-volatile (dynamic) config */
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_NON_VOLATILE 0x0
-/* enum: Store into volatile (ephemeral) config */
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_VOLATILE 0x1
-
-/* MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT_LEN 4
-/* Generation count to be delivered in an event once configuration becomes live
- */
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT_CONFIG_GENERATION_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT_CONFIG_GENERATION_LEN 4
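Editorial sketch of the lifecycle the comments above describe: CREATE yields a handle, CONFIG_SET stages a personality-specific blob, and COMMIT makes it persistent, completing asynchronously via a DESC_PROXY_FUNC_CONFIG_SET event carrying CONFIG_GENERATION. mcdi_rpc() is a hypothetical dispatcher, and the memcpy of host integers assumes a little-endian host.

#include <stdint.h>
#include <string.h>

static int commit_func(uint32_t handle)
{
	uint8_t in[MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_LEN] = { 0 };
	uint8_t out[MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT_LEN];
	uint32_t store = MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_VOLATILE;

	memcpy(in + MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_HANDLE_OFST, &handle, 4);
	memcpy(in + MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_STORE_OFST, &store, 4);
	/* On success the response carries CONFIG_GENERATION at OFST 0. */
	return mcdi_rpc(MC_CMD_DESC_PROXY_FUNC_COMMIT, in, sizeof(in),
			out, sizeof(out));
}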
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_OPEN
- * Retrieve a handle for an existing descriptor proxy function. Returns an
- * integer handle, valid until the function is deallocated, the MC is rebooted
- * or the device is power-cycled. Returns ENODEV if no function with the given
- * label exists.
- */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN 0x176
-#undef MC_CMD_0x176_PRIVILEGE_CTG
-
-#define MC_CMD_0x176_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_OPEN_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_IN_LEN 40
-/* User-defined label (zero-terminated ASCII string) to uniquely identify the
- * function
- */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_IN_LABEL_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_IN_LABEL_LEN 40
-
-/* MC_CMD_DESC_PROXY_FUNC_OPEN_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LENMIN 40
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LENMAX 252
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LEN(num) (40+1*(num))
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_NUM(len) (((len)-40)/1)
-/* Handle to the descriptor proxy function */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_HANDLE_LEN 4
-/* PCIe Function ID (as struct PCIE_FUNCTION) */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LEN 8
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LO_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LO_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LO_LBN 32
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LO_WIDTH 32
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_HI_OFST 8
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_HI_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_HI_LBN 64
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_HI_WIDTH 32
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_PF_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_PF_LEN 2
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_VF_OFST 6
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_VF_LEN 2
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_INTF_OFST 8
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_INTF_LEN 4
-/* Function personality */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_PERSONALITY_OFST 12
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_PERSONALITY_LEN 4
-/* Enum values, see field(s): */
-/* FUNCTION_PERSONALITY/ID */
-/* Function configuration state */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_STATUS_OFST 16
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_STATUS_LEN 4
-/* enum: Function configuration is visible to the host (live) */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LIVE 0x0
-/* enum: Function configuration is pending reset */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_PENDING 0x1
-/* enum: Function configuration is missing (created, but no configuration
- * committed)
- */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_UNCONFIGURED 0x2
-/* Generation count to be delivered in an event once the configuration becomes
- * live (if status is "pending")
- */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_GENERATION_OFST 20
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_GENERATION_LEN 4
-/* Reserved for future extension, set to zero. */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_RESERVED_OFST 24
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_RESERVED_LEN 16
-/* Configuration data corresponding to function personality. Currently, only
- * supported format is VIRTIO_BLK_CONFIG. Not valid if status is UNCONFIGURED.
- */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_OFST 40
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_LEN 1
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_MINNUM 0
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_MAXNUM 212
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_MAXNUM_MCDI2 980
-
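As an illustrative aside, a minimal sketch of driving this command with the sfc driver's MCDI helpers (MCDI_DECLARE_BUF, MCDI_PTR, MCDI_DWORD and efx_mcdi_rpc, as used elsewhere in this tree); the wrapper name desc_proxy_open and the error handling are hypothetical, not part of this patch:

static int desc_proxy_open(struct efx_nic *efx, const char *label, u32 *handle)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_DESC_PROXY_FUNC_OPEN_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LENMAX);
	size_t outlen;
	int rc;

	/* LABEL is a fixed 40-byte, zero-terminated ASCII field */
	strscpy(MCDI_PTR(inbuf, DESC_PROXY_FUNC_OPEN_IN_LABEL), label,
		MC_CMD_DESC_PROXY_FUNC_OPEN_IN_LABEL_LEN);

	rc = efx_mcdi_rpc(efx, MC_CMD_DESC_PROXY_FUNC_OPEN, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;	/* ENODEV if no function has this label */

	*handle = MCDI_DWORD(outbuf, DESC_PROXY_FUNC_OPEN_OUT_HANDLE);
	return 0;
}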
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_CLOSE
- * Releases a handle for an open descriptor proxy function. If proxying was
- * enabled on the device, the caller is expected to gracefully stop it using
- * MC_CMD_DESC_PROXY_FUNC_DISABLE prior to calling this function. Closing an
- * active device without disabling proxying will result in forced close, which
- * will put the device into a failed state and signal the host driver of the
- * error (for virtio, DEVICE_NEEDS_RESET flag would be set on the host side)
- */
-#define MC_CMD_DESC_PROXY_FUNC_CLOSE 0x1a1
-#undef MC_CMD_0x1a1_PRIVILEGE_CTG
-
-#define MC_CMD_0x1a1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_CLOSE_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_CLOSE_IN_LEN 4
-/* Handle to the descriptor proxy function */
-#define MC_CMD_DESC_PROXY_FUNC_CLOSE_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_CLOSE_IN_HANDLE_LEN 4
-
-/* MC_CMD_DESC_PROXY_FUNC_CLOSE_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_CLOSE_OUT_LEN 0
-
-/* DESC_PROXY_FUNC_MAP structuredef */
-#define DESC_PROXY_FUNC_MAP_LEN 52
-/* PCIe function ID (as struct PCIE_FUNCTION) */
-#define DESC_PROXY_FUNC_MAP_FUNC_OFST 0
-#define DESC_PROXY_FUNC_MAP_FUNC_LEN 8
-#define DESC_PROXY_FUNC_MAP_FUNC_LO_OFST 0
-#define DESC_PROXY_FUNC_MAP_FUNC_LO_LEN 4
-#define DESC_PROXY_FUNC_MAP_FUNC_LO_LBN 0
-#define DESC_PROXY_FUNC_MAP_FUNC_LO_WIDTH 32
-#define DESC_PROXY_FUNC_MAP_FUNC_HI_OFST 4
-#define DESC_PROXY_FUNC_MAP_FUNC_HI_LEN 4
-#define DESC_PROXY_FUNC_MAP_FUNC_HI_LBN 32
-#define DESC_PROXY_FUNC_MAP_FUNC_HI_WIDTH 32
-#define DESC_PROXY_FUNC_MAP_FUNC_LBN 0
-#define DESC_PROXY_FUNC_MAP_FUNC_WIDTH 64
-#define DESC_PROXY_FUNC_MAP_FUNC_PF_OFST 0
-#define DESC_PROXY_FUNC_MAP_FUNC_PF_LEN 2
-#define DESC_PROXY_FUNC_MAP_FUNC_PF_LBN 0
-#define DESC_PROXY_FUNC_MAP_FUNC_PF_WIDTH 16
-#define DESC_PROXY_FUNC_MAP_FUNC_VF_OFST 2
-#define DESC_PROXY_FUNC_MAP_FUNC_VF_LEN 2
-#define DESC_PROXY_FUNC_MAP_FUNC_VF_LBN 16
-#define DESC_PROXY_FUNC_MAP_FUNC_VF_WIDTH 16
-#define DESC_PROXY_FUNC_MAP_FUNC_INTF_OFST 4
-#define DESC_PROXY_FUNC_MAP_FUNC_INTF_LEN 4
-#define DESC_PROXY_FUNC_MAP_FUNC_INTF_LBN 32
-#define DESC_PROXY_FUNC_MAP_FUNC_INTF_WIDTH 32
-/* Function personality */
-#define DESC_PROXY_FUNC_MAP_PERSONALITY_OFST 8
-#define DESC_PROXY_FUNC_MAP_PERSONALITY_LEN 4
-/* Enum values, see field(s): */
-/* FUNCTION_PERSONALITY/ID */
-#define DESC_PROXY_FUNC_MAP_PERSONALITY_LBN 64
-#define DESC_PROXY_FUNC_MAP_PERSONALITY_WIDTH 32
-/* User-defined label (zero-terminated ASCII string) to uniquely identify the
- * function
- */
-#define DESC_PROXY_FUNC_MAP_LABEL_OFST 12
-#define DESC_PROXY_FUNC_MAP_LABEL_LEN 40
-#define DESC_PROXY_FUNC_MAP_LABEL_LBN 96
-#define DESC_PROXY_FUNC_MAP_LABEL_WIDTH 320
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_ENUM
- * Enumerate existing descriptor proxy functions
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENUM 0x177
-#undef MC_CMD_0x177_PRIVILEGE_CTG
-
-#define MC_CMD_0x177_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_ENUM_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_IN_LEN 4
-/* Starting index, set to 0 on first request. See
- * MC_CMD_DESC_PROXY_FUNC_ENUM_OUT/FLAGS.
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_IN_START_IDX_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_IN_START_IDX_LEN 4
-
-/* MC_CMD_DESC_PROXY_FUNC_ENUM_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_LENMIN 4
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_LENMAX 212
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_LENMAX_MCDI2 992
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_LEN(num) (4+52*(num))
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_NUM(len) (((len)-4)/52)
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FLAGS_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FLAGS_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_MORE_DATA_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_MORE_DATA_LBN 0
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_MORE_DATA_WIDTH 1
-/* Function map, as array of DESC_PROXY_FUNC_MAP */
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_LEN 52
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_MINNUM 0
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_MAXNUM 4
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_MAXNUM_MCDI2 19
-
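A hedged sketch of the paging contract implied by START_IDX and MORE_DATA: re-issue the command while MORE_DATA remains set, advancing START_IDX by the running entry count (that exact START_IDX semantic is an assumption here, and entry parsing is elided):

static int desc_proxy_enum_all(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_DESC_PROXY_FUNC_ENUM_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_LENMAX);
	u32 start_idx = 0, flags;
	size_t outlen;
	int rc;

	do {
		MCDI_SET_DWORD(inbuf, DESC_PROXY_FUNC_ENUM_IN_START_IDX,
			       start_idx);
		rc = efx_mcdi_rpc(efx, MC_CMD_DESC_PROXY_FUNC_ENUM,
				  inbuf, sizeof(inbuf),
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			return rc;
		/* each returned entry is one 52-byte DESC_PROXY_FUNC_MAP */
		start_idx += MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_NUM(outlen);
		flags = MCDI_DWORD(outbuf, DESC_PROXY_FUNC_ENUM_OUT_FLAGS);
	} while (flags & BIT(MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_MORE_DATA_LBN));

	return 0;
}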
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_ENABLE
- * Enable descriptor proxying for function into target event queue. Returns VI
- * allocation info for the proxy source function, so that the caller can map
- * absolute VI IDs from descriptor proxy events back to the originating
- * function. This is a legacy function that only supports single queue proxy
- * devices. It is also limited in that it can only be called after host driver
- * attach (once VI allocation is known) and will return MC_CMD_ERR_ENOTCONN
- * otherwise. For new code, see MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE which
- * supports multi-queue devices and has no dependency on host driver attach.
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE 0x178
-#undef MC_CMD_0x178_PRIVILEGE_CTG
-
-#define MC_CMD_0x178_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_ENABLE_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_LEN 8
-/* Handle to descriptor proxy function (as returned by
- * MC_CMD_DESC_PROXY_FUNC_OPEN)
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_HANDLE_LEN 4
-/* Descriptor proxy sink queue (caller function relative). Must be an
- * extended-width event queue.
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_TARGET_EVQ_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_TARGET_EVQ_LEN 4
-
-/* MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_LEN 8
-/* The number of VIs allocated on the function */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_VI_COUNT_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_VI_COUNT_LEN 4
-/* The base absolute VI number allocated to the function. */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_VI_BASE_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_VI_BASE_LEN 4
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE
- * Enable descriptor proxying for a source queue on a host function into target
- * event queue. Source queue number is a relative virtqueue number on the
- * source function (0 to max_virtqueues-1). For a multi-queue device, the
- * caller must enable all source queues individually. To retrieve absolute VI
- * information for the source function (so that VI IDs from descriptor proxy
- * events can be mapped back to source function / queue) see
- * MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE 0x1d0
-#undef MC_CMD_0x1d0_PRIVILEGE_CTG
-
-#define MC_CMD_0x1d0_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_LEN 12
-/* Handle to descriptor proxy function (as returned by
- * MC_CMD_DESC_PROXY_FUNC_OPEN)
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_HANDLE_LEN 4
-/* Source relative queue number to enable proxying on */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_SOURCE_QUEUE_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_SOURCE_QUEUE_LEN 4
-/* Descriptor proxy sink queue (caller function relative). Must be an
- * extended-width event queue.
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_TARGET_EVQ_OFST 8
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_TARGET_EVQ_LEN 4
-
-/* MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_DISABLE
- * Disable descriptor proxying for function. For multi-queue functions,
- * disables all queues.
- */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE 0x179
-#undef MC_CMD_0x179_PRIVILEGE_CTG
-
-#define MC_CMD_0x179_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_DISABLE_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_IN_LEN 4
-/* Handle to descriptor proxy function (as returned by
- * MC_CMD_DESC_PROXY_FUNC_OPEN)
- */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_IN_HANDLE_LEN 4
-
-/* MC_CMD_DESC_PROXY_FUNC_DISABLE_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE
- * Disable descriptor proxying for a specific source queue on a function.
- */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE 0x1d1
-#undef MC_CMD_0x1d1_PRIVILEGE_CTG
-
-#define MC_CMD_0x1d1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_LEN 8
-/* Handle to descriptor proxy function (as returned by
- * MC_CMD_DESC_PROXY_FUNC_OPEN)
- */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_HANDLE_LEN 4
-/* Source relative queue number to disable proxying on */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_SOURCE_QUEUE_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_SOURCE_QUEUE_LEN 4
-
-/* MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_GET_VI_INFO
- * Returns absolute VI allocation information for the descriptor proxy source
- * function referenced by HANDLE, so that the caller can map absolute VI IDs
- * from descriptor proxy events back to the originating function and queue. The
- * call is only valid after the host driver for the source function has
- * attached (after receiving a driver attach event for the descriptor proxy
- * function) and will fail with ENOTCONN otherwise.
- */
-#define MC_CMD_DESC_PROXY_GET_VI_INFO 0x1d2
-#undef MC_CMD_0x1d2_PRIVILEGE_CTG
-
-#define MC_CMD_0x1d2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_GET_VI_INFO_IN msgrequest */
-#define MC_CMD_DESC_PROXY_GET_VI_INFO_IN_LEN 4
-/* Handle to descriptor proxy function (as returned by
- * MC_CMD_DESC_PROXY_FUNC_OPEN)
- */
-#define MC_CMD_DESC_PROXY_GET_VI_INFO_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_GET_VI_INFO_IN_HANDLE_LEN 4
-
-/* MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_LENMIN 0
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_LENMAX 252
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_NUM(len) (((len)-0)/4)
-/* VI information (VI ID + VI relative queue number) for each of the source
- * queues (in order from 0 to max_virtqueues-1), as array of QUEUE_ID
- * structures.
- */
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_MINNUM 0
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_MAXNUM 63
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_MAXNUM_MCDI2 255
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_ABS_VI_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_ABS_VI_LEN 2
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_REL_QUEUE_LBN 16
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_REL_QUEUE_WIDTH 1
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_RESERVED_LBN 17
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_RESERVED_WIDTH 15
-
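Each VI_MAP element is a packed 32-bit QUEUE_ID; a small sketch of decoding one entry per the ABS_VI/REL_QUEUE layout above (the helper name is hypothetical, and the entry's array index is the source queue number):

static void decode_vi_map_entry(u32 entry, u16 *abs_vi, bool *rel_queue)
{
	/* ABS_VI occupies bits 0..15 of the entry */
	*abs_vi = entry & 0xffff;
	/* REL_QUEUE is the single bit at LBN 16 */
	*rel_queue = !!(entry &
		BIT(MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_REL_QUEUE_LBN));
}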
-
-/***********************************/
-/* MC_CMD_GET_ADDR_SPC_ID
- * Get Address space identifier for use in mem2mem descriptors for a given
- * target. See SF-120734-TC for details on ADDR_SPC_IDs and mem2mem
- * descriptors.
- */
-#define MC_CMD_GET_ADDR_SPC_ID 0x1a0
-#undef MC_CMD_0x1a0_PRIVILEGE_CTG
-
-#define MC_CMD_0x1a0_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_GET_ADDR_SPC_ID_IN msgrequest */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_LEN 16
-/* Resource type to get ADDR_SPC_ID for */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_TYPE_OFST 0
-#define MC_CMD_GET_ADDR_SPC_ID_IN_TYPE_LEN 4
-/* enum: Address space ID for host/AP memory DMA over the same interface this
- * MCDI was called on
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_SELF 0x0
-/* enum: Address space ID for host/AP memory DMA via PCI interface and function
- * specified by FUNC
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_PCI_FUNC 0x1
-/* enum: Address space ID for host/AP memory DMA via PCI interface and function
- * specified by FUNC with PASID value specified by PASID
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_PCI_FUNC_PASID 0x2
-/* enum: Address space ID for host/AP memory DMA via PCI interface and function
- * specified by FUNC with PASID value of relative VI specified by VI
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_REL_VI 0x3
-/* enum: Address space ID for host/AP memory DMA via PCI interface, function
- * and PASID value of absolute VI specified by VI
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_ABS_VI 0x4
-/* enum: Address space ID for host memory DMA via PCI interface and function of
- * descriptor proxy function specified by HANDLE
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_DESC_PROXY_HANDLE 0x5
-/* enum: Address space ID for DMA to/from MC memory */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_MC_MEM 0x6
-/* enum: Address space ID for DMA to/from other SmartNIC memory (on-chip, DDR)
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_NIC_MEM 0x7
-/* PCIe Function ID (as struct PCIE_FUNCTION). Only valid if TYPE is PCI_FUNC,
- * PCI_FUNC_PASID or REL_VI.
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_OFST 4
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LEN 8
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LO_OFST 4
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LO_LEN 4
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LO_LBN 32
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LO_WIDTH 32
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_HI_OFST 8
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_HI_LEN 4
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_HI_LBN 64
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_HI_WIDTH 32
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_PF_OFST 4
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_PF_LEN 2
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_VF_OFST 6
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_VF_LEN 2
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_INTF_OFST 8
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_INTF_LEN 4
-/* PASID value. Only valid if TYPE is PCI_FUNC_PASID. */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_PASID_OFST 12
-#define MC_CMD_GET_ADDR_SPC_ID_IN_PASID_LEN 4
-/* Relative or absolute VI number. Only valid if TYPE is REL_VI or ABS_VI */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_VI_OFST 12
-#define MC_CMD_GET_ADDR_SPC_ID_IN_VI_LEN 4
-/* Descriptor proxy function handle. Only valid if TYPE is DESC_PROXY_HANDLE.
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_HANDLE_OFST 4
-#define MC_CMD_GET_ADDR_SPC_ID_IN_HANDLE_LEN 4
-
-/* MC_CMD_GET_ADDR_SPC_ID_OUT msgresponse */
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_LEN 8
-/* Address Space ID for the requested target. Only the lower 36 bits are valid
- * in the current SmartNIC implementation.
- */
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_OFST 0
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LEN 8
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_OFST 0
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_LEN 4
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_LBN 0
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_WIDTH 32
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_OFST 4
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_LEN 4
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_LBN 32
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_WIDTH 32
-
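As a worked example, a sketch of requesting the caller's own address space ID (TYPE=SELF, where no FUNC/PASID/VI qualifier is needed); the wrapper name is hypothetical:

static int get_self_addr_spc_id(struct efx_nic *efx, u64 *addr_spc_id)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ADDR_SPC_ID_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ADDR_SPC_ID_OUT_LEN);
	size_t outlen;
	int rc;

	/* SELF: DMA over the same interface this MCDI was called on */
	MCDI_SET_DWORD(inbuf, GET_ADDR_SPC_ID_IN_TYPE,
		       MC_CMD_GET_ADDR_SPC_ID_IN_SELF);
	rc = efx_mcdi_rpc(efx, MC_CMD_GET_ADDR_SPC_ID, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;

	/* only the low 36 bits are meaningful on current hardware */
	*addr_spc_id = MCDI_QWORD(outbuf, GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID);
	return 0;
}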
/***********************************/
/* MC_CMD_GET_CLIENT_HANDLE
@@ -26359,7 +22483,8 @@
* INTF=CALLER, PF=PF_NULL, VF=... to refer to a VF child of the calling PF or
* a sibling VF of the calling VF. - INTF=CALLER, PF=..., VF=VF_NULL to refer
* to a PF on the calling interface - INTF=CALLER, PF=..., VF=... to refer to a
- * VF on the calling interface - INTF=..., PF=..., VF=VF_NULL to refer to a PF
+ * VF on the calling interface - INTF=..., PF=PF_NULL, VF=VF_NULL to refer to
+ * the named interface itself - INTF=..., PF=..., VF=VF_NULL to refer to a PF
* on a named interface - INTF=..., PF=..., VF=... to refer to a VF on a named
* interface where ... refers to a small integer for the VF/PF fields, and to
 * values from the PCIE_INTERFACE enum for the INTF field. It's only
@@ -26380,6 +22505,7 @@
* backwards compatibility only, callers should use PCIE_INTERFACE_CALLER.
*/
#define MC_CMD_GET_CLIENT_HANDLE_IN_PCIE_FUNCTION_INTF_NULL 0xffffffff
+/* See structuredef: PCIE_FUNCTION */
#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_PF_OFST 4
#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_PF_LEN 2
#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_VF_OFST 6
@@ -27350,7 +23476,7 @@
/* MAE_MPORT_SELECTOR structuredef: MPORTS are identified by an opaque unsigned
 * integer value (mport_id) that is guaranteed to be representable within
 * 32 bits or within any NIC interface field that needs to store the value
- * (whichever is narrowers). This selector structure provides a stable way to
+ * (whichever is narrower). This selector structure provides a stable way to
* refer to m-ports.
*/
#define MAE_MPORT_SELECTOR_LEN 4
@@ -27425,10 +23551,22 @@
#define MAE_MPORT_SELECTOR_FLAT_WIDTH 32
/* MAE_LINK_ENDPOINT_SELECTOR structuredef: Structure that identifies a real or
- * virtual network port by MAE port and link end
+ * virtual network port by MAE port and link end. Intended to be used by
+ * network port MCDI commands. Setting FLAT to MAE_LINK_ENDPOINT_COMPAT is
+ * equivalent to using the previous version of the command. Not all possible
+ * combinations of MPORT_END and MPORT_SELECTOR in MAE_LINK_ENDPOINT_SELECTOR
+ * will work in all circumstances. 1. Some will always work (e.g. a VF can
+ * always address its logical MAC using MPORT_SELECTOR=ASSIGNED,LINK_END=VNIC),
+ * 2. Some are not meaningful and will always fail with EINVAL (e.g. attempting
+ * to address the VNIC end of a link to a physical port), 3. Some are
+ * meaningful but require the MCDI client to have the required permission and
+ * fail with EPERM otherwise (e.g. trying to set the MAC on a VF the caller
+ * cannot administer), and 4. Some could be implementation-specific and fail
+ * with ENOTSUP if not available (no examples exist right now). See
+ * SF-123581-TC section 4.3 for more details.
*/
#define MAE_LINK_ENDPOINT_SELECTOR_LEN 8
-/* The MAE MPORT of interest */
+/* Identifier for the MAE MPORT of interest */
#define MAE_LINK_ENDPOINT_SELECTOR_MPORT_SELECTOR_OFST 0
#define MAE_LINK_ENDPOINT_SELECTOR_MPORT_SELECTOR_LEN 4
#define MAE_LINK_ENDPOINT_SELECTOR_MPORT_SELECTOR_LBN 0
@@ -27829,6 +23967,8 @@
#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_MAXNUM_MCDI2 253
/* enum: A counter ID that is guaranteed never to represent a real counter */
#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_NULL 0xffffffff
+/* Other enum values, see field(s): */
+/* MAE_COUNTER_ID */
/***********************************/
@@ -28266,6 +24406,24 @@
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_SUPPRESS_SELF_DELIVERY_OFST 0
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_SUPPRESS_SELF_DELIVERY_LBN 14
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_SUPPRESS_SELF_DELIVERY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_C_PL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_C_PL_LBN 15
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_C_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_D_PL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_D_PL_LBN 16
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_D_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_LBN 17
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_SET_NET_CHAN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_SET_NET_CHAN_LBN 18
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_SET_NET_CHAN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LACP_PLUGIN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LACP_PLUGIN_LBN 19
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LACP_PLUGIN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LACP_INC_L4_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LACP_INC_L4_LBN 20
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LACP_INC_L4_WIDTH 1
/* If VLAN_PUSH >= 1, TCI value to be inserted as outermost VLAN. */
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN0_TCI_BE_OFST 4
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN0_TCI_BE_LEN 2
@@ -28291,19 +24449,23 @@
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DELIVER_OFST 20
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DELIVER_LEN 4
/* Allows an action set to trigger several counter updates. Set to
- * COUNTER_LIST_ID_NULL to request no counter action.
+ * MAE_COUNTER_ID_NULL to request no counter action.
*/
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_LIST_ID_OFST 24
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_LIST_ID_LEN 4
+/* Enum values, see field(s): */
+/* MAE_COUNTER_ID */
/* If a driver only wished to update one counter within this action set, then
* it can supply a COUNTER_ID instead of allocating a single-element counter
* list. The ID must have been allocated with COUNTER_TYPE=AR. This field
- * should be set to COUNTER_ID_NULL if this behaviour is not required. It is
- * not valid to supply a non-NULL value for both COUNTER_LIST_ID and
+ * should be set to MAE_COUNTER_ID_NULL if this behaviour is not required. It
+ * is not valid to supply a non-NULL value for both COUNTER_LIST_ID and
* COUNTER_ID.
*/
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_ID_OFST 28
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_ID_LEN 4
+/* Enum values, see field(s): */
+/* MAE_COUNTER_ID */
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_MARK_VALUE_OFST 32
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_MARK_VALUE_LEN 4
/* Set to MAC_ID_NULL to request no source MAC replacement. */
@@ -28347,6 +24509,24 @@
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_SUPPRESS_SELF_DELIVERY_OFST 0
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_SUPPRESS_SELF_DELIVERY_LBN 14
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_SUPPRESS_SELF_DELIVERY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_C_PL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_C_PL_LBN 15
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_C_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_D_PL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_D_PL_LBN 16
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_D_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_LBN 17
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_SET_NET_CHAN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_SET_NET_CHAN_LBN 18
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_SET_NET_CHAN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_PLUGIN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_PLUGIN_LBN 19
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_PLUGIN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_INC_L4_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_INC_L4_LBN 20
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_INC_L4_WIDTH 1
/* If VLAN_PUSH >= 1, TCI value to be inserted as outermost VLAN. */
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN0_TCI_BE_OFST 4
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN0_TCI_BE_LEN 2
@@ -28372,19 +24552,23 @@
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DELIVER_OFST 20
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DELIVER_LEN 4
/* Allows an action set to trigger several counter updates. Set to
- * COUNTER_LIST_ID_NULL to request no counter action.
+ * MAE_COUNTER_ID_NULL to request no counter action.
*/
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_COUNTER_LIST_ID_OFST 24
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_COUNTER_LIST_ID_LEN 4
+/* Enum values, see field(s): */
+/* MAE_COUNTER_ID */
/* If a driver only wished to update one counter within this action set, then
* it can supply a COUNTER_ID instead of allocating a single-element counter
* list. The ID must have been allocated with COUNTER_TYPE=AR. This field
- * should be set to COUNTER_ID_NULL if this behaviour is not required. It is
- * not valid to supply a non-NULL value for both COUNTER_LIST_ID and
+ * should be set to MAE_COUNTER_ID_NULL if this behaviour is not required. It
+ * is not valid to supply a non-NULL value for both COUNTER_LIST_ID and
* COUNTER_ID.
*/
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_COUNTER_ID_OFST 28
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_COUNTER_ID_LEN 4
+/* Enum values, see field(s): */
+/* MAE_COUNTER_ID */
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_MARK_VALUE_OFST 32
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_MARK_VALUE_LEN 4
/* Set to MAC_ID_NULL to request no source MAC replacement. */
@@ -28437,6 +24621,172 @@
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_ECT_1_TO_CE_LBN 6
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_ECT_1_TO_CE_WIDTH 1
+/* MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN msgrequest: Only supported if
+ * MAE_ACTION_SET_ALLOC_V3_SUPPORTED is advertised in
+ * MC_CMD_GET_CAPABILITIES_V10_OUT.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LEN 53
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_FLAGS_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_FLAGS_LEN 4
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_PUSH_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_PUSH_LBN 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_PUSH_WIDTH 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_POP_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_POP_LBN 4
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_POP_WIDTH 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DECAP_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DECAP_LBN 8
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DECAP_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_MARK_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_MARK_LBN 9
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_MARK_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_FLAG_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_FLAG_LBN 10
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_FLAG_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_NAT_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_NAT_LBN 11
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_NAT_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DECR_IP_TTL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DECR_IP_TTL_LBN 12
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DECR_IP_TTL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_SRC_MPORT_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_SRC_MPORT_LBN 13
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_SRC_MPORT_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_SUPPRESS_SELF_DELIVERY_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_SUPPRESS_SELF_DELIVERY_LBN 14
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_SUPPRESS_SELF_DELIVERY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_C_PL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_C_PL_LBN 15
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_C_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_D_PL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_D_PL_LBN 16
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_D_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_LBN 17
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_NET_CHAN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_NET_CHAN_LBN 18
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_NET_CHAN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_PLUGIN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_PLUGIN_LBN 19
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_PLUGIN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_INC_L4_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_INC_L4_LBN 20
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_INC_L4_WIDTH 1
+/* If VLAN_PUSH >= 1, TCI value to be inserted as outermost VLAN. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN0_TCI_BE_OFST 4
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN0_TCI_BE_LEN 2
+/* If VLAN_PUSH >= 1, TPID value to be inserted as outermost VLAN. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN0_PROTO_BE_OFST 6
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN0_PROTO_BE_LEN 2
+/* If VLAN_PUSH == 2, inner TCI value to be inserted. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN1_TCI_BE_OFST 8
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN1_TCI_BE_LEN 2
+/* If VLAN_PUSH == 2, inner TPID value to be inserted. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN1_PROTO_BE_OFST 10
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN1_PROTO_BE_LEN 2
+/* Reserved. Ignored by firmware. Should be set to zero or 0xffffffff. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RSVD_OFST 12
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RSVD_LEN 4
+/* Set to ENCAP_HEADER_ID_NULL to request no encap action */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ENCAP_HEADER_ID_OFST 16
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ENCAP_HEADER_ID_LEN 4
+/* An m-port selector identifying the m-port that the modified packet should be
+ * delivered to. Set to MPORT_SELECTOR_NULL to request no delivery of the
+ * packet.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DELIVER_OFST 20
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DELIVER_LEN 4
+/* Allows an action set to trigger several counter updates. Set to
+ * MAE_COUNTER_ID_NULL to request no counter action.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_COUNTER_LIST_ID_OFST 24
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_COUNTER_LIST_ID_LEN 4
+/* Enum values, see field(s): */
+/* MAE_COUNTER_ID */
+/* If a driver only wished to update one counter within this action set, then
+ * it can supply a COUNTER_ID instead of allocating a single-element counter
+ * list. The ID must have been allocated with COUNTER_TYPE=AR. This field
+ * should be set to MAE_COUNTER_ID_NULL if this behaviour is not required. It
+ * is not valid to supply a non-NULL value for both COUNTER_LIST_ID and
+ * COUNTER_ID.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_COUNTER_ID_OFST 28
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_COUNTER_ID_LEN 4
+/* Enum values, see field(s): */
+/* MAE_COUNTER_ID */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_MARK_VALUE_OFST 32
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_MARK_VALUE_LEN 4
+/* Set to MAC_ID_NULL to request no source MAC replacement. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_SRC_MAC_ID_OFST 36
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_SRC_MAC_ID_LEN 4
+/* Set to MAC_ID_NULL to request no destination MAC replacement. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DST_MAC_ID_OFST 40
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DST_MAC_ID_LEN 4
+/* Source m-port ID to be reported for DO_SET_SRC_MPORT action. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_REPORTED_SRC_MPORT_OFST 44
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_REPORTED_SRC_MPORT_LEN 4
+/* Actions for modifying the Differentiated Services Code-Point (DSCP) bits
+ * within IPv4 and IPv6 headers.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DSCP_CONTROL_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DSCP_CONTROL_LEN 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_ENCAP_COPY_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_ENCAP_COPY_LBN 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_ENCAP_COPY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_DECAP_COPY_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_DECAP_COPY_LBN 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_DECAP_COPY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_DSCP_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_DSCP_LBN 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_DSCP_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DSCP_VALUE_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DSCP_VALUE_LBN 3
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DSCP_VALUE_WIDTH 6
+/* Actions for modifying the Explicit Congestion Notification (ECN) bits within
+ * IPv4 and IPv6 headers.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_CONTROL_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_CONTROL_LEN 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_ENCAP_COPY_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_ENCAP_COPY_LBN 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_ENCAP_COPY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_DECAP_COPY_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_DECAP_COPY_LBN 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_DECAP_COPY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_ECN_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_ECN_LBN 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_ECN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_VALUE_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_VALUE_LBN 3
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_VALUE_WIDTH 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_0_TO_CE_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_0_TO_CE_LBN 5
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_0_TO_CE_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_1_TO_CE_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_1_TO_CE_LBN 6
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_1_TO_CE_WIDTH 1
+/* Actions for overwriting CH_ROUTE subfields. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_OVERWRITE_OFST 51
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_OVERWRITE_LEN 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_C_PL_OFST 51
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_C_PL_LBN 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_C_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_D_PL_OFST 51
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_D_PL_LBN 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_D_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_PL_CHAN_OFST 51
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_PL_CHAN_LBN 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_PL_CHAN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_OUT_HOST_CHAN_OFST 51
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_OUT_HOST_CHAN_LBN 3
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_OUT_HOST_CHAN_WIDTH 1
+/* Override outgoing CH_VC to network port for DO_SET_NET_CHAN action. Cannot
+ * be used in conjunction with DO_SET_SRC_MPORT action.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_NET_CHAN_OFST 52
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_NET_CHAN_LEN 1
+
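To make the sub-byte layout concrete: the 16-bit DSCP_CONTROL word packs three action flags plus a 6-bit DSCP value, so requesting a fixed-value DSCP rewrite reduces to OR-ing two fields. A sketch under that reading (helper name hypothetical):

static u16 build_dscp_replace(u8 dscp)
{
	/* set DO_REPLACE_DSCP and place the 6-bit value at DSCP_VALUE */
	return BIT(MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_DSCP_LBN) |
	       ((dscp & 0x3f) <<
		MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DSCP_VALUE_LBN);
}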
/* MC_CMD_MAE_ACTION_SET_ALLOC_OUT msgresponse */
#define MC_CMD_MAE_ACTION_SET_ALLOC_OUT_LEN 4
/* The MSB of the AS_ID is guaranteed to be clear if the ID is not
@@ -28680,58 +25030,6 @@
#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID_MAXNUM 32
#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID_MAXNUM_MCDI2 32
-
-/***********************************/
-/* MC_CMD_MAE_OUTER_RULE_UPDATE
- * Atomically change the response of an Outer Rule.
- */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE 0x17d
-#undef MC_CMD_0x17d_PRIVILEGE_CTG
-
-#define MC_CMD_0x17d_PRIVILEGE_CTG SRIOV_CTG_MAE
-
-/* MC_CMD_MAE_OUTER_RULE_UPDATE_IN msgrequest */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_LEN 16
-/* ID of outer rule to update */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_OR_ID_OFST 0
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_OR_ID_LEN 4
-/* Packets matching the rule will be parsed with this encapsulation. */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_ENCAP_TYPE_OFST 4
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_ENCAP_TYPE_LEN 4
-/* Enum values, see field(s): */
-/* MAE_MCDI_ENCAP_TYPE */
-/* This field controls the actions that are performed when a rule is hit. */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_ACTION_CONTROL_OFST 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_ACTION_CONTROL_LEN 4
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_CT_OFST 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_CT_LBN 0
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_CT_WIDTH 1
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_VNI_MODE_OFST 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_VNI_MODE_LBN 1
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_VNI_MODE_WIDTH 2
-/* Enum values, see field(s): */
-/* MAE_CT_VNI_MODE */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_COUNT_OFST 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_COUNT_LBN 3
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_COUNT_WIDTH 1
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_TCP_FLAGS_INHIBIT_OFST 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_TCP_FLAGS_INHIBIT_LBN 4
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_TCP_FLAGS_INHIBIT_WIDTH 1
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_RECIRC_ID_OFST 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_RECIRC_ID_LBN 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_RECIRC_ID_WIDTH 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_DOMAIN_OFST 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_DOMAIN_LBN 16
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_DOMAIN_WIDTH 16
-/* ID of counter to increment when the rule is hit. Only used if the DO_COUNT
- * flag is set. The ID must have been allocated with COUNTER_TYPE=OR.
- */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_COUNTER_ID_OFST 12
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_COUNTER_ID_LEN 4
-
-/* MC_CMD_MAE_OUTER_RULE_UPDATE_OUT msgresponse */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_OUT_LEN 0
-
/* MAE_ACTION_RULE_RESPONSE structuredef */
#define MAE_ACTION_RULE_RESPONSE_LEN 16
#define MAE_ACTION_RULE_RESPONSE_ASL_ID_OFST 0
@@ -29122,142 +25420,6 @@
#define MAE_MPORT_DESC_VNIC_PLUGIN_TBD_LBN 352
#define MAE_MPORT_DESC_VNIC_PLUGIN_TBD_WIDTH 32
-/* MAE_MPORT_DESC_V2 structuredef */
-#define MAE_MPORT_DESC_V2_LEN 56
-#define MAE_MPORT_DESC_V2_MPORT_ID_OFST 0
-#define MAE_MPORT_DESC_V2_MPORT_ID_LEN 4
-#define MAE_MPORT_DESC_V2_MPORT_ID_LBN 0
-#define MAE_MPORT_DESC_V2_MPORT_ID_WIDTH 32
-/* Reserved for future purposes, contains information independent of caller */
-#define MAE_MPORT_DESC_V2_FLAGS_OFST 4
-#define MAE_MPORT_DESC_V2_FLAGS_LEN 4
-#define MAE_MPORT_DESC_V2_FLAGS_LBN 32
-#define MAE_MPORT_DESC_V2_FLAGS_WIDTH 32
-#define MAE_MPORT_DESC_V2_CALLER_FLAGS_OFST 8
-#define MAE_MPORT_DESC_V2_CALLER_FLAGS_LEN 4
-#define MAE_MPORT_DESC_V2_CAN_RECEIVE_ON_OFST 8
-#define MAE_MPORT_DESC_V2_CAN_RECEIVE_ON_LBN 0
-#define MAE_MPORT_DESC_V2_CAN_RECEIVE_ON_WIDTH 1
-#define MAE_MPORT_DESC_V2_CAN_DELIVER_TO_OFST 8
-#define MAE_MPORT_DESC_V2_CAN_DELIVER_TO_LBN 1
-#define MAE_MPORT_DESC_V2_CAN_DELIVER_TO_WIDTH 1
-#define MAE_MPORT_DESC_V2_CAN_DELETE_OFST 8
-#define MAE_MPORT_DESC_V2_CAN_DELETE_LBN 2
-#define MAE_MPORT_DESC_V2_CAN_DELETE_WIDTH 1
-#define MAE_MPORT_DESC_V2_IS_ZOMBIE_OFST 8
-#define MAE_MPORT_DESC_V2_IS_ZOMBIE_LBN 3
-#define MAE_MPORT_DESC_V2_IS_ZOMBIE_WIDTH 1
-#define MAE_MPORT_DESC_V2_CALLER_FLAGS_LBN 64
-#define MAE_MPORT_DESC_V2_CALLER_FLAGS_WIDTH 32
-/* Not the ideal name; it's really the type of thing connected to the m-port */
-#define MAE_MPORT_DESC_V2_MPORT_TYPE_OFST 12
-#define MAE_MPORT_DESC_V2_MPORT_TYPE_LEN 4
-/* enum: Connected to a MAC... */
-#define MAE_MPORT_DESC_V2_MPORT_TYPE_NET_PORT 0x0
-/* enum: Adds metadata and delivers to another m-port */
-#define MAE_MPORT_DESC_V2_MPORT_TYPE_ALIAS 0x1
-/* enum: Connected to a VNIC. */
-#define MAE_MPORT_DESC_V2_MPORT_TYPE_VNIC 0x2
-#define MAE_MPORT_DESC_V2_MPORT_TYPE_LBN 96
-#define MAE_MPORT_DESC_V2_MPORT_TYPE_WIDTH 32
-/* 128-bit value available to drivers for m-port identification. */
-#define MAE_MPORT_DESC_V2_UUID_OFST 16
-#define MAE_MPORT_DESC_V2_UUID_LEN 16
-#define MAE_MPORT_DESC_V2_UUID_LBN 128
-#define MAE_MPORT_DESC_V2_UUID_WIDTH 128
-/* Big wadge of space reserved for other common properties */
-#define MAE_MPORT_DESC_V2_RESERVED_OFST 32
-#define MAE_MPORT_DESC_V2_RESERVED_LEN 8
-#define MAE_MPORT_DESC_V2_RESERVED_LO_OFST 32
-#define MAE_MPORT_DESC_V2_RESERVED_LO_LEN 4
-#define MAE_MPORT_DESC_V2_RESERVED_LO_LBN 256
-#define MAE_MPORT_DESC_V2_RESERVED_LO_WIDTH 32
-#define MAE_MPORT_DESC_V2_RESERVED_HI_OFST 36
-#define MAE_MPORT_DESC_V2_RESERVED_HI_LEN 4
-#define MAE_MPORT_DESC_V2_RESERVED_HI_LBN 288
-#define MAE_MPORT_DESC_V2_RESERVED_HI_WIDTH 32
-#define MAE_MPORT_DESC_V2_RESERVED_LBN 256
-#define MAE_MPORT_DESC_V2_RESERVED_WIDTH 64
-/* Logical port index. Only valid when the type is NET_PORT. */
-#define MAE_MPORT_DESC_V2_NET_PORT_IDX_OFST 40
-#define MAE_MPORT_DESC_V2_NET_PORT_IDX_LEN 4
-#define MAE_MPORT_DESC_V2_NET_PORT_IDX_LBN 320
-#define MAE_MPORT_DESC_V2_NET_PORT_IDX_WIDTH 32
-/* The m-port delivered to */
-#define MAE_MPORT_DESC_V2_ALIAS_DELIVER_MPORT_ID_OFST 40
-#define MAE_MPORT_DESC_V2_ALIAS_DELIVER_MPORT_ID_LEN 4
-#define MAE_MPORT_DESC_V2_ALIAS_DELIVER_MPORT_ID_LBN 320
-#define MAE_MPORT_DESC_V2_ALIAS_DELIVER_MPORT_ID_WIDTH 32
-/* The type of thing that owns the VNIC */
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_OFST 40
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_LEN 4
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_FUNCTION 0x1 /* enum */
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_PLUGIN 0x2 /* enum */
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_LBN 320
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_WIDTH 32
-/* The PCIe interface on which the function lives. CJK: We need an enumeration
- * of interfaces that we extend as new interface (types) appear. This belongs
- * elsewhere and should be referenced from here
- */
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_INTERFACE_OFST 44
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_INTERFACE_LEN 4
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_INTERFACE_LBN 352
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_INTERFACE_WIDTH 32
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_PF_IDX_OFST 48
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_PF_IDX_LEN 2
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_PF_IDX_LBN 384
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_PF_IDX_WIDTH 16
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_VF_IDX_OFST 50
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_VF_IDX_LEN 2
-/* enum: Indicates that the function is a PF */
-#define MAE_MPORT_DESC_V2_VF_IDX_NULL 0xffff
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_VF_IDX_LBN 400
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_VF_IDX_WIDTH 16
-/* Reserved. Should be ignored for now. */
-#define MAE_MPORT_DESC_V2_VNIC_PLUGIN_TBD_OFST 44
-#define MAE_MPORT_DESC_V2_VNIC_PLUGIN_TBD_LEN 4
-#define MAE_MPORT_DESC_V2_VNIC_PLUGIN_TBD_LBN 352
-#define MAE_MPORT_DESC_V2_VNIC_PLUGIN_TBD_WIDTH 32
-/* A client handle for the VNIC's owner. Only valid for type VNIC. */
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_HANDLE_OFST 52
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_HANDLE_LEN 4
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_HANDLE_LBN 416
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_HANDLE_WIDTH 32
-
-
-/***********************************/
-/* MC_CMD_MAE_MPORT_ENUMERATE
- * Deprecated in favour of MAE_MPORT_READ_JOURNAL. Support for this command
- * will be removed at some future point.
- */
-#define MC_CMD_MAE_MPORT_ENUMERATE 0x17c
-#undef MC_CMD_0x17c_PRIVILEGE_CTG
-
-#define MC_CMD_0x17c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_MAE_MPORT_ENUMERATE_IN msgrequest */
-#define MC_CMD_MAE_MPORT_ENUMERATE_IN_LEN 0
-
-/* MC_CMD_MAE_MPORT_ENUMERATE_OUT msgresponse */
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_LENMIN 8
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_LENMAX 252
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_LEN(num) (8+1*(num))
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_NUM(len) (((len)-8)/1)
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_COUNT_OFST 0
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_COUNT_LEN 4
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_SIZEOF_MPORT_DESC_OFST 4
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_SIZEOF_MPORT_DESC_LEN 4
-/* An array of MAE_MPORT_DESC structures. The MAE_MPORT_DESC structure may
- * grow in future versions of this command. Drivers should use a stride of
- * SIZEOF_MPORT_DESC. Fields beyond SIZEOF_MPORT_DESC are not present.
- */
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_OFST 8
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_LEN 1
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_MINNUM 0
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_MAXNUM 244
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_MAXNUM_MCDI2 1012
-
/***********************************/
/* MC_CMD_MAE_MPORT_READ_JOURNAL
@@ -29570,73 +25732,6 @@
/***********************************/
-/* MC_CMD_TABLE_UPDATE
- * Update an existing entry in a table with a new response value. May return
- * EINVAL for unknown table ID or other bad request parameters, ENOENT if the
- * entry does not already exist, or EPERM if the operation is not permitted. In
- * case of an error, the additional MCDI error argument field returns the raw
- * error code from the underlying CAM driver.
- */
-#define MC_CMD_TABLE_UPDATE 0x1ce
-#undef MC_CMD_0x1ce_PRIVILEGE_CTG
-
-#define MC_CMD_0x1ce_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_TABLE_UPDATE_IN msgrequest */
-#define MC_CMD_TABLE_UPDATE_IN_LENMIN 16
-#define MC_CMD_TABLE_UPDATE_IN_LENMAX 252
-#define MC_CMD_TABLE_UPDATE_IN_LENMAX_MCDI2 1020
-#define MC_CMD_TABLE_UPDATE_IN_LEN(num) (12+4*(num))
-#define MC_CMD_TABLE_UPDATE_IN_DATA_NUM(len) (((len)-12)/4)
-/* Table identifier. */
-#define MC_CMD_TABLE_UPDATE_IN_TABLE_ID_OFST 0
-#define MC_CMD_TABLE_UPDATE_IN_TABLE_ID_LEN 4
-/* Enum values, see field(s): */
-/* TABLE_ID */
-/* Width in bits of supplied key data (must match table properties). */
-#define MC_CMD_TABLE_UPDATE_IN_KEY_WIDTH_OFST 4
-#define MC_CMD_TABLE_UPDATE_IN_KEY_WIDTH_LEN 2
-/* Width in bits of supplied mask data (0 for direct/BCAM tables, or for STCAM
- * when allocated MASK_ID is used instead).
- */
-#define MC_CMD_TABLE_UPDATE_IN_MASK_WIDTH_OFST 6
-#define MC_CMD_TABLE_UPDATE_IN_MASK_WIDTH_LEN 2
-/* Width in bits of supplied response data (for INSERT and UPDATE operations
- * this must match the table properties; for DELETE operations, no response
- * data is required and this must be 0).
- */
-#define MC_CMD_TABLE_UPDATE_IN_RESP_WIDTH_OFST 8
-#define MC_CMD_TABLE_UPDATE_IN_RESP_WIDTH_LEN 2
-/* Mask ID for STCAM table - used instead of mask data if the table descriptor
- * reports ALLOC_MASKS==1. Otherwise set to 0.
- */
-#define MC_CMD_TABLE_UPDATE_IN_MASK_ID_OFST 6
-#define MC_CMD_TABLE_UPDATE_IN_MASK_ID_LEN 2
-/* Priority for TCAM or STCAM, in range 0..N_PRIORITIES-1, otherwise 0. */
-#define MC_CMD_TABLE_UPDATE_IN_PRIORITY_OFST 8
-#define MC_CMD_TABLE_UPDATE_IN_PRIORITY_LEN 2
-/* (32-bit alignment padding - set to 0) */
-#define MC_CMD_TABLE_UPDATE_IN_RESERVED_OFST 10
-#define MC_CMD_TABLE_UPDATE_IN_RESERVED_LEN 2
-/* Sequence of key, mask (if MASK_WIDTH > 0), and response (if RESP_WIDTH > 0)
- * data values. Each of these items is logically treated as a single wide N-bit
- * value, in which the individual fields have been placed within that value per
- * the LBN and WIDTH information from the table field descriptors. The wide
- * N-bit value is padded with 0 bits at the MSB end if necessary to make a
- * multiple of 32 bits. The value is then packed into this command as a
- * sequence of 32-bit words, bits [31:0] first, then bits [63:32], etc.
- */
-#define MC_CMD_TABLE_UPDATE_IN_DATA_OFST 12
-#define MC_CMD_TABLE_UPDATE_IN_DATA_LEN 4
-#define MC_CMD_TABLE_UPDATE_IN_DATA_MINNUM 1
-#define MC_CMD_TABLE_UPDATE_IN_DATA_MAXNUM 60
-#define MC_CMD_TABLE_UPDATE_IN_DATA_MAXNUM_MCDI2 252
-
-/* MC_CMD_TABLE_UPDATE_OUT msgresponse */
-#define MC_CMD_TABLE_UPDATE_OUT_LEN 0
-
-
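The key/mask/response packing described above (each field placed at its LBN within one wide N-bit value, then emitted as 32-bit words, bits [31:0] first) can be sketched generically; this helper assumes the destination word array has been pre-zeroed:

static void set_wide_field(u32 *words, unsigned int lbn,
			   unsigned int width, u64 value)
{
	unsigned int bit;

	/* copy 'width' bits of 'value' into the wide value at bit 'lbn' */
	for (bit = 0; bit < width; bit++, lbn++)
		if (value & (1ULL << bit))
			words[lbn / 32] |= 1U << (lbn % 32);
}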
-/***********************************/
/* MC_CMD_TABLE_DELETE
* Delete an existing entry in a table. May return EINVAL for unknown table ID
* or other bad request parameters, ENOENT if the entry does not exist, or
@@ -29702,5 +25797,124 @@
/* MC_CMD_TABLE_DELETE_OUT msgresponse */
#define MC_CMD_TABLE_DELETE_OUT_LEN 0
+/* MC_CMD_QUEUE_HANDLE structuredef: On X4, to distinguish between full-
+ * featured (X2-style) VIs and low-latency (X3-style) queues, we use the top
+ * bits of the queue handle to specify the queue type in all MCDI calls which
+ * refer to VIs/queues. These bits should be masked off when indexing into a
+ * queue in the BAR.
+ */
+#define MC_CMD_QUEUE_HANDLE_LEN 4
+/* Combined queue number and type. This is the ID returned by and passed into
+ * MCDI calls that use queues.
+ */
+#define MC_CMD_QUEUE_HANDLE_QUEUE_HANDLE_OFST 0
+#define MC_CMD_QUEUE_HANDLE_QUEUE_HANDLE_LEN 4
+#define MC_CMD_QUEUE_HANDLE_QUEUE_NUM_OFST 0
+#define MC_CMD_QUEUE_HANDLE_QUEUE_NUM_LBN 0
+#define MC_CMD_QUEUE_HANDLE_QUEUE_NUM_WIDTH 24
+#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_OFST 0
+#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_LBN 24
+#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_WIDTH 8
+/* enum: Indicates that the queue instance is a full-featured VI */
+#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_FF_VI 0x0
+/* enum: Indicates that the queue instance is a LL TXQ */
+#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_LL_TXQ 0x1
+/* enum: Indicates that the queue instance is a LL RXQ */
+#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_LL_RXQ 0x2
+/* enum: Indicates that the queue instance is a LL EVQ */
+#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_LL_EVQ 0x3
+#define MC_CMD_QUEUE_HANDLE_QUEUE_HANDLE_LBN 0
+#define MC_CMD_QUEUE_HANDLE_QUEUE_HANDLE_WIDTH 32
+
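A sketch of splitting a handle per the layout above: the queue type lives in the top 8 bits and must be masked off before the queue number is used to index into the BAR (helper names hypothetical):

static inline u8 queue_handle_type(u32 handle)
{
	/* QUEUE_TYPE occupies bits 24..31 */
	return handle >> MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_LBN;
}

static inline u32 queue_handle_num(u32 handle)
{
	/* QUEUE_NUM occupies bits 0..23 */
	return handle & (BIT(MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_LBN) - 1);
}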
+
+/***********************************/
+/* MC_CMD_ALLOC_LL_QUEUES
+ * Allocate low latency (X3-style) queues for current PCI function. Can be
+ * called more than once if desired to allocate more queues.
+ */
+#define MC_CMD_ALLOC_LL_QUEUES 0x1dd
+#undef MC_CMD_0x1dd_PRIVILEGE_CTG
+
+#define MC_CMD_0x1dd_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_ALLOC_LL_QUEUES_IN msgrequest */
+#define MC_CMD_ALLOC_LL_QUEUES_IN_LEN 24
+/* The minimum number of TXQs that is acceptable */
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MIN_TXQ_COUNT_OFST 0
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MIN_TXQ_COUNT_LEN 4
+/* The maximum number of TXQs that would be useful */
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MAX_TXQ_COUNT_OFST 4
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MAX_TXQ_COUNT_LEN 4
+/* The minimum number of RXQs that is acceptable */
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MIN_RXQ_COUNT_OFST 8
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MIN_RXQ_COUNT_LEN 4
+/* The maximum number of RXQs that would be useful */
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MAX_RXQ_COUNT_OFST 12
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MAX_RXQ_COUNT_LEN 4
+/* The minimum number of EVQs that is acceptable */
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MIN_EVQ_COUNT_OFST 16
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MIN_EVQ_COUNT_LEN 4
+/* The maximum number of EVQs that would be useful */
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MAX_EVQ_COUNT_OFST 20
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MAX_EVQ_COUNT_LEN 4
+
+/* MC_CMD_ALLOC_LL_QUEUES_OUT msgresponse */
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_LENMIN 16
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_LENMAX 252
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_LEN(num) (12+4*(num))
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_NUM(len) (((len)-12)/4)
+/* The number of TXQs allocated in this request */
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_TXQ_COUNT_OFST 0
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_TXQ_COUNT_LEN 4
+/* The number of RXQs allocated in this request */
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_RXQ_COUNT_OFST 4
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_RXQ_COUNT_LEN 4
+/* The number of EVQs allocated in this request */
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_EVQ_COUNT_OFST 8
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_EVQ_COUNT_LEN 4
+/* A list of allocated queues, returned as MC_CMD_QUEUE_HANDLEs, not
+ * necessarily contiguous. TXQs are first in the list, followed by RXQs then
+ * EVQs. The type of each queue is indicated by the top bits (see the
+ * QUEUE_TYPE enum)
+ */
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_OFST 12
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_LEN 4
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_MINNUM 1
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_MAXNUM 60
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_MAXNUM_MCDI2 252
+
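A sketch of consuming a successful response: the fixed part carries the three counts, and the handle list is ordered TXQs, then RXQs, then EVQs. The function name is hypothetical and 'outbuf'/'outlen' are assumed to hold a completed MC_CMD_ALLOC_LL_QUEUES response:

static void walk_ll_queues(efx_dword_t *outbuf, size_t outlen)
{
	u32 ntxq = MCDI_DWORD(outbuf, ALLOC_LL_QUEUES_OUT_TXQ_COUNT);
	u32 nrxq = MCDI_DWORD(outbuf, ALLOC_LL_QUEUES_OUT_RXQ_COUNT);
	u32 n = MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_NUM(outlen);
	u32 i;

	for (i = 0; i < n; i++) {
		u32 handle = MCDI_ARRAY_DWORD(outbuf,
					      ALLOC_LL_QUEUES_OUT_QUEUES, i);
		const char *kind = i < ntxq ? "TXQ" :
				   i < ntxq + nrxq ? "RXQ" : "EVQ";

		pr_info("LL %s: handle %#x\n", kind, handle);
	}
}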
+
+/***********************************/
+/* MC_CMD_FREE_LL_QUEUES
+ * Free low-latency (X3-style) queues for the current PCI function.
+ */
+#define MC_CMD_FREE_LL_QUEUES 0x1de
+#undef MC_CMD_0x1de_PRIVILEGE_CTG
+
+#define MC_CMD_0x1de_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FREE_LL_QUEUES_IN msgrequest */
+#define MC_CMD_FREE_LL_QUEUES_IN_LENMIN 8
+#define MC_CMD_FREE_LL_QUEUES_IN_LENMAX 252
+#define MC_CMD_FREE_LL_QUEUES_IN_LENMAX_MCDI2 1020
+#define MC_CMD_FREE_LL_QUEUES_IN_LEN(num) (4+4*(num))
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUES_NUM(len) (((len)-4)/4)
+/* The number of queues to free. */
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUE_COUNT_OFST 0
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUE_COUNT_LEN 4
+/* A list of queues to free, as a list of MC_CMD_QUEUE_HANDLEs. They must all
+ * have been previously allocated by MC_CMD_ALLOC_LL_QUEUES. The type of each
+ * queue should be indicated by the top bits.
+ */
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUES_OFST 4
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUES_LEN 4
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUES_MINNUM 1
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUES_MAXNUM 62
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUES_MAXNUM_MCDI2 254
+
+/* MC_CMD_FREE_LL_QUEUES_OUT msgresponse */
+#define MC_CMD_FREE_LL_QUEUES_OUT_LEN 0
+
#endif /* MCDI_PCOL_H */
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index ad4694fa3dda..7b236d291d8c 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -17,58 +17,6 @@
#include "selftest.h"
#include "mcdi_port_common.h"
-static int efx_mcdi_mdio_read(struct net_device *net_dev,
- int prtad, int devad, u16 addr)
-{
- struct efx_nic *efx = efx_netdev_priv(net_dev);
- MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_READ_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_READ_OUT_LEN);
- size_t outlen;
- int rc;
-
- MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, efx->mdio_bus);
- MCDI_SET_DWORD(inbuf, MDIO_READ_IN_PRTAD, prtad);
- MCDI_SET_DWORD(inbuf, MDIO_READ_IN_DEVAD, devad);
- MCDI_SET_DWORD(inbuf, MDIO_READ_IN_ADDR, addr);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_READ, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &outlen);
- if (rc)
- return rc;
-
- if (MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS) !=
- MC_CMD_MDIO_STATUS_GOOD)
- return -EIO;
-
- return (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE);
-}
-
-static int efx_mcdi_mdio_write(struct net_device *net_dev,
- int prtad, int devad, u16 addr, u16 value)
-{
- struct efx_nic *efx = efx_netdev_priv(net_dev);
- MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_WRITE_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_WRITE_OUT_LEN);
- size_t outlen;
- int rc;
-
- MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_BUS, efx->mdio_bus);
- MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_PRTAD, prtad);
- MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_DEVAD, devad);
- MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_ADDR, addr);
- MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_VALUE, value);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_WRITE, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &outlen);
- if (rc)
- return rc;
-
- if (MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS) !=
- MC_CMD_MDIO_STATUS_GOOD)
- return -EIO;
-
- return 0;
-}
u32 efx_mcdi_phy_get_caps(struct efx_nic *efx)
{
@@ -97,12 +45,7 @@ int efx_mcdi_port_probe(struct efx_nic *efx)
{
int rc;
- /* Set up MDIO structure for PHY */
- efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
- efx->mdio.mdio_read = efx_mcdi_mdio_read;
- efx->mdio.mdio_write = efx_mcdi_mdio_write;
-
- /* Fill out MDIO structure, loopback modes, and initial link state */
+ /* Fill out loopback modes and initial link state */
rc = efx_mcdi_phy_probe(efx);
if (rc != 0)
return rc;
diff --git a/drivers/net/ethernet/sfc/mcdi_port_common.c b/drivers/net/ethernet/sfc/mcdi_port_common.c
index 76ea26722ca4..dae684194ac8 100644
--- a/drivers/net/ethernet/sfc/mcdi_port_common.c
+++ b/drivers/net/ethernet/sfc/mcdi_port_common.c
@@ -448,15 +448,6 @@ int efx_mcdi_phy_probe(struct efx_nic *efx)
efx->phy_data = phy_data;
efx->phy_type = phy_data->type;
- efx->mdio_bus = phy_data->channel;
- efx->mdio.prtad = phy_data->port;
- efx->mdio.mmds = phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22);
- efx->mdio.mode_support = 0;
- if (phy_data->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22))
- efx->mdio.mode_support |= MDIO_SUPPORTS_C22;
- if (phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22))
- efx->mdio.mode_support |= MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
-
caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP);
if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN))
mcdi_to_ethtool_linkset(phy_data->media, caps,
@@ -546,8 +537,6 @@ void efx_mcdi_phy_get_link_ksettings(struct efx_nic *efx, struct ethtool_link_ks
cmd->base.port = mcdi_to_ethtool_media(phy_cfg->media);
cmd->base.phy_address = phy_cfg->port;
cmd->base.autoneg = !!(efx->link_advertising[0] & ADVERTISED_Autoneg);
- cmd->base.mdio_support = (efx->mdio.mode_support &
- (MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22));
mcdi_to_ethtool_linkset(phy_cfg->media, phy_cfg->supported_cap,
cmd->link_modes.supported);
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index f70a7b7d6345..5c0f306fb019 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -15,7 +15,7 @@
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/timer.h>
-#include <linux/mdio.h>
+#include <linux/mii.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/device.h>
@@ -956,8 +956,6 @@ struct efx_mae;
* @stats_buffer: DMA buffer for statistics
* @phy_type: PHY type
* @phy_data: PHY private data (including PHY-specific stats)
- * @mdio: PHY MDIO interface
- * @mdio_bus: PHY MDIO bus ID (only used by Siena)
* @phy_mode: PHY operating mode. Serialised by @mac_lock.
* @link_advertising: Autonegotiation advertising flags
* @fec_config: Forward Error Correction configuration flags. For bit positions
@@ -1006,6 +1004,7 @@ struct efx_mae;
* @dl_port: devlink port associated with the PF
* @mem_bar: The BAR that is mapped into membase.
* @reg_base: Offset from the start of the bar to the function control window.
+ * @reflash_mutex: Mutex for serialising firmware reflash operations.
* @monitor_work: Hardware monitor workitem
* @biu_lock: BIU (bus interface unit) lock
* @last_irq_cpu: Last CPU to handle a possible test interrupt. This
@@ -1131,8 +1130,6 @@ struct efx_nic {
unsigned int phy_type;
void *phy_data;
- struct mdio_if_info mdio;
- unsigned int mdio_bus;
enum efx_phy_mode phy_mode;
__ETHTOOL_DECLARE_LINK_MODE_MASK(link_advertising);
@@ -1191,6 +1188,7 @@ struct efx_nic {
struct devlink_port *dl_port;
unsigned int mem_bar;
u32 reg_base;
+ struct mutex reflash_mutex;
/* The following fields may be written more often */
@@ -1383,6 +1381,8 @@ struct efx_udp_tunnel {
* @can_rx_scatter: NIC is able to scatter packets to multiple buffers
* @always_rx_scatter: NIC will always scatter packets to multiple buffers
* @option_descriptors: NIC supports TX option descriptors
+ * @flash_auto_partition: firmware flash uses the AUTO partition, so the
+ *	driver does not need to perform image parsing
* @min_interrupt_mode: Lowest capability interrupt mode supported
* from &enum efx_int_mode.
* @timer_period_max: Maximum period of interrupt timer (in ticks)
@@ -1559,6 +1559,7 @@ struct efx_nic_type {
bool can_rx_scatter;
bool always_rx_scatter;
bool option_descriptors;
+ bool flash_auto_partition;
unsigned int min_interrupt_mode;
unsigned int timer_period_max;
netdev_features_t offload_features;
diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c
index 0d93164988fc..fa94aa3cd5fe 100644
--- a/drivers/net/ethernet/sfc/tc.c
+++ b/drivers/net/ethernet/sfc/tc.c
@@ -1043,7 +1043,7 @@ static int efx_tc_flower_handle_lhs_actions(struct efx_nic *efx,
return -EOPNOTSUPP;
}
if (fa->ct.action) {
- NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled ct.action %u for LHS rule\n",
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled ct.action %u for LHS rule",
fa->ct.action);
return -EOPNOTSUPP;
}
@@ -1056,7 +1056,7 @@ static int efx_tc_flower_handle_lhs_actions(struct efx_nic *efx,
act->zone = ct_zone;
break;
default:
- NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled action %u for LHS rule\n",
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled action %u for LHS rule",
fa->id);
return -EOPNOTSUPP;
}
@@ -1581,7 +1581,7 @@ static int efx_tc_flower_replace_foreign_lhs(struct efx_nic *efx,
type = efx_tc_indr_netdev_type(net_dev);
if (type == EFX_ENCAP_TYPE_NONE) {
- NL_SET_ERR_MSG_MOD(extack, "Egress encap match on unsupported tunnel device\n");
+ NL_SET_ERR_MSG_MOD(extack, "Egress encap match on unsupported tunnel device");
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index f539813878f5..2e1106097965 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -43,7 +43,6 @@
#include <linux/smsc911x.h>
#include <linux/device.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_net.h>
#include <linux/acpi.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index dc99821c6226..ee890de69ffe 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -970,7 +970,7 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
struct netsec_desc *desc = &dring->desc[idx];
struct page *page = virt_to_page(desc->addr);
- u32 xdp_result = NETSEC_XDP_PASS;
+ u32 metasize, xdp_result = NETSEC_XDP_PASS;
struct sk_buff *skb = NULL;
u16 pkt_len, desc_len;
dma_addr_t dma_handle;
@@ -1019,7 +1019,7 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
prefetch(desc->addr);
xdp_prepare_buff(&xdp, desc->addr, NETSEC_RXBUF_HEADROOM,
- pkt_len, false);
+ pkt_len, true);
if (xdp_prog) {
xdp_result = netsec_run_xdp(priv, xdp_prog, &xdp);
@@ -1048,6 +1048,9 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
skb_reserve(skb, xdp.data - xdp.data_hard_start);
skb_put(skb, xdp.data_end - xdp.data);
+ metasize = xdp.data - xdp.data_meta;
+ if (metasize)
+ skb_metadata_set(skb, metasize);
skb->protocol = eth_type_trans(skb, priv->ndev);
if (priv->rx_cksum_offload_flag &&
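
Aside (not part of the patch): with frames now built via xdp_prepare_buff(..., true), an XDP program attached to netsec can reserve metadata in front of the frame, which the driver copies onto the skb via skb_metadata_set() above. A minimal sketch of such a program, in libbpf-style C; the 4-byte mark value is hypothetical:

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	SEC("xdp")
	int xdp_set_meta(struct xdp_md *ctx)
	{
		__u32 *meta;

		/* Grow the metadata area by 4 bytes in front of the frame */
		if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta)))
			return XDP_PASS;

		meta = (void *)(long)ctx->data_meta;
		/* Verifier demands an explicit bounds check against data */
		if ((void *)(meta + 1) > (void *)(long)ctx->data)
			return XDP_PASS;

		*meta = 0x42;	/* hypothetical mark, consumed off the skb later */
		return XDP_PASS;
	}

	char _license[] SEC("license") = "GPL";
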
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 4cc85a36a1ab..3c820ef56775 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -181,6 +181,17 @@ config DWMAC_SOCFPGA
for the stmmac device driver. This driver is used for
arria5 and cyclone5 FPGA SoCs.
+config DWMAC_SOPHGO
+ tristate "Sophgo dwmac support"
+ depends on OF && (ARCH_SOPHGO || COMPILE_TEST)
+ default m if ARCH_SOPHGO
+ help
+	  Support for ethernet controllers on Sophgo RISC-V SoCs.
+
+ This selects the Sophgo SoC specific glue layer support
+ for the stmmac device driver. This driver is used for the
+ ethernet controllers on various Sophgo SoCs.
+
config DWMAC_STARFIVE
tristate "StarFive dwmac support"
depends on OF && (ARCH_STARFIVE || COMPILE_TEST)
@@ -307,6 +318,7 @@ config DWMAC_INTEL
default X86
depends on X86 && STMMAC_ETH && PCI
depends on COMMON_CLK
+ depends on ACPI
help
This selects the Intel platform specific bus support for the
stmmac driver. This driver is used for Intel Quark/EHL/TGL.
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index b26f0e79c2b3..594883fb4164 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o
obj-$(CONFIG_DWMAC_RZN1) += dwmac-rzn1.o
obj-$(CONFIG_DWMAC_S32) += dwmac-s32.o
obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-altr-socfpga.o
+obj-$(CONFIG_DWMAC_SOPHGO) += dwmac-sophgo.o
obj-$(CONFIG_DWMAC_STARFIVE) += dwmac-starfive.o
obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o
obj-$(CONFIG_DWMAC_STM32) += dwmac-stm32.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index e25db747a81a..412b07e77945 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -101,8 +101,8 @@ struct stmmac_rxq_stats {
/* Updates on each CPU protected by not allowing nested irqs. */
struct stmmac_pcpu_stats {
struct u64_stats_sync syncp;
- u64_stats_t rx_normal_irq_n[MTL_MAX_TX_QUEUES];
- u64_stats_t tx_normal_irq_n[MTL_MAX_RX_QUEUES];
+ u64_stats_t rx_normal_irq_n[MTL_MAX_RX_QUEUES];
+ u64_stats_t tx_normal_irq_n[MTL_MAX_TX_QUEUES];
};
/* Extra statistic and debug information exposed by ethtool */
@@ -530,6 +530,20 @@ struct dma_features {
#define STMMAC_DEFAULT_TWT_LS 0x1E
#define STMMAC_ET_MAX 0xFFFFF
+/* Common LPI register bits */
+#define LPI_CTRL_STATUS_LPITCSE BIT(21) /* LPI Tx Clock Stop Enable, gmac4, xgmac2 only */
+#define LPI_CTRL_STATUS_LPIATE BIT(20) /* LPI Timer Enable, gmac4 only */
+#define LPI_CTRL_STATUS_LPITXA BIT(19) /* Enable LPI TX Automate */
+#define LPI_CTRL_STATUS_PLSEN BIT(18) /* Enable PHY Link Status */
+#define LPI_CTRL_STATUS_PLS BIT(17) /* PHY Link Status */
+#define LPI_CTRL_STATUS_LPIEN BIT(16) /* LPI Enable */
+#define LPI_CTRL_STATUS_RLPIST BIT(9) /* Receive LPI state, gmac1000 only? */
+#define LPI_CTRL_STATUS_TLPIST BIT(8) /* Transmit LPI state, gmac1000 only? */
+#define LPI_CTRL_STATUS_RLPIEX BIT(3) /* Receive LPI Exit */
+#define LPI_CTRL_STATUS_RLPIEN BIT(2) /* Receive LPI Entry */
+#define LPI_CTRL_STATUS_TLPIEX BIT(1) /* Transmit LPI Exit */
+#define LPI_CTRL_STATUS_TLPIEN BIT(0) /* Transmit LPI Entry */
+
#define STMMAC_CHAIN_MODE 0x1
#define STMMAC_RING_MODE 0x2
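
Aside (not part of the patch): the stmmac_pcpu_stats fix above matters because each counter is indexed by queue, so rx counters must be sized by the RX queue count and tx counters by the TX count. These u64_stats_t fields follow the standard update/read protocol; a sketch against the in-tree u64_stats API, with hypothetical helper names:

	#include <linux/u64_stats_sync.h>

	/* Update side: runs in (soft)irq context, no nesting, per the comment above */
	static void rx_irq_account(struct stmmac_pcpu_stats *s, u32 queue)
	{
		u64_stats_update_begin(&s->syncp);
		u64_stats_inc(&s->rx_normal_irq_n[queue]);
		u64_stats_update_end(&s->syncp);
	}

	/* Read side: retry until a consistent snapshot is observed */
	static u64 rx_irq_read(struct stmmac_pcpu_stats *s, u32 queue)
	{
		unsigned int start;
		u64 val;

		do {
			start = u64_stats_fetch_begin(&s->syncp);
			val = u64_stats_read(&s->rx_normal_irq_n[queue]);
		} while (u64_stats_fetch_retry(&s->syncp, start));

		return val;
	}
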
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
index ef99ef3f1ab4..37fe7c288878 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
@@ -59,10 +59,11 @@ static void anarion_gmac_exit(struct platform_device *pdev, void *priv)
gmac_write_reg(gmac, GMAC_RESET_CONTROL_REG, 1);
}
-static struct anarion_gmac *anarion_config_dt(struct platform_device *pdev)
+static struct anarion_gmac *
+anarion_config_dt(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat_dat)
{
struct anarion_gmac *gmac;
- phy_interface_t phy_mode;
void __iomem *ctl_block;
int err;
@@ -79,11 +80,7 @@ static struct anarion_gmac *anarion_config_dt(struct platform_device *pdev)
gmac->ctl_block = ctl_block;
- err = of_get_phy_mode(pdev->dev.of_node, &phy_mode);
- if (err)
- return ERR_PTR(err);
-
- switch (phy_mode) {
+ switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_RGMII:
fallthrough;
case PHY_INTERFACE_MODE_RGMII_ID:
@@ -93,7 +90,7 @@ static struct anarion_gmac *anarion_config_dt(struct platform_device *pdev)
break;
default:
dev_err(&pdev->dev, "Unsupported phy-mode (%d)\n",
- phy_mode);
+ plat_dat->phy_interface);
return ERR_PTR(-ENOTSUPP);
}
@@ -111,14 +108,14 @@ static int anarion_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- gmac = anarion_config_dt(pdev);
- if (IS_ERR(gmac))
- return PTR_ERR(gmac);
-
plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
+ gmac = anarion_config_dt(pdev, plat_dat);
+ if (IS_ERR(gmac))
+ return PTR_ERR(gmac);
+
plat_dat->init = anarion_gmac_init;
plat_dat->exit = anarion_gmac_exit;
anarion_gmac_init(pdev, gmac);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index bd4eb187f8c6..cd431f84f34f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -29,14 +29,21 @@ struct tegra_eqos {
void __iomem *regs;
struct reset_control *rst;
- struct clk *clk_master;
struct clk *clk_slave;
- struct clk *clk_tx;
- struct clk *clk_rx;
struct gpio_desc *reset;
};
+static struct clk *dwc_eth_find_clk(struct plat_stmmacenet_data *plat_dat,
+ const char *name)
+{
+ for (int i = 0; i < plat_dat->num_clks; i++)
+ if (strcmp(plat_dat->clks[i].id, name) == 0)
+ return plat_dat->clks[i].clk;
+
+ return NULL;
+}
+
static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
struct plat_stmmacenet_data *plat_dat)
{
@@ -46,7 +53,9 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
u32 a_index = 0;
if (!plat_dat->axi) {
- plat_dat->axi = kzalloc(sizeof(struct stmmac_axi), GFP_KERNEL);
+ plat_dat->axi = devm_kzalloc(&pdev->dev,
+ sizeof(struct stmmac_axi),
+ GFP_KERNEL);
if (!plat_dat->axi)
return -ENOMEM;
@@ -123,49 +132,9 @@ static int dwc_qos_probe(struct platform_device *pdev,
struct plat_stmmacenet_data *plat_dat,
struct stmmac_resources *stmmac_res)
{
- int err;
-
- plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, "apb_pclk");
- if (IS_ERR(plat_dat->stmmac_clk)) {
- dev_err(&pdev->dev, "apb_pclk clock not found.\n");
- return PTR_ERR(plat_dat->stmmac_clk);
- }
-
- err = clk_prepare_enable(plat_dat->stmmac_clk);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to enable apb_pclk clock: %d\n",
- err);
- return err;
- }
-
- plat_dat->pclk = devm_clk_get(&pdev->dev, "phy_ref_clk");
- if (IS_ERR(plat_dat->pclk)) {
- dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
- err = PTR_ERR(plat_dat->pclk);
- goto disable;
- }
-
- err = clk_prepare_enable(plat_dat->pclk);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to enable phy_ref clock: %d\n",
- err);
- goto disable;
- }
+ plat_dat->pclk = dwc_eth_find_clk(plat_dat, "phy_ref_clk");
return 0;
-
-disable:
- clk_disable_unprepare(plat_dat->stmmac_clk);
- return err;
-}
-
-static void dwc_qos_remove(struct platform_device *pdev)
-{
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct stmmac_priv *priv = netdev_priv(ndev);
-
- clk_disable_unprepare(priv->plat->pclk);
- clk_disable_unprepare(priv->plat->stmmac_clk);
}
#define SDMEMCOMPPADCTRL 0x8800
@@ -178,11 +147,10 @@ static void dwc_qos_remove(struct platform_device *pdev)
#define AUTO_CAL_STATUS 0x880c
#define AUTO_CAL_STATUS_ACTIVE BIT(31)
-static void tegra_eqos_fix_speed(void *priv, unsigned int speed, unsigned int mode)
+static void tegra_eqos_fix_speed(void *priv, int speed, unsigned int mode)
{
struct tegra_eqos *eqos = priv;
bool needs_calibration = false;
- long rate = 125000000;
u32 value;
int err;
@@ -193,11 +161,10 @@ static void tegra_eqos_fix_speed(void *priv, unsigned int speed, unsigned int mo
fallthrough;
case SPEED_10:
- rate = rgmii_clock(speed);
break;
default:
- dev_err(eqos->dev, "invalid speed %u\n", speed);
+ dev_err(eqos->dev, "invalid speed %d\n", speed);
break;
}
@@ -240,10 +207,6 @@ static void tegra_eqos_fix_speed(void *priv, unsigned int speed, unsigned int mo
value &= ~AUTO_CAL_CONFIG_ENABLE;
writel(value, eqos->regs + AUTO_CAL_CONFIG);
}
-
- err = clk_set_rate(eqos->clk_tx, rate);
- if (err < 0)
- dev_err(eqos->dev, "failed to set TX rate: %d\n", err);
}
static int tegra_eqos_init(struct platform_device *pdev, void *priv)
@@ -261,7 +224,7 @@ static int tegra_eqos_init(struct platform_device *pdev, void *priv)
}
static int tegra_eqos_probe(struct platform_device *pdev,
- struct plat_stmmacenet_data *data,
+ struct plat_stmmacenet_data *plat_dat,
struct stmmac_resources *res)
{
struct device *dev = &pdev->dev;
@@ -274,63 +237,24 @@ static int tegra_eqos_probe(struct platform_device *pdev,
eqos->dev = &pdev->dev;
eqos->regs = res->addr;
+ eqos->clk_slave = plat_dat->stmmac_clk;
if (!is_of_node(dev->fwnode))
goto bypass_clk_reset_gpio;
- eqos->clk_master = devm_clk_get(&pdev->dev, "master_bus");
- if (IS_ERR(eqos->clk_master)) {
- err = PTR_ERR(eqos->clk_master);
- goto error;
- }
-
- err = clk_prepare_enable(eqos->clk_master);
- if (err < 0)
- goto error;
-
- eqos->clk_slave = devm_clk_get(&pdev->dev, "slave_bus");
- if (IS_ERR(eqos->clk_slave)) {
- err = PTR_ERR(eqos->clk_slave);
- goto disable_master;
- }
-
- data->stmmac_clk = eqos->clk_slave;
-
- err = clk_prepare_enable(eqos->clk_slave);
- if (err < 0)
- goto disable_master;
-
- eqos->clk_rx = devm_clk_get(&pdev->dev, "rx");
- if (IS_ERR(eqos->clk_rx)) {
- err = PTR_ERR(eqos->clk_rx);
- goto disable_slave;
- }
-
- err = clk_prepare_enable(eqos->clk_rx);
- if (err < 0)
- goto disable_slave;
-
- eqos->clk_tx = devm_clk_get(&pdev->dev, "tx");
- if (IS_ERR(eqos->clk_tx)) {
- err = PTR_ERR(eqos->clk_tx);
- goto disable_rx;
- }
-
- err = clk_prepare_enable(eqos->clk_tx);
- if (err < 0)
- goto disable_rx;
+ plat_dat->clk_tx_i = dwc_eth_find_clk(plat_dat, "tx");
eqos->reset = devm_gpiod_get(&pdev->dev, "phy-reset", GPIOD_OUT_HIGH);
if (IS_ERR(eqos->reset)) {
err = PTR_ERR(eqos->reset);
- goto disable_tx;
+ return err;
}
usleep_range(2000, 4000);
gpiod_set_value(eqos->reset, 0);
/* MDIO bus was already reset just above */
- data->mdio_bus_data->needs_reset = false;
+ plat_dat->mdio_bus_data->needs_reset = false;
eqos->rst = devm_reset_control_get(&pdev->dev, "eqos");
if (IS_ERR(eqos->rst)) {
@@ -351,10 +275,11 @@ static int tegra_eqos_probe(struct platform_device *pdev,
usleep_range(2000, 4000);
bypass_clk_reset_gpio:
- data->fix_mac_speed = tegra_eqos_fix_speed;
- data->init = tegra_eqos_init;
- data->bsp_priv = eqos;
- data->flags |= STMMAC_FLAG_SPH_DISABLE;
+ plat_dat->fix_mac_speed = tegra_eqos_fix_speed;
+ plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
+ plat_dat->init = tegra_eqos_init;
+ plat_dat->bsp_priv = eqos;
+ plat_dat->flags |= STMMAC_FLAG_SPH_DISABLE;
err = tegra_eqos_init(pdev, eqos);
if (err < 0)
@@ -365,15 +290,7 @@ reset:
reset_control_assert(eqos->rst);
reset_phy:
gpiod_set_value(eqos->reset, 1);
-disable_tx:
- clk_disable_unprepare(eqos->clk_tx);
-disable_rx:
- clk_disable_unprepare(eqos->clk_rx);
-disable_slave:
- clk_disable_unprepare(eqos->clk_slave);
-disable_master:
- clk_disable_unprepare(eqos->clk_master);
-error:
+
return err;
}
@@ -383,27 +300,29 @@ static void tegra_eqos_remove(struct platform_device *pdev)
reset_control_assert(eqos->rst);
gpiod_set_value(eqos->reset, 1);
- clk_disable_unprepare(eqos->clk_tx);
- clk_disable_unprepare(eqos->clk_rx);
- clk_disable_unprepare(eqos->clk_slave);
- clk_disable_unprepare(eqos->clk_master);
}
struct dwc_eth_dwmac_data {
int (*probe)(struct platform_device *pdev,
- struct plat_stmmacenet_data *data,
+ struct plat_stmmacenet_data *plat_dat,
struct stmmac_resources *res);
void (*remove)(struct platform_device *pdev);
+ const char *stmmac_clk_name;
};
static const struct dwc_eth_dwmac_data dwc_qos_data = {
.probe = dwc_qos_probe,
- .remove = dwc_qos_remove,
+ .stmmac_clk_name = "apb_pclk",
};
static const struct dwc_eth_dwmac_data tegra_eqos_data = {
.probe = tegra_eqos_probe,
.remove = tegra_eqos_remove,
+ .stmmac_clk_name = "slave_bus",
+};
+
+static const struct dwc_eth_dwmac_data fsd_eqos_data = {
+ .stmmac_clk_name = "slave_bus",
};
static int dwc_eth_dwmac_probe(struct platform_device *pdev)
@@ -434,9 +353,23 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
- ret = data->probe(pdev, plat_dat, &stmmac_res);
+ ret = devm_clk_bulk_get_all(&pdev->dev, &plat_dat->clks);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "Failed to retrieve all required clocks\n");
+ plat_dat->num_clks = ret;
+
+ ret = clk_bulk_prepare_enable(plat_dat->num_clks, plat_dat->clks);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to enable clocks\n");
+
+ plat_dat->stmmac_clk = dwc_eth_find_clk(plat_dat,
+ data->stmmac_clk_name);
+
+ if (data->probe)
+ ret = data->probe(pdev, plat_dat, &stmmac_res);
if (ret < 0) {
dev_err_probe(&pdev->dev, ret, "failed to probe subdriver\n");
+ clk_bulk_disable_unprepare(plat_dat->num_clks, plat_dat->clks);
return ret;
}
@@ -451,7 +384,8 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
return ret;
remove:
- data->remove(pdev);
+ if (data->remove)
+ data->remove(pdev);
return ret;
}
@@ -459,15 +393,21 @@ remove:
static void dwc_eth_dwmac_remove(struct platform_device *pdev)
{
const struct dwc_eth_dwmac_data *data = device_get_match_data(&pdev->dev);
+ struct plat_stmmacenet_data *plat_dat = dev_get_platdata(&pdev->dev);
stmmac_dvr_remove(&pdev->dev);
- data->remove(pdev);
+ if (data->remove)
+ data->remove(pdev);
+
+ if (plat_dat)
+ clk_bulk_disable_unprepare(plat_dat->num_clks, plat_dat->clks);
}
static const struct of_device_id dwc_eth_dwmac_match[] = {
{ .compatible = "snps,dwc-qos-ethernet-4.10", .data = &dwc_qos_data },
{ .compatible = "nvidia,tegra186-eqos", .data = &tegra_eqos_data },
+ { .compatible = "tesla,fsd-ethqos", .data = &fsd_eqos_data },
{ }
};
MODULE_DEVICE_TABLE(of, dwc_eth_dwmac_match);
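
Aside (not part of the patch): the probe above replaces four hand-rolled devm_clk_get()/clk_prepare_enable() pairs with a single bulk acquisition, and dwc_eth_find_clk() then picks named clocks out of the returned array. A condensed sketch of that pattern; the device and clock name are hypothetical:

	#include <linux/clk.h>
	#include <linux/platform_device.h>
	#include <linux/string.h>

	static int example_probe(struct platform_device *pdev)
	{
		struct clk_bulk_data *clks;
		int num, ret;

		/* Grab every clock listed in the DT node in one call */
		num = devm_clk_bulk_get_all(&pdev->dev, &clks);
		if (num < 0)
			return dev_err_probe(&pdev->dev, num, "no clocks\n");

		ret = clk_bulk_prepare_enable(num, clks);
		if (ret)
			return ret;

		/* Look up one clock by its DT clock-names entry */
		for (int i = 0; i < num; i++)
			if (!strcmp(clks[i].id, "apb_pclk"))
				dev_info(&pdev->dev, "found apb_pclk\n");

		clk_bulk_disable_unprepare(num, clks);
		return 0;
	}
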
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
index 20d3a202bb8d..5d279fa54b3e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -51,7 +51,7 @@ struct imx_dwmac_ops {
int (*fix_soc_reset)(void *priv, void __iomem *ioaddr);
int (*set_intf_mode)(struct plat_stmmacenet_data *plat_dat);
- void (*fix_mac_speed)(void *priv, unsigned int speed, unsigned int mode);
+ void (*fix_mac_speed)(void *priv, int speed, unsigned int mode);
};
struct imx_priv_data {
@@ -192,7 +192,20 @@ static void imx_dwmac_exit(struct platform_device *pdev, void *priv)
/* nothing to do now */
}
-static void imx_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mode)
+static int imx_dwmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
+{
+ struct imx_priv_data *dwmac = bsp_priv;
+
+ interface = dwmac->plat_dat->mac_interface;
+ if (interface == PHY_INTERFACE_MODE_RMII ||
+ interface == PHY_INTERFACE_MODE_MII)
+ return 0;
+
+ return stmmac_set_clk_tx_rate(bsp_priv, clk_tx_i, interface, speed);
+}
+
+static void imx_dwmac_fix_speed(void *priv, int speed, unsigned int mode)
{
struct plat_stmmacenet_data *plat_dat;
struct imx_priv_data *dwmac = priv;
@@ -208,7 +221,7 @@ static void imx_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mod
rate = rgmii_clock(speed);
if (rate < 0) {
- dev_err(dwmac->dev, "invalid speed %u\n", speed);
+ dev_err(dwmac->dev, "invalid speed %d\n", speed);
return;
}
@@ -217,7 +230,7 @@ static void imx_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mod
dev_err(dwmac->dev, "failed to set tx rate %lu\n", rate);
}
-static void imx93_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mode)
+static void imx93_dwmac_fix_speed(void *priv, int speed, unsigned int mode)
{
struct imx_priv_data *dwmac = priv;
unsigned int iface;
@@ -358,7 +371,6 @@ static int imx_dwmac_probe(struct platform_device *pdev)
plat_dat->init = imx_dwmac_init;
plat_dat->exit = imx_dwmac_exit;
plat_dat->clks_config = imx_dwmac_clks_config;
- plat_dat->fix_mac_speed = imx_dwmac_fix_speed;
plat_dat->bsp_priv = dwmac;
dwmac->plat_dat = plat_dat;
dwmac->base_addr = stmmac_res.addr;
@@ -371,8 +383,13 @@ static int imx_dwmac_probe(struct platform_device *pdev)
if (ret)
goto err_dwmac_init;
- if (dwmac->ops->fix_mac_speed)
+ if (dwmac->ops->fix_mac_speed) {
plat_dat->fix_mac_speed = dwmac->ops->fix_mac_speed;
+ } else if (!dwmac->ops->mac_rgmii_txclk_auto_adj) {
+ plat_dat->clk_tx_i = dwmac->clk_tx;
+ plat_dat->set_clk_tx_rate = imx_dwmac_set_clk_tx_rate;
+ }
+
dwmac->plat_dat->fix_soc_reset = dwmac->ops->fix_soc_reset;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
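
Aside (not part of the patch): several glue drivers in this series drop their fix_mac_speed() hooks in favour of the new set_clk_tx_rate() callback, letting the stmmac core own the clk_set_rate() call. For plain RGMII such a callback reduces to roughly the sketch below (callback name hypothetical; rgmii_clock() maps 10/100/1000 Mb/s to 2.5/25/125 MHz):

	#include <linux/clk.h>
	#include <linux/phy.h>

	static int example_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
					   phy_interface_t interface, int speed)
	{
		long rate = rgmii_clock(speed);

		if (rate < 0)
			return rate;	/* unsupported link speed */

		return clk_set_rate(clk_tx_i, rate);
	}
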
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
index ddee6154d40b..599def7b3a64 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
@@ -22,31 +22,12 @@ struct intel_dwmac {
};
struct intel_dwmac_data {
- void (*fix_mac_speed)(void *priv, unsigned int speed, unsigned int mode);
unsigned long ptp_ref_clk_rate;
unsigned long tx_clk_rate;
bool tx_clk_en;
};
-static void kmb_eth_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
-{
- struct intel_dwmac *dwmac = priv;
- long rate;
- int ret;
-
- rate = rgmii_clock(speed);
- if (rate < 0) {
- dev_err(dwmac->dev, "Invalid speed\n");
- return;
- }
-
- ret = clk_set_rate(dwmac->tx_clk, rate);
- if (ret)
- dev_err(dwmac->dev, "Failed to configure tx clock rate\n");
-}
-
static const struct intel_dwmac_data kmb_data = {
- .fix_mac_speed = kmb_eth_fix_mac_speed,
.ptp_ref_clk_rate = 200000000,
.tx_clk_rate = 125000000,
.tx_clk_en = true,
@@ -89,8 +70,6 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
* platform_match().
*/
dwmac->data = device_get_match_data(&pdev->dev);
- if (dwmac->data->fix_mac_speed)
- plat_dat->fix_mac_speed = dwmac->data->fix_mac_speed;
/* Enable TX clock */
if (dwmac->data->tx_clk_en) {
@@ -132,6 +111,9 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
}
}
+ plat_dat->clk_tx_i = dwmac->tx_clk;
+ plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
+
plat_dat->bsp_priv = dwmac;
plat_dat->eee_usecs_rate = plat_dat->clk_ptp_rate;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index 48acba5eb178..c8bb9265bbb4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -5,15 +5,30 @@
#include <linux/clk-provider.h>
#include <linux/pci.h>
#include <linux/dmi.h>
+#include <linux/platform_data/x86/intel_pmc_ipc.h>
#include "dwmac-intel.h"
#include "dwmac4.h"
#include "stmmac.h"
#include "stmmac_ptp.h"
+struct pmc_serdes_regs {
+ u8 index;
+ u32 val;
+};
+
+struct pmc_serdes_reg_info {
+ const struct pmc_serdes_regs *regs;
+ u8 num_regs;
+};
+
struct intel_priv_data {
int mdio_adhoc_addr; /* mdio address for serdes & etc */
unsigned long crossts_adj;
bool is_pse;
+ const int *tsn_lane_regs;
+ int max_tsn_lane_regs;
+ struct pmc_serdes_reg_info pid_1g;
+ struct pmc_serdes_reg_info pid_2p5g;
};
/* This struct is used to associate PCI Function of MAC controller on a board,
@@ -35,6 +50,45 @@ struct stmmac_pci_info {
int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat);
};
+static const struct pmc_serdes_regs pid_modphy3_1g_regs[] = {
+ { PID_MODPHY3_B_MODPHY_PCR_LCPLL_DWORD0, B_MODPHY_PCR_LCPLL_DWORD0_1G },
+ { PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD2, N_MODPHY_PCR_LCPLL_DWORD2_1G },
+ { PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD7, N_MODPHY_PCR_LCPLL_DWORD7_1G },
+ { PID_MODPHY3_N_MODPHY_PCR_LPPLL_DWORD10, N_MODPHY_PCR_LPPLL_DWORD10_1G },
+ { PID_MODPHY3_N_MODPHY_PCR_CMN_ANA_DWORD30, N_MODPHY_PCR_CMN_ANA_DWORD30_1G },
+ {}
+};
+
+static const struct pmc_serdes_regs pid_modphy3_2p5g_regs[] = {
+ { PID_MODPHY3_B_MODPHY_PCR_LCPLL_DWORD0, B_MODPHY_PCR_LCPLL_DWORD0_2P5G },
+ { PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD2, N_MODPHY_PCR_LCPLL_DWORD2_2P5G },
+ { PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD7, N_MODPHY_PCR_LCPLL_DWORD7_2P5G },
+ { PID_MODPHY3_N_MODPHY_PCR_LPPLL_DWORD10, N_MODPHY_PCR_LPPLL_DWORD10_2P5G },
+ { PID_MODPHY3_N_MODPHY_PCR_CMN_ANA_DWORD30, N_MODPHY_PCR_CMN_ANA_DWORD30_2P5G },
+ {}
+};
+
+static const struct pmc_serdes_regs pid_modphy1_1g_regs[] = {
+ { PID_MODPHY1_B_MODPHY_PCR_LCPLL_DWORD0, B_MODPHY_PCR_LCPLL_DWORD0_1G },
+ { PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD2, N_MODPHY_PCR_LCPLL_DWORD2_1G },
+ { PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD7, N_MODPHY_PCR_LCPLL_DWORD7_1G },
+ { PID_MODPHY1_N_MODPHY_PCR_LPPLL_DWORD10, N_MODPHY_PCR_LPPLL_DWORD10_1G },
+ { PID_MODPHY1_N_MODPHY_PCR_CMN_ANA_DWORD30, N_MODPHY_PCR_CMN_ANA_DWORD30_1G },
+ {}
+};
+
+static const struct pmc_serdes_regs pid_modphy1_2p5g_regs[] = {
+ { PID_MODPHY1_B_MODPHY_PCR_LCPLL_DWORD0, B_MODPHY_PCR_LCPLL_DWORD0_2P5G },
+ { PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD2, N_MODPHY_PCR_LCPLL_DWORD2_2P5G },
+ { PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD7, N_MODPHY_PCR_LCPLL_DWORD7_2P5G },
+ { PID_MODPHY1_N_MODPHY_PCR_LPPLL_DWORD10, N_MODPHY_PCR_LPPLL_DWORD10_2P5G },
+ { PID_MODPHY1_N_MODPHY_PCR_CMN_ANA_DWORD30, N_MODPHY_PCR_CMN_ANA_DWORD30_2P5G },
+ {}
+};
+
+static const int ehl_tsn_lane_regs[] = {7, 8, 9, 10, 11};
+static const int adln_tsn_lane_regs[] = {6};
+
static int stmmac_pci_find_phy_addr(struct pci_dev *pdev,
const struct dmi_system_id *dmi_list)
{
@@ -93,7 +147,7 @@ static int intel_serdes_powerup(struct net_device *ndev, void *priv_data)
data &= ~SERDES_RATE_MASK;
data &= ~SERDES_PCLK_MASK;
- if (priv->plat->max_speed == 2500)
+ if (priv->plat->phy_interface == PHY_INTERFACE_MODE_2500BASEX)
data |= SERDES_RATE_PCIE_GEN2 << SERDES_RATE_PCIE_SHIFT |
SERDES_PCLK_37p5MHZ << SERDES_PCLK_SHIFT;
else
@@ -415,6 +469,95 @@ static void intel_mgbe_pse_crossts_adj(struct intel_priv_data *intel_priv,
}
}
+static int intel_tsn_lane_is_available(struct net_device *ndev,
+ struct intel_priv_data *intel_priv)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ struct pmc_ipc_cmd tmp = {};
+ struct pmc_ipc_rbuf rbuf = {};
+ int ret = 0, i, j;
+ const int max_fia_regs = 5;
+
+ tmp.cmd = IPC_SOC_REGISTER_ACCESS;
+ tmp.sub_cmd = IPC_SOC_SUB_CMD_READ;
+
+ for (i = 0; i < max_fia_regs; i++) {
+ tmp.wbuf[0] = R_PCH_FIA_15_PCR_LOS1_REG_BASE + i;
+
+ ret = intel_pmc_ipc(&tmp, &rbuf);
+ if (ret < 0) {
+ netdev_info(priv->dev, "Failed to read from PMC.\n");
+ return ret;
+ }
+
+		for (j = 0; j < intel_priv->max_tsn_lane_regs; j++)
+ if ((rbuf.buf[0] >>
+ (4 * (intel_priv->tsn_lane_regs[j] % 8)) &
+ B_PCH_FIA_PCR_L0O) == 0xB)
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int intel_set_reg_access(const struct pmc_serdes_regs *regs, int max_regs)
+{
+ int ret = 0, i;
+
+ for (i = 0; i < max_regs; i++) {
+ struct pmc_ipc_cmd tmp = {};
+ struct pmc_ipc_rbuf rbuf = {};
+
+ tmp.cmd = IPC_SOC_REGISTER_ACCESS;
+ tmp.sub_cmd = IPC_SOC_SUB_CMD_WRITE;
+ tmp.wbuf[0] = (u32)regs[i].index;
+ tmp.wbuf[1] = regs[i].val;
+
+ ret = intel_pmc_ipc(&tmp, &rbuf);
+ if (ret < 0)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int intel_mac_finish(struct net_device *ndev,
+ void *intel_data,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct intel_priv_data *intel_priv = intel_data;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ const struct pmc_serdes_regs *regs;
+ int max_regs = 0;
+ int ret = 0;
+
+ ret = intel_tsn_lane_is_available(ndev, intel_priv);
+ if (ret < 0) {
+		netdev_info(priv->dev, "No TSN lane available; cannot set the SerDes registers.\n");
+ return ret;
+ }
+
+ if (interface == PHY_INTERFACE_MODE_2500BASEX) {
+ regs = intel_priv->pid_2p5g.regs;
+ max_regs = intel_priv->pid_2p5g.num_regs;
+ } else {
+ regs = intel_priv->pid_1g.regs;
+ max_regs = intel_priv->pid_1g.num_regs;
+ }
+
+ ret = intel_set_reg_access(regs, max_regs);
+ if (ret < 0)
+ return ret;
+
+ priv->plat->phy_interface = interface;
+
+ intel_serdes_powerdown(ndev, intel_priv);
+ intel_serdes_powerup(ndev, intel_priv);
+
+ return ret;
+}
+
static void common_default_data(struct plat_stmmacenet_data *plat)
{
plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
@@ -624,6 +767,8 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
static int ehl_common_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
+ struct intel_priv_data *intel_priv = plat->bsp_priv;
+
plat->rx_queues_to_use = 8;
plat->tx_queues_to_use = 8;
plat->flags |= STMMAC_FLAG_USE_PHY_WOL;
@@ -639,20 +784,29 @@ static int ehl_common_data(struct pci_dev *pdev,
plat->safety_feat_cfg->prtyen = 0;
plat->safety_feat_cfg->tmouten = 0;
+ intel_priv->tsn_lane_regs = ehl_tsn_lane_regs;
+ intel_priv->max_tsn_lane_regs = ARRAY_SIZE(ehl_tsn_lane_regs);
+
return intel_mgbe_common_data(pdev, plat);
}
static int ehl_sgmii_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
+ struct intel_priv_data *intel_priv = plat->bsp_priv;
+
plat->bus_id = 1;
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
- plat->speed_mode_2500 = intel_speed_mode_2500;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
-
+ plat->mac_finish = intel_mac_finish;
plat->clk_ptp_rate = 204800000;
+ intel_priv->pid_1g.regs = pid_modphy3_1g_regs;
+ intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy3_1g_regs);
+ intel_priv->pid_2p5g.regs = pid_modphy3_2p5g_regs;
+ intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy3_2p5g_regs);
+
return ehl_common_data(pdev, plat);
}
@@ -705,10 +859,18 @@ static struct stmmac_pci_info ehl_pse0_rgmii1g_info = {
static int ehl_pse0_sgmii1g_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
+ struct intel_priv_data *intel_priv = plat->bsp_priv;
+
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
- plat->speed_mode_2500 = intel_speed_mode_2500;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
+ plat->mac_finish = intel_mac_finish;
+
+ intel_priv->pid_1g.regs = pid_modphy1_1g_regs;
+ intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy1_1g_regs);
+ intel_priv->pid_2p5g.regs = pid_modphy1_2p5g_regs;
+ intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy1_2p5g_regs);
+
return ehl_pse0_common_data(pdev, plat);
}
@@ -746,10 +908,18 @@ static struct stmmac_pci_info ehl_pse1_rgmii1g_info = {
static int ehl_pse1_sgmii1g_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
+ struct intel_priv_data *intel_priv = plat->bsp_priv;
+
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
- plat->speed_mode_2500 = intel_speed_mode_2500;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
+ plat->mac_finish = intel_mac_finish;
+
+ intel_priv->pid_1g.regs = pid_modphy1_1g_regs;
+ intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy1_1g_regs);
+ intel_priv->pid_2p5g.regs = pid_modphy1_2p5g_regs;
+ intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy1_2p5g_regs);
+
return ehl_pse1_common_data(pdev, plat);
}
@@ -835,6 +1005,55 @@ static int adls_sgmii_phy1_data(struct pci_dev *pdev,
static struct stmmac_pci_info adls_sgmii1g_phy1_info = {
.setup = adls_sgmii_phy1_data,
};
+
+static int adln_common_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ struct intel_priv_data *intel_priv = plat->bsp_priv;
+
+ plat->rx_queues_to_use = 6;
+ plat->tx_queues_to_use = 4;
+ plat->clk_ptp_rate = 204800000;
+
+ plat->safety_feat_cfg->tsoee = 1;
+ plat->safety_feat_cfg->mrxpee = 0;
+ plat->safety_feat_cfg->mestee = 1;
+ plat->safety_feat_cfg->mrxee = 1;
+ plat->safety_feat_cfg->mtxee = 1;
+ plat->safety_feat_cfg->epsi = 0;
+ plat->safety_feat_cfg->edpp = 0;
+ plat->safety_feat_cfg->prtyen = 0;
+ plat->safety_feat_cfg->tmouten = 0;
+
+ intel_priv->tsn_lane_regs = adln_tsn_lane_regs;
+ intel_priv->max_tsn_lane_regs = ARRAY_SIZE(adln_tsn_lane_regs);
+
+ return intel_mgbe_common_data(pdev, plat);
+}
+
+static int adln_sgmii_phy0_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ struct intel_priv_data *intel_priv = plat->bsp_priv;
+
+ plat->bus_id = 1;
+ plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
+ plat->serdes_powerup = intel_serdes_powerup;
+ plat->serdes_powerdown = intel_serdes_powerdown;
+ plat->mac_finish = intel_mac_finish;
+
+ intel_priv->pid_1g.regs = pid_modphy1_1g_regs;
+ intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy1_1g_regs);
+ intel_priv->pid_2p5g.regs = pid_modphy1_2p5g_regs;
+ intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy1_2p5g_regs);
+
+ return adln_common_data(pdev, plat);
+}
+
+static struct stmmac_pci_info adln_sgmii1g_phy0_info = {
+ .setup = adln_sgmii_phy0_data,
+};
+
static const struct stmmac_pci_func_data galileo_stmmac_func_data[] = {
{
.func = 6,
@@ -1217,8 +1436,8 @@ static const struct pci_device_id intel_eth_pci_id_table[] = {
{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1, &tgl_sgmii1g_phy1_info) },
{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_0, &adls_sgmii1g_phy0_info) },
{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_1, &adls_sgmii1g_phy1_info) },
- { PCI_DEVICE_DATA(INTEL, ADLN_SGMII1G, &tgl_sgmii1g_phy0_info) },
- { PCI_DEVICE_DATA(INTEL, RPLP_SGMII1G, &tgl_sgmii1g_phy0_info) },
+ { PCI_DEVICE_DATA(INTEL, ADLN_SGMII1G, &adln_sgmii1g_phy0_info) },
+ { PCI_DEVICE_DATA(INTEL, RPLP_SGMII1G, &adln_sgmii1g_phy0_info) },
{}
};
MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);
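
Aside (not part of the patch): a stand-alone illustration of the FIA lane-owner nibble check used by intel_tsn_lane_is_available() above. Each 32-bit LOS register packs eight 4-bit lane-owner fields, and 0xB marks a TSN-owned lane; the register value here is hypothetical:

	#include <assert.h>
	#include <stdint.h>

	#define B_PCH_FIA_PCR_L0O 0xF	/* GENMASK(3, 0) */

	static int lane_owner(uint32_t los, int lane)
	{
		return (los >> (4 * (lane % 8))) & B_PCH_FIA_PCR_L0O;
	}

	int main(void)
	{
		uint32_t los = 0x00B00000;	/* hypothetical: lane 5 owned by TSN */

		assert(lane_owner(los, 5) == 0xB);
		assert(lane_owner(los, 13) == 0xB);	/* 13 % 8 == 5 */
		return 0;
	}
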
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h
index 0a37987478c1..a12f8e65f89f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h
@@ -50,4 +50,33 @@
#define PCH_PTP_CLK_FREQ_19_2MHZ (GMAC_GPO0)
#define PCH_PTP_CLK_FREQ_200MHZ (0)
+/* Modphy Register index */
+#define R_PCH_FIA_15_PCR_LOS1_REG_BASE 8
+#define R_PCH_FIA_15_PCR_LOS2_REG_BASE 9
+#define R_PCH_FIA_15_PCR_LOS3_REG_BASE 10
+#define R_PCH_FIA_15_PCR_LOS4_REG_BASE 11
+#define R_PCH_FIA_15_PCR_LOS5_REG_BASE 12
+#define B_PCH_FIA_PCR_L0O GENMASK(3, 0)
+#define PID_MODPHY1_B_MODPHY_PCR_LCPLL_DWORD0 13
+#define PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD2 14
+#define PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD7 15
+#define PID_MODPHY1_N_MODPHY_PCR_LPPLL_DWORD10 16
+#define PID_MODPHY1_N_MODPHY_PCR_CMN_ANA_DWORD30 17
+#define PID_MODPHY3_B_MODPHY_PCR_LCPLL_DWORD0 18
+#define PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD2 19
+#define PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD7 20
+#define PID_MODPHY3_N_MODPHY_PCR_LPPLL_DWORD10 21
+#define PID_MODPHY3_N_MODPHY_PCR_CMN_ANA_DWORD30 22
+
+#define B_MODPHY_PCR_LCPLL_DWORD0_1G 0x46AAAA41
+#define N_MODPHY_PCR_LCPLL_DWORD2_1G 0x00000139
+#define N_MODPHY_PCR_LCPLL_DWORD7_1G 0x002A0003
+#define N_MODPHY_PCR_LPPLL_DWORD10_1G 0x00170008
+#define N_MODPHY_PCR_CMN_ANA_DWORD30_1G 0x0000D4AC
+#define B_MODPHY_PCR_LCPLL_DWORD0_2P5G 0x58555551
+#define N_MODPHY_PCR_LCPLL_DWORD2_2P5G 0x0000012D
+#define N_MODPHY_PCR_LCPLL_DWORD7_2P5G 0x001F0003
+#define N_MODPHY_PCR_LPPLL_DWORD10_2P5G 0x00170008
+#define N_MODPHY_PCR_CMN_ANA_DWORD30_2P5G 0x8200ACAC
+
#endif /* __DWMAC_INTEL_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index 61227dcf56dc..ca4035cbb55b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -112,7 +112,7 @@ struct ipq806x_gmac {
phy_interface_t phy_mode;
};
-static int get_clk_div_sgmii(struct ipq806x_gmac *gmac, unsigned int speed)
+static int get_clk_div_sgmii(struct ipq806x_gmac *gmac, int speed)
{
struct device *dev = &gmac->pdev->dev;
int div;
@@ -138,7 +138,7 @@ static int get_clk_div_sgmii(struct ipq806x_gmac *gmac, unsigned int speed)
return div;
}
-static int get_clk_div_rgmii(struct ipq806x_gmac *gmac, unsigned int speed)
+static int get_clk_div_rgmii(struct ipq806x_gmac *gmac, int speed)
{
struct device *dev = &gmac->pdev->dev;
int div;
@@ -164,7 +164,7 @@ static int get_clk_div_rgmii(struct ipq806x_gmac *gmac, unsigned int speed)
return div;
}
-static int ipq806x_gmac_set_speed(struct ipq806x_gmac *gmac, unsigned int speed)
+static int ipq806x_gmac_set_speed(struct ipq806x_gmac *gmac, int speed)
{
uint32_t clk_bits, val;
int div;
@@ -211,16 +211,12 @@ static int ipq806x_gmac_set_speed(struct ipq806x_gmac *gmac, unsigned int speed)
return 0;
}
-static int ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac)
+static int ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac,
+ struct plat_stmmacenet_data *plat_dat)
{
struct device *dev = &gmac->pdev->dev;
- int ret;
- ret = of_get_phy_mode(dev->of_node, &gmac->phy_mode);
- if (ret) {
- dev_err(dev, "missing phy mode property\n");
- return -EINVAL;
- }
+ gmac->phy_mode = plat_dat->phy_interface;
if (of_property_read_u32(dev->of_node, "qcom,id", &gmac->id) < 0) {
dev_err(dev, "missing qcom id property\n");
@@ -260,11 +256,12 @@ static int ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac)
return PTR_ERR_OR_ZERO(gmac->qsgmii_csr);
}
-static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
+static int ipq806x_gmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
{
- struct ipq806x_gmac *gmac = priv;
+ struct ipq806x_gmac *gmac = bsp_priv;
- ipq806x_gmac_set_speed(gmac, speed);
+ return ipq806x_gmac_set_speed(gmac, speed);
}
static int
@@ -397,7 +394,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
gmac->pdev = pdev;
- err = ipq806x_gmac_of_parse(gmac);
+ err = ipq806x_gmac_of_parse(gmac, plat_dat);
if (err) {
dev_err(dev, "device tree parsing error\n");
return err;
@@ -478,7 +475,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
plat_dat->has_gmac = true;
plat_dat->bsp_priv = gmac;
- plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed;
+ plat_dat->set_clk_tx_rate = ipq806x_gmac_set_clk_tx_rate;
plat_dat->multicast_filter_bins = 0;
plat_dat->tx_fifo_size = 8192;
plat_dat->rx_fifo_size = 8192;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
index ab7c2750c104..1a93787056a7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
@@ -151,8 +151,7 @@ static struct stmmac_pci_info loongson_gmac_pci_info = {
.setup = loongson_gmac_data,
};
-static void loongson_gnet_fix_speed(void *priv, unsigned int speed,
- unsigned int mode)
+static void loongson_gnet_fix_speed(void *priv, int speed, unsigned int mode)
{
struct loongson_data *ld = (struct loongson_data *)priv;
struct net_device *ndev = dev_get_drvdata(ld->dev);
@@ -534,10 +533,10 @@ static int loongson_dwmac_fix_reset(void *priv, void __iomem *ioaddr)
static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct plat_stmmacenet_data *plat;
+ struct stmmac_resources res = {};
struct stmmac_pci_info *info;
- struct stmmac_resources res;
struct loongson_data *ld;
- int ret, i;
+ int ret;
plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
if (!plat)
@@ -567,17 +566,10 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
pci_set_master(pdev);
/* Get the base address of device */
- for (i = 0; i < PCI_STD_NUM_BARS; i++) {
- if (pci_resource_len(pdev, i) == 0)
- continue;
- ret = pcim_iomap_regions(pdev, BIT(0), DRIVER_NAME);
- if (ret)
- goto err_disable_device;
- break;
- }
-
- memset(&res, 0, sizeof(res));
- res.addr = pcim_iomap_table(pdev)[0];
+ res.addr = pcim_iomap_region(pdev, 0, DRIVER_NAME);
+ ret = PTR_ERR_OR_ZERO(res.addr);
+ if (ret)
+ goto err_disable_device;
plat->bsp_priv = ld;
plat->setup = loongson_dwmac_setup;
@@ -590,6 +582,9 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
if (ret)
goto err_disable_device;
+ plat->tx_fifo_size = SZ_16K * plat->tx_queues_to_use;
+ plat->rx_fifo_size = SZ_16K * plat->rx_queues_to_use;
+
if (dev_of_node(&pdev->dev))
ret = loongson_dwmac_dt_config(pdev, plat, &res);
else
@@ -622,7 +617,6 @@ static void loongson_dwmac_remove(struct pci_dev *pdev)
struct net_device *ndev = dev_get_drvdata(&pdev->dev);
struct stmmac_priv *priv = netdev_priv(ndev);
struct loongson_data *ld;
- int i;
ld = priv->plat->bsp_priv;
stmmac_dvr_remove(&pdev->dev);
@@ -633,13 +627,6 @@ static void loongson_dwmac_remove(struct pci_dev *pdev)
if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN)
loongson_dwmac_msi_clear(pdev);
- for (i = 0; i < PCI_STD_NUM_BARS; i++) {
- if (pci_resource_len(pdev, i) == 0)
- continue;
- pcim_iounmap_regions(pdev, BIT(i));
- break;
- }
-
pci_disable_device(pdev);
}
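
Aside (not part of the patch): the loongson probe above moves from the pcim_iomap_regions()/pcim_iomap_table() pair to the newer single-BAR devres helper, which is also what lets remove() drop its manual unmap loop. A sketch of the idiom; the BAR number and name are hypothetical:

	#include <linux/io.h>
	#include <linux/pci.h>

	static int example_map_bar0(struct pci_dev *pdev)
	{
		void __iomem *base;

		/* Request + map BAR 0; unmapped automatically on driver detach */
		base = pcim_iomap_region(pdev, 0, "example-driver");
		if (IS_ERR(base))
			return PTR_ERR(base);

		writel(0, base);	/* hypothetical register poke */
		return 0;
	}
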
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
index c9636832a570..d178d5ddc7c7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
@@ -456,7 +456,6 @@ static int mediatek_dwmac_config_dt(struct mediatek_dwmac_plat_data *plat)
{
struct mac_delay_struct *mac_delay = &plat->mac_delay;
u32 tx_delay_ps, rx_delay_ps;
- int err;
plat->peri_regmap = syscon_regmap_lookup_by_phandle(plat->np, "mediatek,pericfg");
if (IS_ERR(plat->peri_regmap)) {
@@ -464,12 +463,6 @@ static int mediatek_dwmac_config_dt(struct mediatek_dwmac_plat_data *plat)
return PTR_ERR(plat->peri_regmap);
}
- err = of_get_phy_mode(plat->np, &plat->phy_mode);
- if (err) {
- dev_err(plat->dev, "not find phy-mode\n");
- return err;
- }
-
if (!of_property_read_u32(plat->np, "mediatek,tx-delay-ps", &tx_delay_ps)) {
if (tx_delay_ps < plat->variant->tx_delay_max) {
mac_delay->tx_delay = tx_delay_ps;
@@ -587,6 +580,7 @@ static int mediatek_dwmac_common_data(struct platform_device *pdev,
{
int i;
+ priv_plat->phy_mode = plat->phy_interface;
plat->mac_interface = priv_plat->phy_mode;
if (priv_plat->mac_wol)
plat->flags &= ~STMMAC_FLAG_USE_PHY_WOL;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
index 5469fa1b429e..07c504d07604 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
@@ -22,9 +22,10 @@ struct meson_dwmac {
void __iomem *reg;
};
-static void meson6_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
+static int meson6_dwmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
{
- struct meson_dwmac *dwmac = priv;
+ struct meson_dwmac *dwmac = bsp_priv;
unsigned int val;
val = readl(dwmac->reg);
@@ -39,6 +40,8 @@ static void meson6_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned
}
writel(val, dwmac->reg);
+
+ return 0;
}
static int meson6_dwmac_probe(struct platform_device *pdev)
@@ -65,7 +68,7 @@ static int meson6_dwmac_probe(struct platform_device *pdev)
return PTR_ERR(dwmac->reg);
plat_dat->bsp_priv = dwmac;
- plat_dat->fix_mac_speed = meson6_dwmac_fix_mac_speed;
+ plat_dat->set_clk_tx_rate = meson6_dwmac_set_clk_tx_rate;
return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index 9c2d62d133ad..a50782994b97 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -417,11 +417,7 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
return PTR_ERR(dwmac->regs);
dwmac->dev = &pdev->dev;
- ret = of_get_phy_mode(pdev->dev.of_node, &dwmac->phy_mode);
- if (ret) {
- dev_err(&pdev->dev, "missing phy-mode property\n");
- return ret;
- }
+ dwmac->phy_mode = plat_dat->phy_interface;
/* use 2ns as fallback since this value was previously hardcoded */
if (of_property_read_u32(pdev->dev.of_node, "amlogic,tx-delay-ns",
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index 2a5b38723635..0e4da216f942 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -111,7 +111,7 @@ struct qcom_ethqos {
unsigned int link_clk_rate;
struct clk *link_clk;
struct phy *serdes_phy;
- unsigned int speed;
+ int speed;
int serdes_speed;
phy_interface_t phy_mode;
@@ -169,30 +169,17 @@ static void rgmii_dump(void *priv)
rgmii_readl(ethqos, EMAC_SYSTEM_LOW_POWER_DEBUG));
}
-/* Clock rates */
-#define RGMII_1000_NOM_CLK_FREQ (250 * 1000 * 1000UL)
-#define RGMII_ID_MODE_100_LOW_SVS_CLK_FREQ (50 * 1000 * 1000UL)
-#define RGMII_ID_MODE_10_LOW_SVS_CLK_FREQ (5 * 1000 * 1000UL)
-
static void
-ethqos_update_link_clk(struct qcom_ethqos *ethqos, unsigned int speed)
+ethqos_update_link_clk(struct qcom_ethqos *ethqos, int speed)
{
+ long rate;
+
if (!phy_interface_mode_is_rgmii(ethqos->phy_mode))
return;
- switch (speed) {
- case SPEED_1000:
- ethqos->link_clk_rate = RGMII_1000_NOM_CLK_FREQ;
- break;
-
- case SPEED_100:
- ethqos->link_clk_rate = RGMII_ID_MODE_100_LOW_SVS_CLK_FREQ;
- break;
-
- case SPEED_10:
- ethqos->link_clk_rate = RGMII_ID_MODE_10_LOW_SVS_CLK_FREQ;
- break;
- }
+ rate = rgmii_clock(speed);
+ if (rate > 0)
+ ethqos->link_clk_rate = rate * 2;
clk_set_rate(ethqos->link_clk, ethqos->link_clk_rate);
}
@@ -699,7 +686,7 @@ static int ethqos_configure(struct qcom_ethqos *ethqos)
return ethqos->configure_func(ethqos);
}
-static void ethqos_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
+static void ethqos_fix_mac_speed(void *priv, int speed, unsigned int mode)
{
struct qcom_ethqos *ethqos = priv;
@@ -807,9 +794,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
if (!ethqos)
return -ENOMEM;
- ret = of_get_phy_mode(np, &ethqos->phy_mode);
- if (ret)
- return dev_err_probe(dev, ret, "Failed to get phy mode\n");
+ ethqos->phy_mode = plat_dat->phy_interface;
switch (ethqos->phy_mode) {
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
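
Aside (not part of the patch): the rewrite above folds the three removed RGMII link-clock constants into rgmii_clock() * 2, since the EMAC link clock runs at twice the RGMII TX clock. A stand-alone check that the mapping is preserved; the local stub mirrors rgmii_clock()'s known return values:

	#include <assert.h>

	/* Local stub with rgmii_clock()'s 10/100/1000 Mb/s mapping */
	static long rgmii_clock(int speed)
	{
		switch (speed) {
		case 1000: return 125000000;
		case 100:  return 25000000;
		case 10:   return 2500000;
		default:   return -1;
		}
	}

	int main(void)
	{
		/* Matches the removed RGMII_*_CLK_FREQ constants */
		assert(rgmii_clock(1000) * 2 == 250 * 1000 * 1000L);
		assert(rgmii_clock(100) * 2 == 50 * 1000 * 1000L);
		assert(rgmii_clock(10) * 2 == 5 * 1000 * 1000L);
		return 0;
	}
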
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index a4dc89e23a68..700858ff6f7c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -33,6 +33,8 @@ struct rk_gmac_ops {
void (*set_clock_selection)(struct rk_priv_data *bsp_priv, bool input,
bool enable);
void (*integrated_phy_powerup)(struct rk_priv_data *bsp_priv);
+ void (*integrated_phy_powerdown)(struct rk_priv_data *bsp_priv);
+ bool php_grf_required;
bool regs_valid;
u32 regs[];
};
@@ -91,6 +93,76 @@ struct rk_priv_data {
(((tx) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \
((rx) ? soc##_GMAC_RXCLK_DLY_ENABLE : soc##_GMAC_RXCLK_DLY_DISABLE))
+#define RK_GRF_MACPHY_CON0 0xb00
+#define RK_GRF_MACPHY_CON1 0xb04
+#define RK_GRF_MACPHY_CON2 0xb08
+#define RK_GRF_MACPHY_CON3 0xb0c
+
+#define RK_MACPHY_ENABLE GRF_BIT(0)
+#define RK_MACPHY_DISABLE GRF_CLR_BIT(0)
+#define RK_MACPHY_CFG_CLK_50M GRF_BIT(14)
+#define RK_GMAC2PHY_RMII_MODE (GRF_BIT(6) | GRF_CLR_BIT(7))
+#define RK_GRF_CON2_MACPHY_ID HIWORD_UPDATE(0x1234, 0xffff, 0)
+#define RK_GRF_CON3_MACPHY_ID HIWORD_UPDATE(0x35, 0x3f, 0)
+
+static void rk_gmac_integrated_ephy_powerup(struct rk_priv_data *priv)
+{
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_CFG_CLK_50M);
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_GMAC2PHY_RMII_MODE);
+
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON2, RK_GRF_CON2_MACPHY_ID);
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON3, RK_GRF_CON3_MACPHY_ID);
+
+	if (priv->phy_reset) {
+		/* PHY needs to be disabled before trying to reset it */
+		regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_DISABLE);
+		reset_control_assert(priv->phy_reset);
+		usleep_range(10, 20);
+		reset_control_deassert(priv->phy_reset);
+		usleep_range(10, 20);
+		regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_ENABLE);
+		msleep(30);
+	}
+}
+
+static void rk_gmac_integrated_ephy_powerdown(struct rk_priv_data *priv)
+{
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_DISABLE);
+ if (priv->phy_reset)
+ reset_control_assert(priv->phy_reset);
+}
+
+#define RK_FEPHY_SHUTDOWN GRF_BIT(1)
+#define RK_FEPHY_POWERUP GRF_CLR_BIT(1)
+#define RK_FEPHY_INTERNAL_RMII_SEL GRF_BIT(6)
+#define RK_FEPHY_24M_CLK_SEL (GRF_BIT(8) | GRF_BIT(9))
+#define RK_FEPHY_PHY_ID GRF_BIT(11)
+
+static void rk_gmac_integrated_fephy_powerup(struct rk_priv_data *priv,
+ unsigned int reg)
+{
+ reset_control_assert(priv->phy_reset);
+ usleep_range(20, 30);
+
+ regmap_write(priv->grf, reg,
+ RK_FEPHY_POWERUP |
+ RK_FEPHY_INTERNAL_RMII_SEL |
+ RK_FEPHY_24M_CLK_SEL |
+ RK_FEPHY_PHY_ID);
+ usleep_range(10000, 12000);
+
+ reset_control_deassert(priv->phy_reset);
+ usleep_range(50000, 60000);
+}
+
+static void rk_gmac_integrated_fephy_powerdown(struct rk_priv_data *priv,
+ unsigned int reg)
+{
+ regmap_write(priv->grf, reg, RK_FEPHY_SHUTDOWN);
+}
+
#define PX30_GRF_GMAC_CON1 0x0904
/* PX30_GRF_GMAC_CON1 */
@@ -101,13 +173,6 @@ struct rk_priv_data {
static void px30_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
PX30_GMAC_PHY_INTF_SEL_RMII);
}
@@ -181,13 +246,6 @@ static const struct rk_gmac_ops px30_ops = {
static void rk3128_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
RK3128_GMAC_PHY_INTF_SEL_RGMII |
RK3128_GMAC_RMII_MODE_CLR);
@@ -199,13 +257,6 @@ static void rk3128_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3128_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
RK3128_GMAC_PHY_INTF_SEL_RMII | RK3128_GMAC_RMII_MODE);
}
@@ -214,11 +265,6 @@ static void rk3128_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
if (speed == 10)
regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
RK3128_GMAC_CLK_2_5M);
@@ -236,11 +282,6 @@ static void rk3128_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
if (speed == 10) {
regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
RK3128_GMAC_RMII_CLK_2_5M |
@@ -297,13 +338,6 @@ static const struct rk_gmac_ops rk3128_ops = {
static void rk3228_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
RK3228_GMAC_PHY_INTF_SEL_RGMII |
RK3228_GMAC_RMII_MODE_CLR |
@@ -316,13 +350,6 @@ static void rk3228_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3228_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
RK3228_GMAC_PHY_INTF_SEL_RMII |
RK3228_GMAC_RMII_MODE);
@@ -335,11 +362,6 @@ static void rk3228_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
if (speed == 10)
regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
RK3228_GMAC_CLK_2_5M);
@@ -357,11 +379,6 @@ static void rk3228_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
if (speed == 10)
regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
RK3228_GMAC_RMII_CLK_2_5M |
@@ -378,6 +395,8 @@ static void rk3228_integrated_phy_powerup(struct rk_priv_data *priv)
{
regmap_write(priv->grf, RK3228_GRF_CON_MUX,
RK3228_GRF_CON_MUX_GMAC_INTEGRATED_PHY);
+
+ rk_gmac_integrated_ephy_powerup(priv);
}
static const struct rk_gmac_ops rk3228_ops = {
@@ -385,7 +404,8 @@ static const struct rk_gmac_ops rk3228_ops = {
.set_to_rmii = rk3228_set_to_rmii,
.set_rgmii_speed = rk3228_set_rgmii_speed,
.set_rmii_speed = rk3228_set_rmii_speed,
- .integrated_phy_powerup = rk3228_integrated_phy_powerup,
+ .integrated_phy_powerup = rk3228_integrated_phy_powerup,
+ .integrated_phy_powerdown = rk_gmac_integrated_ephy_powerdown,
};
#define RK3288_GRF_SOC_CON1 0x0248
@@ -419,13 +439,6 @@ static const struct rk_gmac_ops rk3228_ops = {
static void rk3288_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
RK3288_GMAC_PHY_INTF_SEL_RGMII |
RK3288_GMAC_RMII_MODE_CLR);
@@ -437,13 +450,6 @@ static void rk3288_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3288_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
RK3288_GMAC_PHY_INTF_SEL_RMII | RK3288_GMAC_RMII_MODE);
}
@@ -452,11 +458,6 @@ static void rk3288_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
if (speed == 10)
regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
RK3288_GMAC_CLK_2_5M);
@@ -474,11 +475,6 @@ static void rk3288_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
if (speed == 10) {
regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
RK3288_GMAC_RMII_CLK_2_5M |
@@ -511,13 +507,6 @@ static const struct rk_gmac_ops rk3288_ops = {
static void rk3308_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3308_GRF_MAC_CON0,
RK3308_GMAC_PHY_INTF_SEL_RMII);
}
@@ -526,11 +515,6 @@ static void rk3308_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
if (speed == 10) {
regmap_write(bsp_priv->grf, RK3308_GRF_MAC_CON0,
RK3308_GMAC_SPEED_10M);
@@ -583,13 +567,6 @@ static const struct rk_gmac_ops rk3308_ops = {
static void rk3328_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
RK3328_GMAC_PHY_INTF_SEL_RGMII |
RK3328_GMAC_RMII_MODE_CLR |
@@ -603,14 +580,8 @@ static void rk3328_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3328_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
unsigned int reg;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
reg = bsp_priv->integrated_phy ? RK3328_GRF_MAC_CON2 :
RK3328_GRF_MAC_CON1;
@@ -623,11 +594,6 @@ static void rk3328_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
if (speed == 10)
regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
RK3328_GMAC_CLK_2_5M);
@@ -646,11 +612,6 @@ static void rk3328_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
struct device *dev = &bsp_priv->pdev->dev;
unsigned int reg;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
reg = bsp_priv->integrated_phy ? RK3328_GRF_MAC_CON2 :
RK3328_GRF_MAC_CON1;
@@ -670,6 +631,8 @@ static void rk3328_integrated_phy_powerup(struct rk_priv_data *priv)
{
regmap_write(priv->grf, RK3328_GRF_MACPHY_CON1,
RK3328_MACPHY_RMII_MODE);
+
+ rk_gmac_integrated_ephy_powerup(priv);
}
static const struct rk_gmac_ops rk3328_ops = {
@@ -677,7 +640,8 @@ static const struct rk_gmac_ops rk3328_ops = {
.set_to_rmii = rk3328_set_to_rmii,
.set_rgmii_speed = rk3328_set_rgmii_speed,
.set_rmii_speed = rk3328_set_rmii_speed,
- .integrated_phy_powerup = rk3328_integrated_phy_powerup,
+ .integrated_phy_powerup = rk3328_integrated_phy_powerup,
+ .integrated_phy_powerdown = rk_gmac_integrated_ephy_powerdown,
};
#define RK3366_GRF_SOC_CON6 0x0418
@@ -711,13 +675,6 @@ static const struct rk_gmac_ops rk3328_ops = {
static void rk3366_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
RK3366_GMAC_PHY_INTF_SEL_RGMII |
RK3366_GMAC_RMII_MODE_CLR);
@@ -729,13 +686,6 @@ static void rk3366_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3366_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
RK3366_GMAC_PHY_INTF_SEL_RMII | RK3366_GMAC_RMII_MODE);
}
@@ -744,11 +694,6 @@ static void rk3366_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
if (speed == 10)
regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
RK3366_GMAC_CLK_2_5M);
@@ -766,11 +711,6 @@ static void rk3366_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
if (speed == 10) {
regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
RK3366_GMAC_RMII_CLK_2_5M |
@@ -822,13 +762,6 @@ static const struct rk_gmac_ops rk3366_ops = {
static void rk3368_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
RK3368_GMAC_PHY_INTF_SEL_RGMII |
RK3368_GMAC_RMII_MODE_CLR);
@@ -840,13 +773,6 @@ static void rk3368_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3368_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
RK3368_GMAC_PHY_INTF_SEL_RMII | RK3368_GMAC_RMII_MODE);
}
@@ -855,11 +781,6 @@ static void rk3368_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
if (speed == 10)
regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
RK3368_GMAC_CLK_2_5M);
@@ -877,11 +798,6 @@ static void rk3368_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
if (speed == 10) {
regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
RK3368_GMAC_RMII_CLK_2_5M |
@@ -933,13 +849,6 @@ static const struct rk_gmac_ops rk3368_ops = {
static void rk3399_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
RK3399_GMAC_PHY_INTF_SEL_RGMII |
RK3399_GMAC_RMII_MODE_CLR);
@@ -951,13 +860,6 @@ static void rk3399_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3399_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
RK3399_GMAC_PHY_INTF_SEL_RMII | RK3399_GMAC_RMII_MODE);
}
@@ -966,11 +868,6 @@ static void rk3399_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
if (speed == 10)
regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
RK3399_GMAC_CLK_2_5M);
@@ -988,11 +885,6 @@ static void rk3399_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
if (speed == 10) {
regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
RK3399_GMAC_RMII_CLK_2_5M |
@@ -1013,6 +905,149 @@ static const struct rk_gmac_ops rk3399_ops = {
.set_rmii_speed = rk3399_set_rmii_speed,
};
+#define RK3528_VO_GRF_GMAC_CON 0x0018
+#define RK3528_VO_GRF_MACPHY_CON0 0x001c
+#define RK3528_VO_GRF_MACPHY_CON1 0x0020
+#define RK3528_VPU_GRF_GMAC_CON5 0x0018
+#define RK3528_VPU_GRF_GMAC_CON6 0x001c
+
+#define RK3528_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
+#define RK3528_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
+#define RK3528_GMAC_TXCLK_DLY_ENABLE GRF_BIT(14)
+#define RK3528_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(14)
+
+#define RK3528_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0xFF, 8)
+#define RK3528_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0xFF, 0)
+
+#define RK3528_GMAC0_PHY_INTF_SEL_RMII GRF_BIT(1)
+#define RK3528_GMAC1_PHY_INTF_SEL_RGMII GRF_CLR_BIT(8)
+#define RK3528_GMAC1_PHY_INTF_SEL_RMII GRF_BIT(8)
+
+#define RK3528_GMAC1_CLK_SELECT_CRU GRF_CLR_BIT(12)
+#define RK3528_GMAC1_CLK_SELECT_IO GRF_BIT(12)
+
+#define RK3528_GMAC0_CLK_RMII_DIV2 GRF_BIT(3)
+#define RK3528_GMAC0_CLK_RMII_DIV20 GRF_CLR_BIT(3)
+#define RK3528_GMAC1_CLK_RMII_DIV2 GRF_BIT(10)
+#define RK3528_GMAC1_CLK_RMII_DIV20 GRF_CLR_BIT(10)
+
+#define RK3528_GMAC1_CLK_RGMII_DIV1 (GRF_CLR_BIT(11) | GRF_CLR_BIT(10))
+#define RK3528_GMAC1_CLK_RGMII_DIV5 (GRF_BIT(11) | GRF_BIT(10))
+#define RK3528_GMAC1_CLK_RGMII_DIV50 (GRF_BIT(11) | GRF_CLR_BIT(10))
+
+#define RK3528_GMAC0_CLK_RMII_GATE GRF_BIT(2)
+#define RK3528_GMAC0_CLK_RMII_NOGATE GRF_CLR_BIT(2)
+#define RK3528_GMAC1_CLK_RMII_GATE GRF_BIT(9)
+#define RK3528_GMAC1_CLK_RMII_NOGATE GRF_CLR_BIT(9)
+
+static void rk3528_set_to_rgmii(struct rk_priv_data *bsp_priv,
+ int tx_delay, int rx_delay)
+{
+ regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5,
+ RK3528_GMAC1_PHY_INTF_SEL_RGMII);
+
+ regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5,
+ DELAY_ENABLE(RK3528, tx_delay, rx_delay));
+
+ regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON6,
+ RK3528_GMAC_CLK_RX_DL_CFG(rx_delay) |
+ RK3528_GMAC_CLK_TX_DL_CFG(tx_delay));
+}
+
+static void rk3528_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ if (bsp_priv->id == 1)
+ regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5,
+ RK3528_GMAC1_PHY_INTF_SEL_RMII);
+ else
+ regmap_write(bsp_priv->grf, RK3528_VO_GRF_GMAC_CON,
+ RK3528_GMAC0_PHY_INTF_SEL_RMII |
+ RK3528_GMAC0_CLK_RMII_DIV2);
+}
+
+static void rk3528_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (speed == 10)
+ regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5,
+ RK3528_GMAC1_CLK_RGMII_DIV50);
+ else if (speed == 100)
+ regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5,
+ RK3528_GMAC1_CLK_RGMII_DIV5);
+ else if (speed == 1000)
+ regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5,
+ RK3528_GMAC1_CLK_RGMII_DIV1);
+ else
+ dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
+}
+
+static void rk3528_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+ unsigned int reg, val;
+
+ if (speed == 10)
+ val = bsp_priv->id == 1 ? RK3528_GMAC1_CLK_RMII_DIV20 :
+ RK3528_GMAC0_CLK_RMII_DIV20;
+ else if (speed == 100)
+ val = bsp_priv->id == 1 ? RK3528_GMAC1_CLK_RMII_DIV2 :
+ RK3528_GMAC0_CLK_RMII_DIV2;
+ else {
+ dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+ return;
+ }
+
+ reg = bsp_priv->id == 1 ? RK3528_VPU_GRF_GMAC_CON5 :
+ RK3528_VO_GRF_GMAC_CON;
+
+ regmap_write(bsp_priv->grf, reg, val);
+}
+
+static void rk3528_set_clock_selection(struct rk_priv_data *bsp_priv,
+ bool input, bool enable)
+{
+ unsigned int val;
+
+ if (bsp_priv->id == 1) {
+ val = input ? RK3528_GMAC1_CLK_SELECT_IO :
+ RK3528_GMAC1_CLK_SELECT_CRU;
+ val |= enable ? RK3528_GMAC1_CLK_RMII_NOGATE :
+ RK3528_GMAC1_CLK_RMII_GATE;
+ regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5, val);
+ } else {
+ val = enable ? RK3528_GMAC0_CLK_RMII_NOGATE :
+ RK3528_GMAC0_CLK_RMII_GATE;
+ regmap_write(bsp_priv->grf, RK3528_VO_GRF_GMAC_CON, val);
+ }
+}
+
+static void rk3528_integrated_phy_powerup(struct rk_priv_data *bsp_priv)
+{
+ rk_gmac_integrated_fephy_powerup(bsp_priv, RK3528_VO_GRF_MACPHY_CON0);
+}
+
+static void rk3528_integrated_phy_powerdown(struct rk_priv_data *bsp_priv)
+{
+ rk_gmac_integrated_fephy_powerdown(bsp_priv, RK3528_VO_GRF_MACPHY_CON0);
+}
+
+static const struct rk_gmac_ops rk3528_ops = {
+ .set_to_rgmii = rk3528_set_to_rgmii,
+ .set_to_rmii = rk3528_set_to_rmii,
+ .set_rgmii_speed = rk3528_set_rgmii_speed,
+ .set_rmii_speed = rk3528_set_rmii_speed,
+ .set_clock_selection = rk3528_set_clock_selection,
+ .integrated_phy_powerup = rk3528_integrated_phy_powerup,
+ .integrated_phy_powerdown = rk3528_integrated_phy_powerdown,
+ .regs_valid = true,
+ .regs = {
+ 0xffbd0000, /* gmac0 */
+ 0xffbe0000, /* gmac1 */
+ 0x0, /* sentinel */
+ },
+};
+
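Every GRF_BIT()/GRF_CLR_BIT() value above relies on the Rockchip "hiword mask" register convention: the upper 16 bits of each 32-bit GRF write are per-bit write enables, so individual fields can be set or cleared without a read-modify-write. A minimal sketch of the macros as defined near the top of dwmac-rk.c:

#define HIWORD_UPDATE(val, mask, shift) \
		((val) << (shift) | (mask) << ((shift) + 16))
#define GRF_BIT(nr)	(BIT(nr) | BIT((nr) + 16))
#define GRF_CLR_BIT(nr)	BIT((nr) + 16)

/*
 * e.g. RK3528_GMAC1_CLK_RMII_DIV2 = GRF_BIT(10) = 0x04000400:
 * bit 26 is the write enable for bit 10, so the write changes
 * only bit 10 and leaves the rest of the register alone.
 */
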
#define RK3568_GRF_GMAC0_CON0 0x0380
#define RK3568_GRF_GMAC0_CON1 0x0384
#define RK3568_GRF_GMAC1_CON0 0x0388
@@ -1037,14 +1072,8 @@ static const struct rk_gmac_ops rk3399_ops = {
static void rk3568_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
u32 con0, con1;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
con0 = (bsp_priv->id == 1) ? RK3568_GRF_GMAC1_CON0 :
RK3568_GRF_GMAC0_CON0;
con1 = (bsp_priv->id == 1) ? RK3568_GRF_GMAC1_CON1 :
@@ -1062,14 +1091,8 @@ static void rk3568_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3568_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
u32 con1;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
con1 = (bsp_priv->id == 1) ? RK3568_GRF_GMAC1_CON1 :
RK3568_GRF_GMAC0_CON1;
regmap_write(bsp_priv->grf, con1, RK3568_GMAC_PHY_INTF_SEL_RMII);
@@ -1147,14 +1170,8 @@ static const struct rk_gmac_ops rk3568_ops = {
static void rk3576_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
unsigned int offset_con;
- if (IS_ERR(bsp_priv->grf) || IS_ERR(bsp_priv->php_grf)) {
- dev_err(dev, "Missing rockchip,grf or rockchip,php-grf property\n");
- return;
- }
-
offset_con = bsp_priv->id == 1 ? RK3576_GRF_GMAC_CON1 :
RK3576_GRF_GMAC_CON0;
@@ -1180,14 +1197,8 @@ static void rk3576_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3576_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
unsigned int offset_con;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
offset_con = bsp_priv->id == 1 ? RK3576_GRF_GMAC_CON1 :
RK3576_GRF_GMAC_CON0;
@@ -1254,6 +1265,7 @@ static const struct rk_gmac_ops rk3576_ops = {
.set_rgmii_speed = rk3576_set_gmac_speed,
.set_rmii_speed = rk3576_set_gmac_speed,
.set_clock_selection = rk3576_set_clock_selection,
+ .php_grf_required = true,
.regs_valid = true,
.regs = {
0x2a220000, /* gmac0 */
@@ -1306,14 +1318,8 @@ static const struct rk_gmac_ops rk3576_ops = {
static void rk3588_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
u32 offset_con, id = bsp_priv->id;
- if (IS_ERR(bsp_priv->grf) || IS_ERR(bsp_priv->php_grf)) {
- dev_err(dev, "Missing rockchip,grf or rockchip,php_grf property\n");
- return;
- }
-
offset_con = bsp_priv->id == 1 ? RK3588_GRF_GMAC_CON9 :
RK3588_GRF_GMAC_CON8;
@@ -1334,13 +1340,6 @@ static void rk3588_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3588_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->php_grf)) {
- dev_err(dev, "%s: Missing rockchip,php_grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->php_grf, RK3588_GRF_GMAC_CON0,
RK3588_GMAC_PHY_INTF_SEL_RMII(bsp_priv->id));
@@ -1401,6 +1400,7 @@ static const struct rk_gmac_ops rk3588_ops = {
.set_rgmii_speed = rk3588_set_gmac_speed,
.set_rmii_speed = rk3588_set_gmac_speed,
.set_clock_selection = rk3588_set_clock_selection,
+ .php_grf_required = true,
.regs_valid = true,
.regs = {
0xfe1b0000, /* gmac0 */
@@ -1423,13 +1423,6 @@ static const struct rk_gmac_ops rk3588_ops = {
static void rv1108_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RV1108_GRF_GMAC_CON0,
RV1108_GMAC_PHY_INTF_SEL_RMII);
}
@@ -1438,11 +1431,6 @@ static void rv1108_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
if (speed == 10) {
regmap_write(bsp_priv->grf, RV1108_GRF_GMAC_CON0,
RV1108_GMAC_RMII_CLK_2_5M |
@@ -1491,13 +1479,6 @@ static const struct rk_gmac_ops rv1108_ops = {
static void rv1126_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RV1126_GRF_GMAC_CON0,
RV1126_GMAC_PHY_INTF_SEL_RGMII |
RV1126_GMAC_M0_RXCLK_DLY_ENABLE |
@@ -1516,13 +1497,6 @@ static void rv1126_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rv1126_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RV1126_GRF_GMAC_CON0,
RV1126_GMAC_PHY_INTF_SEL_RMII);
}
@@ -1578,50 +1552,6 @@ static const struct rk_gmac_ops rv1126_ops = {
.set_rmii_speed = rv1126_set_rmii_speed,
};
-#define RK_GRF_MACPHY_CON0 0xb00
-#define RK_GRF_MACPHY_CON1 0xb04
-#define RK_GRF_MACPHY_CON2 0xb08
-#define RK_GRF_MACPHY_CON3 0xb0c
-
-#define RK_MACPHY_ENABLE GRF_BIT(0)
-#define RK_MACPHY_DISABLE GRF_CLR_BIT(0)
-#define RK_MACPHY_CFG_CLK_50M GRF_BIT(14)
-#define RK_GMAC2PHY_RMII_MODE (GRF_BIT(6) | GRF_CLR_BIT(7))
-#define RK_GRF_CON2_MACPHY_ID HIWORD_UPDATE(0x1234, 0xffff, 0)
-#define RK_GRF_CON3_MACPHY_ID HIWORD_UPDATE(0x35, 0x3f, 0)
-
-static void rk_gmac_integrated_phy_powerup(struct rk_priv_data *priv)
-{
- if (priv->ops->integrated_phy_powerup)
- priv->ops->integrated_phy_powerup(priv);
-
- regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_CFG_CLK_50M);
- regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_GMAC2PHY_RMII_MODE);
-
- regmap_write(priv->grf, RK_GRF_MACPHY_CON2, RK_GRF_CON2_MACPHY_ID);
- regmap_write(priv->grf, RK_GRF_MACPHY_CON3, RK_GRF_CON3_MACPHY_ID);
-
- if (priv->phy_reset) {
- /* PHY needs to be disabled before trying to reset it */
- regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_DISABLE);
- if (priv->phy_reset)
- reset_control_assert(priv->phy_reset);
- usleep_range(10, 20);
- if (priv->phy_reset)
- reset_control_deassert(priv->phy_reset);
- usleep_range(10, 20);
- regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_ENABLE);
- msleep(30);
- }
-}
-
-static void rk_gmac_integrated_phy_powerdown(struct rk_priv_data *priv)
-{
- regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_DISABLE);
- if (priv->phy_reset)
- reset_control_assert(priv->phy_reset);
-}
-
static int rk_gmac_clk_init(struct plat_stmmacenet_data *plat)
{
struct rk_priv_data *bsp_priv = plat->bsp_priv;
@@ -1749,7 +1679,7 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
if (!bsp_priv)
return ERR_PTR(-ENOMEM);
- of_get_phy_mode(dev->of_node, &bsp_priv->phy_iface);
+ bsp_priv->phy_iface = plat->phy_interface;
bsp_priv->ops = ops;
/* Some SoCs have multiple MAC controllers, which need
@@ -1812,8 +1742,22 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
bsp_priv->grf = syscon_regmap_lookup_by_phandle(dev->of_node,
"rockchip,grf");
- bsp_priv->php_grf = syscon_regmap_lookup_by_phandle(dev->of_node,
- "rockchip,php-grf");
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err_probe(dev, PTR_ERR(bsp_priv->grf),
+ "failed to lookup rockchip,grf\n");
+ return ERR_CAST(bsp_priv->grf);
+ }
+
+ if (ops->php_grf_required) {
+ bsp_priv->php_grf =
+ syscon_regmap_lookup_by_phandle(dev->of_node,
+ "rockchip,php-grf");
+ if (IS_ERR(bsp_priv->php_grf)) {
+ dev_err_probe(dev, PTR_ERR(bsp_priv->php_grf),
+ "failed to lookup rockchip,php-grf\n");
+ return ERR_CAST(bsp_priv->php_grf);
+ }
+ }
if (plat->phy_node) {
bsp_priv->integrated_phy = of_property_read_bool(plat->phy_node,
@@ -1903,16 +1847,16 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
pm_runtime_get_sync(dev);
- if (bsp_priv->integrated_phy)
- rk_gmac_integrated_phy_powerup(bsp_priv);
+ if (bsp_priv->integrated_phy && bsp_priv->ops->integrated_phy_powerup)
+ bsp_priv->ops->integrated_phy_powerup(bsp_priv);
return 0;
}
static void rk_gmac_powerdown(struct rk_priv_data *gmac)
{
- if (gmac->integrated_phy)
- rk_gmac_integrated_phy_powerdown(gmac);
+ if (gmac->integrated_phy && gmac->ops->integrated_phy_powerdown)
+ gmac->ops->integrated_phy_powerdown(gmac);
pm_runtime_put_sync(&gmac->pdev->dev);
@@ -1920,9 +1864,10 @@ static void rk_gmac_powerdown(struct rk_priv_data *gmac)
gmac_clk_enable(gmac, false);
}
-static void rk_fix_speed(void *priv, unsigned int speed, unsigned int mode)
+static int rk_set_clk_tx_rate(void *bsp_priv_, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
{
- struct rk_priv_data *bsp_priv = priv;
+ struct rk_priv_data *bsp_priv = bsp_priv_;
struct device *dev = &bsp_priv->pdev->dev;
switch (bsp_priv->phy_iface) {
@@ -1940,6 +1885,8 @@ static void rk_fix_speed(void *priv, unsigned int speed, unsigned int mode)
default:
dev_err(dev, "unsupported interface %d", bsp_priv->phy_iface);
}
+
+ return 0;
}
static int rk_gmac_probe(struct platform_device *pdev)
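
rk_set_clk_tx_rate() keeps the per-SoC speed callbacks but is now invoked through the generic plat->set_clk_tx_rate hook instead of fix_mac_speed. The core-side caller is not part of this diff; a sketch of its assumed shape in the stmmac link-up path, for orientation only:

	/* assumed caller in stmmac_mac_link_up(); sketch, not verbatim */
	if (priv->plat->set_clk_tx_rate) {
		ret = priv->plat->set_clk_tx_rate(priv->plat->bsp_priv,
						  priv->plat->clk_tx_i,
						  interface, speed);
		if (ret < 0)
			netdev_err(priv->dev,
				   "failed to configure tx clock rate\n");
	}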
@@ -1966,9 +1913,13 @@ static int rk_gmac_probe(struct platform_device *pdev)
/* If the stmmac is not already selected as gmac4,
* then make sure we fallback to gmac.
*/
- if (!plat_dat->has_gmac4)
+ if (!plat_dat->has_gmac4) {
plat_dat->has_gmac = true;
- plat_dat->fix_mac_speed = rk_fix_speed;
+ plat_dat->rx_fifo_size = 4096;
+ plat_dat->tx_fifo_size = 2048;
+ }
+
+ plat_dat->set_clk_tx_rate = rk_set_clk_tx_rate;
plat_dat->bsp_priv = rk_gmac_setup(pdev, plat_dat, data);
if (IS_ERR(plat_dat->bsp_priv))
@@ -2044,6 +1995,7 @@ static const struct of_device_id rk_gmac_dwmac_match[] = {
{ .compatible = "rockchip,rk3366-gmac", .data = &rk3366_ops },
{ .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops },
{ .compatible = "rockchip,rk3399-gmac", .data = &rk3399_ops },
+ { .compatible = "rockchip,rk3528-gmac", .data = &rk3528_ops },
{ .compatible = "rockchip,rk3568-gmac", .data = &rk3568_ops },
{ .compatible = "rockchip,rk3576-gmac", .data = &rk3576_ops },
{ .compatible = "rockchip,rk3588-gmac", .data = &rk3588_ops },
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c
index 9cc0e5817416..221539d760bc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c
@@ -100,24 +100,6 @@ static void s32_gmac_exit(struct platform_device *pdev, void *priv)
clk_disable_unprepare(gmac->rx_clk);
}
-static void s32_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
-{
- struct s32_priv_data *gmac = priv;
- long tx_clk_rate;
- int ret;
-
- tx_clk_rate = rgmii_clock(speed);
- if (tx_clk_rate < 0) {
- dev_err(gmac->dev, "Unsupported/Invalid speed: %d\n", speed);
- return;
- }
-
- dev_dbg(gmac->dev, "Set tx clock to %ld Hz\n", tx_clk_rate);
- ret = clk_set_rate(gmac->tx_clk, tx_clk_rate);
- if (ret)
- dev_err(gmac->dev, "Can't set tx clock\n");
-}
-
static int s32_dwmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat;
@@ -172,7 +154,9 @@ static int s32_dwmac_probe(struct platform_device *pdev)
plat->init = s32_gmac_init;
plat->exit = s32_gmac_exit;
- plat->fix_mac_speed = s32_fix_mac_speed;
+
+ plat->clk_tx_i = gmac->tx_clk;
+ plat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
plat->bsp_priv = gmac;
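
Both the deleted s32_fix_mac_speed() and the generic helper lean on rgmii_clock() from <linux/phy.h>, which maps link speed to the required transmit clock rate. Its mapping, sketched for reference (not the verbatim source):

/* speed (Mbps) -> tx clock: 2.5 MHz / 25 MHz / 125 MHz */
static inline long rgmii_clock_sketch(int speed)
{
	switch (speed) {
	case 10:	return 2500000;
	case 100:	return 25000000;
	case 1000:	return 125000000;
	default:	return -EINVAL;
	}
}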
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 16020b72dec8..116855658559 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -61,7 +61,7 @@ struct socfpga_dwmac {
struct mdio_device *pcs_mdiodev;
};
-static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
+static void socfpga_dwmac_fix_mac_speed(void *priv, int speed, unsigned int mode)
{
struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)priv;
void __iomem *splitter_base = dwmac->splitter_base;
@@ -523,24 +523,6 @@ static int socfpga_dwmac_resume(struct device *dev)
dwmac_priv->ops->set_phy_mode(priv->plat->bsp_priv);
- /* Before the enet controller is suspended, the phy is suspended.
- * This causes the phy clock to be gated. The enet controller is
- * resumed before the phy, so the clock is still gated "off" when
- * the enet controller is resumed. This code makes sure the phy
- * is "resumed" before reinitializing the enet controller since
- * the enet controller depends on an active phy clock to complete
- * a DMA reset. A DMA reset will "time out" if executed
- * with no phy clock input on the Synopsys enet controller.
- * Verified through Synopsys Case #8000711656.
- *
- * Note that the phy clock is also gated when the phy is isolated.
- * Phy "suspend" and "isolate" controls are located in phy basic
- * control register 0, and can be modified by the phy driver
- * framework.
- */
- if (ndev->phydev)
- phy_resume(ndev->phydev);
-
return stmmac_resume(dev);
}
#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c
new file mode 100644
index 000000000000..3303784cbbf8
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Sophgo DWMAC platform driver
+ *
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@gmail.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+
+#include "stmmac_platform.h"
+
+static int sophgo_sg2044_dwmac_init(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat_dat,
+ struct stmmac_resources *stmmac_res)
+{
+ plat_dat->clk_tx_i = devm_clk_get_enabled(&pdev->dev, "tx");
+ if (IS_ERR(plat_dat->clk_tx_i))
+ return dev_err_probe(&pdev->dev, PTR_ERR(plat_dat->clk_tx_i),
+ "failed to get tx clock\n");
+
+ plat_dat->flags |= STMMAC_FLAG_SPH_DISABLE;
+ plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
+ plat_dat->multicast_filter_bins = 0;
+ plat_dat->unicast_filter_entries = 1;
+
+ return 0;
+}
+
+static int sophgo_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to get platform resources\n");
+
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return dev_err_probe(dev, PTR_ERR(plat_dat),
+ "failed to parse DT parameters\n");
+
+ ret = sophgo_sg2044_dwmac_init(pdev, plat_dat, &stmmac_res);
+ if (ret)
+ return ret;
+
+ return stmmac_dvr_probe(dev, plat_dat, &stmmac_res);
+}
+
+static const struct of_device_id sophgo_dwmac_match[] = {
+ { .compatible = "sophgo,sg2044-dwmac" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sophgo_dwmac_match);
+
+static struct platform_driver sophgo_dwmac_driver = {
+ .probe = sophgo_dwmac_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "sophgo-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = sophgo_dwmac_match,
+ },
+};
+module_platform_driver(sophgo_dwmac_driver);
+
+MODULE_AUTHOR("Inochi Amaoto <inochiama@gmail.com>");
+MODULE_DESCRIPTION("Sophgo DWMAC platform driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
index 0a0a363d3730..2013d7477eb7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
@@ -27,27 +27,9 @@ struct starfive_dwmac_data {
struct starfive_dwmac {
struct device *dev;
- struct clk *clk_tx;
const struct starfive_dwmac_data *data;
};
-static void starfive_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
-{
- struct starfive_dwmac *dwmac = priv;
- long rate;
- int err;
-
- rate = rgmii_clock(speed);
- if (rate < 0) {
- dev_err(dwmac->dev, "invalid speed %u\n", speed);
- return;
- }
-
- err = clk_set_rate(dwmac->clk_tx, rate);
- if (err)
- dev_err(dwmac->dev, "failed to set tx rate %lu\n", rate);
-}
-
static int starfive_dwmac_set_mode(struct plat_stmmacenet_data *plat_dat)
{
struct starfive_dwmac *dwmac = plat_dat->bsp_priv;
@@ -122,9 +104,9 @@ static int starfive_dwmac_probe(struct platform_device *pdev)
dwmac->data = device_get_match_data(&pdev->dev);
- dwmac->clk_tx = devm_clk_get_enabled(&pdev->dev, "tx");
- if (IS_ERR(dwmac->clk_tx))
- return dev_err_probe(&pdev->dev, PTR_ERR(dwmac->clk_tx),
+ plat_dat->clk_tx_i = devm_clk_get_enabled(&pdev->dev, "tx");
+ if (IS_ERR(plat_dat->clk_tx_i))
+ return dev_err_probe(&pdev->dev, PTR_ERR(plat_dat->clk_tx_i),
"error getting tx clock\n");
clk_gtx = devm_clk_get_enabled(&pdev->dev, "gtx");
@@ -139,9 +121,10 @@ static int starfive_dwmac_probe(struct platform_device *pdev)
* internally, because rgmii_rxin will be adaptively adjusted.
*/
if (!device_property_read_bool(&pdev->dev, "starfive,tx-use-rgmii-clk"))
- plat_dat->fix_mac_speed = starfive_dwmac_fix_mac_speed;
+ plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
dwmac->dev = &pdev->dev;
+ plat_dat->flags |= STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP;
plat_dat->bsp_priv = dwmac;
plat_dat->dma_cfg->dche = true;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index f25461c292fe..be57c6c12c1c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -99,12 +99,12 @@ struct sti_dwmac {
int clk_sel_reg; /* GMAC ext clk selection register */
struct regmap *regmap;
bool gmac_en;
- u32 speed;
- void (*fix_retime_src)(void *priv, unsigned int speed, unsigned int mode);
+ int speed;
+ void (*fix_retime_src)(void *priv, int speed, unsigned int mode);
};
struct sti_dwmac_of_data {
- void (*fix_retime_src)(void *priv, unsigned int speed, unsigned int mode);
+ void (*fix_retime_src)(void *priv, int speed, unsigned int mode);
};
static u32 phy_intf_sels[] = {
@@ -132,7 +132,7 @@ static u32 stih4xx_tx_retime_val[] = {
| STIH4XX_ETH_SEL_INTERNAL_NOTEXT_PHYCLK,
};
-static void stih4xx_fix_retime_src(void *priv, u32 spd, unsigned int mode)
+static void stih4xx_fix_retime_src(void *priv, int spd, unsigned int mode)
{
struct sti_dwmac *dwmac = priv;
u32 src = dwmac->tx_retime_src;
@@ -185,7 +185,8 @@ static int sti_dwmac_set_mode(struct sti_dwmac *dwmac)
}
static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
- struct platform_device *pdev)
+ struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat_dat)
{
struct resource *res;
struct device *dev = &pdev->dev;
@@ -204,12 +205,7 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- err = of_get_phy_mode(np, &dwmac->interface);
- if (err && err != -ENODEV) {
- dev_err(dev, "Can't get phy-mode\n");
- return err;
- }
-
+ dwmac->interface = plat_dat->phy_interface;
dwmac->regmap = regmap;
dwmac->gmac_en = of_property_read_bool(np, "st,gmac_en");
dwmac->ext_phyclk = of_property_read_bool(np, "st,ext-phyclk");
@@ -268,7 +264,7 @@ static int sti_dwmac_probe(struct platform_device *pdev)
if (!dwmac)
return -ENOMEM;
- ret = sti_dwmac_parse_data(dwmac, pdev);
+ ret = sti_dwmac_parse_data(dwmac, pdev, plat_dat);
if (ret) {
dev_err(&pdev->dev, "Unable to parse OF data\n");
return ret;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index 1fcb74e9e3ff..c3d321192581 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -538,6 +538,7 @@ static int stm32_dwmac_probe(struct platform_device *pdev)
return ret;
}
+ plat_dat->flags |= STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP;
plat_dat->bsp_priv = dwmac;
ret = stm32_dwmac_init(plat_dat, false);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 4b7b2582a120..85723a78793a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -1155,11 +1155,10 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
struct stmmac_resources stmmac_res;
struct sunxi_priv_data *gmac;
struct device *dev = &pdev->dev;
- phy_interface_t interface;
- int ret;
struct stmmac_priv *priv;
struct net_device *ndev;
struct regmap *regmap;
+ int ret;
ret = stmmac_get_platform_resources(pdev, &stmmac_res);
if (ret)
@@ -1219,10 +1218,6 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
return ret;
}
- ret = of_get_phy_mode(dev->of_node, &interface);
- if (ret)
- return -EINVAL;
-
plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
@@ -1230,7 +1225,6 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
/* platform data specifying hardware features and callbacks.
* hardware features were copied from Allwinner drivers.
*/
- plat_dat->mac_interface = interface;
plat_dat->rx_coe = STMMAC_RX_COE_TYPE2;
plat_dat->tx_coe = 1;
plat_dat->flags |= STMMAC_FLAG_HAS_SUN8I;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index 9ae318436c4a..9f098ff0ff05 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -72,7 +72,7 @@ static void sun7i_gmac_exit(struct platform_device *pdev, void *priv)
regulator_disable(gmac->regulator);
}
-static void sun7i_fix_speed(void *priv, unsigned int speed, unsigned int mode)
+static void sun7i_fix_speed(void *priv, int speed, unsigned int mode)
{
struct sunxi_priv_data *gmac = priv;
@@ -116,11 +116,7 @@ static int sun7i_gmac_probe(struct platform_device *pdev)
if (!gmac)
return -ENOMEM;
- ret = of_get_phy_mode(dev->of_node, &gmac->interface);
- if (ret && ret != -ENODEV) {
- dev_err(dev, "Can't get phy-mode\n");
- return ret;
- }
+ gmac->interface = plat_dat->phy_interface;
gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx");
if (IS_ERR(gmac->tx_clk)) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
index dce84ed184e9..c72ee759aae5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
@@ -45,9 +45,6 @@
#define TXCLK_DIR_OUTPUT FIELD_PREP(TXCLK_DIR_MASK, 0)
#define TXCLK_DIR_INPUT FIELD_PREP(TXCLK_DIR_MASK, 1)
-#define GMAC_GMII_RGMII_RATE 125000000
-#define GMAC_MII_RATE 25000000
-
struct thead_dwmac {
struct plat_stmmacenet_data *plat;
void __iomem *apb_base;
@@ -104,11 +101,13 @@ static int thead_dwmac_set_txclk_dir(struct plat_stmmacenet_data *plat)
return 0;
}
-static void thead_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mode)
+static int thead_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
{
+ struct thead_dwmac *dwmac = bsp_priv;
struct plat_stmmacenet_data *plat;
- struct thead_dwmac *dwmac = priv;
unsigned long rate;
+ long tx_rate;
u32 div, reg;
plat = dwmac->plat;
@@ -116,44 +115,37 @@ static void thead_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mode)
switch (plat->mac_interface) {
/* For MII, rxc/txc is provided by phy */
case PHY_INTERFACE_MODE_MII:
- return;
+ return 0;
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
rate = clk_get_rate(plat->stmmac_clk);
- if (!rate || rate % GMAC_GMII_RGMII_RATE != 0 ||
- rate % GMAC_MII_RATE != 0) {
- dev_err(dwmac->dev, "invalid gmac rate %ld\n", rate);
- return;
- }
writel(0, dwmac->apb_base + GMAC_PLLCLK_DIV);
- switch (speed) {
- case SPEED_1000:
- div = rate / GMAC_GMII_RGMII_RATE;
- break;
- case SPEED_100:
- div = rate / GMAC_MII_RATE;
- break;
- case SPEED_10:
- div = rate * 10 / GMAC_MII_RATE;
- break;
- default:
- dev_err(dwmac->dev, "invalid speed %u\n", speed);
- return;
+ tx_rate = rgmii_clock(speed);
+ if (tx_rate < 0) {
+ dev_err(dwmac->dev, "invalid speed %d\n", speed);
+ return tx_rate;
+ }
+
+ div = rate / tx_rate;
+ if (rate != tx_rate * div) {
+ dev_err(dwmac->dev, "invalid gmac rate %lu\n", rate);
+ return -EINVAL;
}
reg = FIELD_PREP(GMAC_PLLCLK_DIV_EN, 1) |
FIELD_PREP(GMAC_PLLCLK_DIV_NUM, div);
writel(reg, dwmac->apb_base + GMAC_PLLCLK_DIV);
- break;
+ return 0;
+
default:
dev_err(dwmac->dev, "unsupported phy interface %d\n",
plat->mac_interface);
- return;
+ return -EINVAL;
}
}
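
The rewritten divider logic accepts any parent clock that divides evenly into the required tx rate, replacing the fixed 125 MHz/25 MHz checks. A worked example (parent rates assumed for illustration):

/*
 * stmmac_clk = 500000000 Hz, speed = 1000:
 *   tx_rate = rgmii_clock(1000) = 125000000
 *   div     = 500000000 / 125000000 = 4
 *   125000000 * 4 == 500000000 -> accepted, div = 4 is programmed
 *
 * stmmac_clk = 300000000 Hz, speed = 1000:
 *   div = 300000000 / 125000000 = 2, but 125000000 * 2 != 300000000,
 *   so the function returns -EINVAL instead of running at a wrong rate.
 */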
@@ -245,7 +237,7 @@ static int thead_dwmac_probe(struct platform_device *pdev)
dwmac->plat = plat;
dwmac->apb_base = apb;
plat->bsp_priv = dwmac;
- plat->fix_mac_speed = thead_dwmac_fix_speed;
+ plat->set_clk_tx_rate = thead_set_clk_tx_rate;
plat->init = thead_dwmac_init;
return devm_stmmac_pltfr_probe(pdev, plat, &stmmac_res);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
index eccf7f537467..33cf99797df5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
@@ -54,7 +54,7 @@ struct visconti_eth {
spinlock_t lock; /* lock to protect register update */
};
-static void visconti_eth_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
+static void visconti_eth_fix_mac_speed(void *priv, int speed, unsigned int mode)
{
struct visconti_eth *dwmac = priv;
struct net_device *netdev = dev_get_drvdata(dwmac->dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index 600fea8f712f..967a16212faf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -59,22 +59,11 @@ enum power_event {
/* Energy Efficient Ethernet (EEE)
*
* LPI status, timer and control register offset
+ * For LPI control and status bit definitions, see common.h.
*/
#define LPI_CTRL_STATUS 0x0030
#define LPI_TIMER_CTRL 0x0034
-/* LPI control and status defines */
-#define LPI_CTRL_STATUS_LPITXA 0x00080000 /* Enable LPI TX Automate */
-#define LPI_CTRL_STATUS_PLSEN 0x00040000 /* Enable PHY Link Status */
-#define LPI_CTRL_STATUS_PLS 0x00020000 /* PHY Link Status */
-#define LPI_CTRL_STATUS_LPIEN 0x00010000 /* LPI Enable */
-#define LPI_CTRL_STATUS_RLPIST 0x00000200 /* Receive LPI state */
-#define LPI_CTRL_STATUS_TLPIST 0x00000100 /* Transmit LPI state */
-#define LPI_CTRL_STATUS_RLPIEX 0x00000008 /* Receive LPI Exit */
-#define LPI_CTRL_STATUS_RLPIEN 0x00000004 /* Receive LPI Entry */
-#define LPI_CTRL_STATUS_TLPIEX 0x00000002 /* Transmit LPI Exit */
-#define LPI_CTRL_STATUS_TLPIEN 0x00000001 /* Transmit LPI Entry */
-
/* GMAC HW ADDR regs */
#define GMAC_ADDR_HIGH(reg) ((reg > 15) ? 0x00000800 + (reg - 16) * 8 : \
0x00000040 + (reg * 8))
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 96bcda0856ec..a8b901cdf5cb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/io.h>
+#include <linux/string_choices.h>
#include "stmmac.h"
#include "stmmac_pcs.h"
#include "stmmac_ptp.h"
@@ -342,31 +343,24 @@ static int dwmac1000_irq_status(struct mac_device_info *hw,
return ret;
}
-static void dwmac1000_set_eee_mode(struct mac_device_info *hw,
- bool en_tx_lpi_clockgating)
+static int dwmac1000_set_lpi_mode(struct mac_device_info *hw,
+ enum stmmac_lpi_mode mode,
+ bool en_tx_lpi_clockgating, u32 et)
{
void __iomem *ioaddr = hw->pcsr;
u32 value;
- /*TODO - en_tx_lpi_clockgating treatment */
+ if (mode == STMMAC_LPI_TIMER)
+ return -EOPNOTSUPP;
- /* Enable the link status receive on RGMII, SGMII ore SMII
- * receive path and instruct the transmit to enter in LPI
- * state.
- */
value = readl(ioaddr + LPI_CTRL_STATUS);
- value |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA;
+ if (mode == STMMAC_LPI_FORCED)
+ value |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA;
+ else
+ value &= ~(LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA);
writel(value, ioaddr + LPI_CTRL_STATUS);
-}
-
-static void dwmac1000_reset_eee_mode(struct mac_device_info *hw)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value;
- value = readl(ioaddr + LPI_CTRL_STATUS);
- value &= ~(LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA);
- writel(value, ioaddr + LPI_CTRL_STATUS);
+ return 0;
}
static void dwmac1000_set_eee_pls(struct mac_device_info *hw, int link)
@@ -509,8 +503,7 @@ const struct stmmac_ops dwmac1000_ops = {
.pmt = dwmac1000_pmt,
.set_umac_addr = dwmac1000_set_umac_addr,
.get_umac_addr = dwmac1000_get_umac_addr,
- .set_eee_mode = dwmac1000_set_eee_mode,
- .reset_eee_mode = dwmac1000_reset_eee_mode,
+ .set_lpi_mode = dwmac1000_set_lpi_mode,
.set_eee_timer = dwmac1000_set_eee_timer,
.set_eee_pls = dwmac1000_set_eee_pls,
.debug = dwmac1000_debug,
@@ -633,7 +626,7 @@ int dwmac1000_ptp_enable(struct ptp_clock_info *ptp,
}
netdev_dbg(priv->dev, "Auxiliary Snapshot %s.\n",
- on ? "enabled" : "disabled");
+ str_enabled_disabled(on));
writel(tcr_val, ptpaddr + PTP_TCR);
/* wait for auxts fifo clear to finish */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index 184d41a306af..42fe29a4e300 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -177,23 +177,13 @@ enum power_event {
/* Energy Efficient Ethernet (EEE) for GMAC4
*
* LPI status, timer and control register offset
+ * For LPI control and status bit definitions, see common.h.
*/
#define GMAC4_LPI_CTRL_STATUS 0xd0
#define GMAC4_LPI_TIMER_CTRL 0xd4
#define GMAC4_LPI_ENTRY_TIMER 0xd8
#define GMAC4_MAC_ONEUS_TIC_COUNTER 0xdc
-/* LPI control and status defines */
-#define GMAC4_LPI_CTRL_STATUS_LPITCSE BIT(21) /* LPI Tx Clock Stop Enable */
-#define GMAC4_LPI_CTRL_STATUS_LPIATE BIT(20) /* LPI Timer Enable */
-#define GMAC4_LPI_CTRL_STATUS_LPITXA BIT(19) /* Enable LPI TX Automate */
-#define GMAC4_LPI_CTRL_STATUS_PLS BIT(17) /* PHY Link Status */
-#define GMAC4_LPI_CTRL_STATUS_LPIEN BIT(16) /* LPI Enable */
-#define GMAC4_LPI_CTRL_STATUS_RLPIEX BIT(3) /* Receive LPI Exit */
-#define GMAC4_LPI_CTRL_STATUS_RLPIEN BIT(2) /* Receive LPI Entry */
-#define GMAC4_LPI_CTRL_STATUS_TLPIEX BIT(1) /* Transmit LPI Exit */
-#define GMAC4_LPI_CTRL_STATUS_TLPIEN BIT(0) /* Transmit LPI Entry */
-
/* MAC Debug bitmap */
#define GMAC_DEBUG_TFCSTS_MASK GENMASK(18, 17)
#define GMAC_DEBUG_TFCSTS_SHIFT 17
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 9ed8620580a8..cc4ddf608652 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -376,33 +376,46 @@ static void dwmac4_get_umac_addr(struct mac_device_info *hw,
GMAC_ADDR_LOW(reg_n));
}
-static void dwmac4_set_eee_mode(struct mac_device_info *hw,
- bool en_tx_lpi_clockgating)
+static int dwmac4_set_lpi_mode(struct mac_device_info *hw,
+ enum stmmac_lpi_mode mode,
+ bool en_tx_lpi_clockgating, u32 et)
{
void __iomem *ioaddr = hw->pcsr;
- u32 value;
+ u32 value, mask;
- /* Enable the link status receive on RGMII, SGMII ore SMII
- * receive path and instruct the transmit to enter in LPI
- * state.
- */
- value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
- value |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;
+ if (mode == STMMAC_LPI_DISABLE) {
+ value = 0;
+ } else {
+ value = LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA;
- if (en_tx_lpi_clockgating)
- value |= GMAC4_LPI_CTRL_STATUS_LPITCSE;
+ if (mode == STMMAC_LPI_TIMER) {
+ /* Return ERANGE if the timer is larger than the
+ * register field.
+ */
+ if (et > STMMAC_ET_MAX)
+ return -ERANGE;
- writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
-}
+ /* Set the hardware LPI entry timer */
+ writel(et, ioaddr + GMAC4_LPI_ENTRY_TIMER);
-static void dwmac4_reset_eee_mode(struct mac_device_info *hw)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value;
+ /* Interpret a zero LPI entry timer to mean
+ * immediate entry into LPI mode.
+ */
+ if (et)
+ value |= LPI_CTRL_STATUS_LPIATE;
+ }
- value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
- value &= ~(GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA);
+ if (en_tx_lpi_clockgating)
+ value |= LPI_CTRL_STATUS_LPITCSE;
+ }
+
+ mask = LPI_CTRL_STATUS_LPIATE | LPI_CTRL_STATUS_LPIEN |
+ LPI_CTRL_STATUS_LPITXA | LPI_CTRL_STATUS_LPITCSE;
+
+ value |= readl(ioaddr + GMAC4_LPI_CTRL_STATUS) & ~mask;
writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
+
+ return 0;
}
static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
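
dwmac4_set_lpi_mode() folds the old set_eee_mode/reset_eee_mode/set_eee_lpi_entry_timer trio into one masked read-modify-write, so LPIATE, LPIEN, LPITXA and LPITCSE always change together in a single register write. The idiom in isolation:

	u32 mask = LPI_CTRL_STATUS_LPIATE | LPI_CTRL_STATUS_LPIEN |
		   LPI_CTRL_STATUS_LPITXA | LPI_CTRL_STATUS_LPITCSE;
	u32 reg;

	/* keep all bits outside the mask, overlay the newly computed ones */
	reg = readl(ioaddr + GMAC4_LPI_CTRL_STATUS) & ~mask;
	reg |= value;	/* value only carries bits within the mask */
	writel(reg, ioaddr + GMAC4_LPI_CTRL_STATUS);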
@@ -413,34 +426,13 @@ static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
if (link)
- value |= GMAC4_LPI_CTRL_STATUS_PLS;
+ value |= LPI_CTRL_STATUS_PLS;
else
- value &= ~GMAC4_LPI_CTRL_STATUS_PLS;
+ value &= ~LPI_CTRL_STATUS_PLS;
writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}
-static void dwmac4_set_eee_lpi_entry_timer(struct mac_device_info *hw, u32 et)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value = et & STMMAC_ET_MAX;
- int regval;
-
- /* Program LPI entry timer value into register */
- writel(value, ioaddr + GMAC4_LPI_ENTRY_TIMER);
-
- /* Enable/disable LPI entry timer */
- regval = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
- regval |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;
-
- if (et)
- regval |= GMAC4_LPI_CTRL_STATUS_LPIATE;
- else
- regval &= ~GMAC4_LPI_CTRL_STATUS_LPIATE;
-
- writel(regval, ioaddr + GMAC4_LPI_CTRL_STATUS);
-}
-
static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
{
void __iomem *ioaddr = hw->pcsr;
@@ -849,17 +841,17 @@ static int dwmac4_irq_status(struct mac_device_info *hw,
/* Clear LPI interrupt by reading MAC_LPI_Control_Status */
u32 status = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
- if (status & GMAC4_LPI_CTRL_STATUS_TLPIEN) {
+ if (status & LPI_CTRL_STATUS_TLPIEN) {
ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
x->irq_tx_path_in_lpi_mode_n++;
}
- if (status & GMAC4_LPI_CTRL_STATUS_TLPIEX) {
+ if (status & LPI_CTRL_STATUS_TLPIEX) {
ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
x->irq_tx_path_exit_lpi_mode_n++;
}
- if (status & GMAC4_LPI_CTRL_STATUS_RLPIEN)
+ if (status & LPI_CTRL_STATUS_RLPIEN)
x->irq_rx_path_in_lpi_mode_n++;
- if (status & GMAC4_LPI_CTRL_STATUS_RLPIEX)
+ if (status & LPI_CTRL_STATUS_RLPIEX)
x->irq_rx_path_exit_lpi_mode_n++;
}
@@ -1201,9 +1193,7 @@ const struct stmmac_ops dwmac4_ops = {
.pmt = dwmac4_pmt,
.set_umac_addr = dwmac4_set_umac_addr,
.get_umac_addr = dwmac4_get_umac_addr,
- .set_eee_mode = dwmac4_set_eee_mode,
- .reset_eee_mode = dwmac4_reset_eee_mode,
- .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
+ .set_lpi_mode = dwmac4_set_lpi_mode,
.set_eee_timer = dwmac4_set_eee_timer,
.set_eee_pls = dwmac4_set_eee_pls,
.pcs_ctrl_ane = dwmac4_ctrl_ane,
@@ -1245,9 +1235,7 @@ const struct stmmac_ops dwmac410_ops = {
.pmt = dwmac4_pmt,
.set_umac_addr = dwmac4_set_umac_addr,
.get_umac_addr = dwmac4_get_umac_addr,
- .set_eee_mode = dwmac4_set_eee_mode,
- .reset_eee_mode = dwmac4_reset_eee_mode,
- .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
+ .set_lpi_mode = dwmac4_set_lpi_mode,
.set_eee_timer = dwmac4_set_eee_timer,
.set_eee_pls = dwmac4_set_eee_pls,
.pcs_ctrl_ane = dwmac4_ctrl_ane,
@@ -1291,9 +1279,7 @@ const struct stmmac_ops dwmac510_ops = {
.pmt = dwmac4_pmt,
.set_umac_addr = dwmac4_set_umac_addr,
.get_umac_addr = dwmac4_get_umac_addr,
- .set_eee_mode = dwmac4_set_eee_mode,
- .reset_eee_mode = dwmac4_reset_eee_mode,
- .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
+ .set_lpi_mode = dwmac4_set_lpi_mode,
.set_eee_timer = dwmac4_set_eee_timer,
.set_eee_pls = dwmac4_set_eee_pls,
.pcs_ctrl_ane = dwmac4_ctrl_ane,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index 20027d3c25a7..a03f5d771566 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -112,14 +112,7 @@
#define XGMAC_MGKPKTEN BIT(1)
#define XGMAC_PWRDWN BIT(0)
#define XGMAC_LPI_CTRL 0x000000d0
-#define XGMAC_TXCGE BIT(21)
-#define XGMAC_LPITXA BIT(19)
-#define XGMAC_PLS BIT(17)
-#define XGMAC_LPITXEN BIT(16)
-#define XGMAC_RLPIEX BIT(3)
-#define XGMAC_RLPIEN BIT(2)
-#define XGMAC_TLPIEX BIT(1)
-#define XGMAC_TLPIEN BIT(0)
+/* For definitions, see LPI_CTRL_STATUS_xxx in common.h */
#define XGMAC_LPI_TIMER_CTRL 0x000000d4
#define XGMAC_HW_FEATURE0 0x0000011c
#define XGMAC_HWFEAT_EDMA BIT(31)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index 9a60a6e8f633..a6d395c6bacd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -316,17 +316,17 @@ static int dwxgmac2_host_irq_status(struct mac_device_info *hw,
if (stat & XGMAC_LPIIS) {
u32 lpi = readl(ioaddr + XGMAC_LPI_CTRL);
- if (lpi & XGMAC_TLPIEN) {
+ if (lpi & LPI_CTRL_STATUS_TLPIEN) {
ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
x->irq_tx_path_in_lpi_mode_n++;
}
- if (lpi & XGMAC_TLPIEX) {
+ if (lpi & LPI_CTRL_STATUS_TLPIEX) {
ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
x->irq_tx_path_exit_lpi_mode_n++;
}
- if (lpi & XGMAC_RLPIEN)
+ if (lpi & LPI_CTRL_STATUS_RLPIEN)
x->irq_rx_path_in_lpi_mode_n++;
- if (lpi & XGMAC_RLPIEX)
+ if (lpi & LPI_CTRL_STATUS_RLPIEX)
x->irq_rx_path_exit_lpi_mode_n++;
}
@@ -425,29 +425,28 @@ static void dwxgmac2_get_umac_addr(struct mac_device_info *hw,
addr[5] = (hi_addr >> 8) & 0xff;
}
-static void dwxgmac2_set_eee_mode(struct mac_device_info *hw,
- bool en_tx_lpi_clockgating)
+static int dwxgmac2_set_lpi_mode(struct mac_device_info *hw,
+ enum stmmac_lpi_mode mode,
+ bool en_tx_lpi_clockgating, u32 et)
{
void __iomem *ioaddr = hw->pcsr;
u32 value;
- value = readl(ioaddr + XGMAC_LPI_CTRL);
-
- value |= XGMAC_LPITXEN | XGMAC_LPITXA;
- if (en_tx_lpi_clockgating)
- value |= XGMAC_TXCGE;
-
- writel(value, ioaddr + XGMAC_LPI_CTRL);
-}
-
-static void dwxgmac2_reset_eee_mode(struct mac_device_info *hw)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value;
+ if (mode == STMMAC_LPI_TIMER)
+ return -EOPNOTSUPP;
value = readl(ioaddr + XGMAC_LPI_CTRL);
- value &= ~(XGMAC_LPITXEN | XGMAC_LPITXA | XGMAC_TXCGE);
+ if (mode == STMMAC_LPI_FORCED) {
+ value |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA;
+ if (en_tx_lpi_clockgating)
+ value |= LPI_CTRL_STATUS_LPITCSE;
+ } else {
+ value &= ~(LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA |
+ LPI_CTRL_STATUS_LPITCSE);
+ }
writel(value, ioaddr + XGMAC_LPI_CTRL);
+
+ return 0;
}
static void dwxgmac2_set_eee_pls(struct mac_device_info *hw, int link)
@@ -457,9 +456,9 @@ static void dwxgmac2_set_eee_pls(struct mac_device_info *hw, int link)
value = readl(ioaddr + XGMAC_LPI_CTRL);
if (link)
- value |= XGMAC_PLS;
+ value |= LPI_CTRL_STATUS_PLS;
else
- value &= ~XGMAC_PLS;
+ value &= ~LPI_CTRL_STATUS_PLS;
writel(value, ioaddr + XGMAC_LPI_CTRL);
}
@@ -1525,8 +1524,7 @@ const struct stmmac_ops dwxgmac210_ops = {
.pmt = dwxgmac2_pmt,
.set_umac_addr = dwxgmac2_set_umac_addr,
.get_umac_addr = dwxgmac2_get_umac_addr,
- .set_eee_mode = dwxgmac2_set_eee_mode,
- .reset_eee_mode = dwxgmac2_reset_eee_mode,
+ .set_lpi_mode = dwxgmac2_set_lpi_mode,
.set_eee_timer = dwxgmac2_set_eee_timer,
.set_eee_pls = dwxgmac2_set_eee_pls,
.debug = NULL,
@@ -1582,8 +1580,7 @@ const struct stmmac_ops dwxlgmac2_ops = {
.pmt = dwxgmac2_pmt,
.set_umac_addr = dwxgmac2_set_umac_addr,
.get_umac_addr = dwxgmac2_get_umac_addr,
- .set_eee_mode = dwxgmac2_set_eee_mode,
- .reset_eee_mode = dwxgmac2_reset_eee_mode,
+ .set_lpi_mode = dwxgmac2_set_lpi_mode,
.set_eee_timer = dwxgmac2_set_eee_timer,
.set_eee_pls = dwxgmac2_set_eee_pls,
.debug = NULL,
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 0f200b72c225..27c63a9fc163 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -306,6 +306,12 @@ struct stmmac_pps_cfg;
struct stmmac_rss;
struct stmmac_est;
+enum stmmac_lpi_mode {
+ STMMAC_LPI_DISABLE,
+ STMMAC_LPI_FORCED,
+ STMMAC_LPI_TIMER,
+};
+
/* Helpers to program the MAC core */
struct stmmac_ops {
/* MAC core initialization */
@@ -360,10 +366,9 @@ struct stmmac_ops {
unsigned int reg_n);
void (*get_umac_addr)(struct mac_device_info *hw, unsigned char *addr,
unsigned int reg_n);
- void (*set_eee_mode)(struct mac_device_info *hw,
- bool en_tx_lpi_clockgating);
- void (*reset_eee_mode)(struct mac_device_info *hw);
- void (*set_eee_lpi_entry_timer)(struct mac_device_info *hw, u32 et);
+ int (*set_lpi_mode)(struct mac_device_info *hw,
+ enum stmmac_lpi_mode mode,
+ bool en_tx_lpi_clockgating, u32 et);
void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw);
void (*set_eee_pls)(struct mac_device_info *hw, int link);
void (*debug)(struct stmmac_priv *priv, void __iomem *ioaddr,
@@ -467,12 +472,8 @@ struct stmmac_ops {
stmmac_do_void_callback(__priv, mac, set_umac_addr, __args)
#define stmmac_get_umac_addr(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, get_umac_addr, __args)
-#define stmmac_set_eee_mode(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, set_eee_mode, __args)
-#define stmmac_reset_eee_mode(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, reset_eee_mode, __args)
-#define stmmac_set_eee_lpi_timer(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, set_eee_lpi_entry_timer, __args)
+#define stmmac_set_lpi_mode(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, set_lpi_mode, __args)
#define stmmac_set_eee_timer(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, set_eee_timer, __args)
#define stmmac_set_eee_pls(__priv, __args...) \
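
Callers pick one of the three modes, and the int return lets cores without a hardware LPI entry timer (dwmac1000 and xgmac above return -EOPNOTSUPP for STMMAC_LPI_TIMER) push the caller back to forced or software-timed LPI. A sketch of the assumed calling pattern (illustrative, not core code from this diff):

	/* prefer hardware-timed LPI entry; fall back if unsupported */
	ret = stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_TIMER,
				  priv->tx_lpi_clk_stop, priv->tx_lpi_timer);
	if (ret == -EOPNOTSUPP)
		ret = stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_FORCED,
					  priv->tx_lpi_clk_stop, 0);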
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index f05cae103d83..bddfa0f4aa21 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -106,6 +106,8 @@ struct stmmac_metadata_request {
struct stmmac_priv *priv;
struct dma_desc *tx_desc;
bool *set_ic;
+ struct dma_edesc *edesc;
+ int tbs;
};
struct stmmac_xsk_tx_complete {
@@ -257,7 +259,7 @@ struct stmmac_priv {
/* Frequently used values are kept adjacent for cache effect */
u32 tx_coal_frames[MTL_MAX_TX_QUEUES];
u32 tx_coal_timer[MTL_MAX_TX_QUEUES];
- u32 rx_coal_frames[MTL_MAX_TX_QUEUES];
+ u32 rx_coal_frames[MTL_MAX_RX_QUEUES];
int hwts_tx_en;
bool tx_path_in_lpi_mode;
@@ -265,8 +267,7 @@ struct stmmac_priv {
int sph;
int sph_cap;
u32 sarc_type;
-
- u32 rx_riwt[MTL_MAX_TX_QUEUES];
+ u32 rx_riwt[MTL_MAX_RX_QUEUES];
int hwts_rx_en;
void __iomem *ioaddr;
@@ -281,9 +282,7 @@ struct stmmac_priv {
/* Generic channel for NAPI */
struct stmmac_channel channel[STMMAC_CH_MAX];
- int speed;
- unsigned int flow_ctrl;
- unsigned int pause;
+ unsigned int pause_time;
struct mii_bus *mii;
struct phylink_config phylink_config;
@@ -307,6 +306,7 @@ struct stmmac_priv {
struct timer_list eee_ctrl_timer;
int lpi_irq;
u32 tx_lpi_timer;
+ bool tx_lpi_clk_stop;
bool eee_enabled;
bool eee_active;
bool eee_sw_timer_en;
@@ -343,7 +343,7 @@ struct stmmac_priv {
char int_name_sfty[IFNAMSIZ + 10];
char int_name_sfty_ce[IFNAMSIZ + 10];
char int_name_sfty_ue[IFNAMSIZ + 10];
- char int_name_rx_irq[MTL_MAX_TX_QUEUES][IFNAMSIZ + 14];
+ char int_name_rx_irq[MTL_MAX_RX_QUEUES][IFNAMSIZ + 14];
char int_name_tx_irq[MTL_MAX_TX_QUEUES][IFNAMSIZ + 18];
#ifdef CONFIG_DEBUG_FS
@@ -407,6 +407,8 @@ int stmmac_dvr_probe(struct device *device,
int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt);
int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size);
int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled);
+int stmmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed);
static inline bool stmmac_xdp_is_enabled(struct stmmac_priv *priv)
{
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index c0ae7db96f46..279532609707 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -88,19 +88,20 @@ MODULE_PARM_DESC(phyaddr, "Physical device address");
#define STMMAC_XDP_TX BIT(1)
#define STMMAC_XDP_REDIRECT BIT(2)
-static int flow_ctrl = FLOW_AUTO;
+static int flow_ctrl = 0xdead;
module_param(flow_ctrl, int, 0644);
-MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
+MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off] (obsolete)");
static int pause = PAUSE_TIME;
module_param(pause, int, 0644);
-MODULE_PARM_DESC(pause, "Flow Control Pause Time");
+MODULE_PARM_DESC(pause, "Flow Control Pause Time (units of 512 bit times)");
#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, 0644);
MODULE_PARM_DESC(tc, "DMA threshold control value");
+/* The buf_sz module parameter below is no longer used */
#define DEFAULT_BUFSIZE 1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
@@ -178,6 +179,38 @@ int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
/**
+ * stmmac_set_clk_tx_rate() - set the clock rate for the MAC transmit clock
+ * @bsp_priv: BSP private data structure (unused)
+ * @clk_tx_i: the transmit clock
+ * @interface: the selected interface mode
+ * @speed: the speed that the MAC will be operating at
+ *
+ * Set the transmit clock rate for the MAC, normally 2.5MHz for 10Mbps,
+ * 25MHz for 100Mbps and 125MHz for 1Gbps. This is suitable for at least
+ * MII, GMII, RGMII and RMII interface modes. Platforms can hook this into
+ * the plat_data->set_clk_tx_rate method directly, call it via their own
+ * implementation, or implement their own method should they have more
+ * complex requirements. It is intended to be used only via this method.
+ *
+ * plat_data->clk_tx_i must be filled in.
+ */
+int stmmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
+{
+ long rate = rgmii_clock(speed);
+
+ /* Silently ignore unsupported speeds as rgmii_clock() only
+ * supports 10, 100 and 1000Mbps. We do not want to spit
+ * errors for 2500 and higher speeds here.
+ */
+ if (rate < 0)
+ return 0;
+
+ return clk_set_rate(clk_tx_i, rate);
+}
+EXPORT_SYMBOL_GPL(stmmac_set_clk_tx_rate);
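A hedged sketch of the intended hook-up from a platform glue driver. The probe helper and the "tx" clock name are assumptions for illustration; clk_tx_i and set_clk_tx_rate are the plat_data fields consumed above:

static int example_glue_setup(struct platform_device *pdev,
			      struct plat_stmmacenet_data *plat)
{
	/* "tx" is an assumed clock name for this sketch */
	plat->clk_tx_i = devm_clk_get(&pdev->dev, "tx");
	if (IS_ERR(plat->clk_tx_i))
		return PTR_ERR(plat->clk_tx_i);

	plat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
	return 0;
}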
+
+/**
* stmmac_verify_args - verify the driver parameters.
* Description: it checks the driver parameters and set a default in case of
* errors.
@@ -186,14 +219,11 @@ static void stmmac_verify_args(void)
{
if (unlikely(watchdog < 0))
watchdog = TX_TIMEO;
- if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
- buf_sz = DEFAULT_BUFSIZE;
- if (unlikely(flow_ctrl > 1))
- flow_ctrl = FLOW_AUTO;
- else if (likely(flow_ctrl < 0))
- flow_ctrl = FLOW_OFF;
if (unlikely((pause < 0) || (pause > 0xffff)))
pause = PAUSE_TIME;
+
+ if (flow_ctrl != 0xdead)
+ pr_warn("stmmac: module parameter 'flow_ctrl' is obsolete - please remove from your module configuration\n");
}
static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
@@ -390,16 +420,6 @@ static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
return dirty;
}
-static void stmmac_disable_hw_lpi_timer(struct stmmac_priv *priv)
-{
- stmmac_set_eee_lpi_timer(priv, priv->hw, 0);
-}
-
-static void stmmac_enable_hw_lpi_timer(struct stmmac_priv *priv)
-{
- stmmac_set_eee_lpi_timer(priv, priv->hw, priv->tx_lpi_timer);
-}
-
static bool stmmac_eee_tx_busy(struct stmmac_priv *priv)
{
u32 tx_cnt = priv->plat->tx_queues_to_use;
@@ -436,8 +456,8 @@ static void stmmac_try_to_start_sw_lpi(struct stmmac_priv *priv)
/* Check and enter in LPI mode */
if (!priv->tx_path_in_lpi_mode)
- stmmac_set_eee_mode(priv, priv->hw,
- priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
+ stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_FORCED,
+ priv->tx_lpi_clk_stop, 0);
}
/**
@@ -447,8 +467,8 @@ static void stmmac_try_to_start_sw_lpi(struct stmmac_priv *priv)
*/
static void stmmac_stop_sw_lpi(struct stmmac_priv *priv)
{
- stmmac_reset_eee_mode(priv, priv->hw);
del_timer_sync(&priv->eee_ctrl_timer);
+ stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
priv->tx_path_in_lpi_mode = false;
}
@@ -466,74 +486,6 @@ static void stmmac_eee_ctrl_timer(struct timer_list *t)
stmmac_try_to_start_sw_lpi(priv);
}
-/**
- * stmmac_eee_init - init EEE
- * @priv: driver private structure
- * @active: indicates whether EEE should be enabled.
- * Description:
- * if the GMAC supports the EEE (from the HW cap reg) and the phy device
- * can also manage EEE, this function enable the LPI state and start related
- * timer.
- */
-static void stmmac_eee_init(struct stmmac_priv *priv, bool active)
-{
- priv->eee_active = active;
-
- /* Check if MAC core supports the EEE feature. */
- if (!priv->dma_cap.eee) {
- priv->eee_enabled = false;
- return;
- }
-
- mutex_lock(&priv->lock);
-
- /* Check if it needs to be deactivated */
- if (!priv->eee_active) {
- if (priv->eee_enabled) {
- netdev_dbg(priv->dev, "disable EEE\n");
- priv->eee_sw_timer_en = false;
- stmmac_disable_hw_lpi_timer(priv);
- del_timer_sync(&priv->eee_ctrl_timer);
- stmmac_set_eee_timer(priv, priv->hw, 0,
- STMMAC_DEFAULT_TWT_LS);
- if (priv->hw->xpcs)
- xpcs_config_eee(priv->hw->xpcs,
- priv->plat->mult_fact_100ns,
- false);
- }
- priv->eee_enabled = false;
- mutex_unlock(&priv->lock);
- return;
- }
-
- if (priv->eee_active && !priv->eee_enabled) {
- stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
- STMMAC_DEFAULT_TWT_LS);
- if (priv->hw->xpcs)
- xpcs_config_eee(priv->hw->xpcs,
- priv->plat->mult_fact_100ns,
- true);
- }
-
- if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
- /* Use hardware LPI mode */
- del_timer_sync(&priv->eee_ctrl_timer);
- priv->tx_path_in_lpi_mode = false;
- priv->eee_sw_timer_en = false;
- stmmac_enable_hw_lpi_timer(priv);
- } else {
- /* Use software LPI mode */
- priv->eee_sw_timer_en = true;
- stmmac_disable_hw_lpi_timer(priv);
- stmmac_restart_sw_lpi_timer(priv);
- }
-
- priv->eee_enabled = true;
-
- mutex_unlock(&priv->lock);
- netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
-}
-
/* stmmac_get_tx_hwtstamp - get HW TX timestamps
* @priv: driver private structure
* @p : descriptor pointer
@@ -935,14 +887,16 @@ static void stmmac_release_ptp(struct stmmac_priv *priv)
* stmmac_mac_flow_ctrl - Configure flow control in all queues
* @priv: driver private structure
* @duplex: duplex passed to the next function
+ * @flow_ctrl: desired flow control modes
* Description: It is used for configuring the flow control in all queues
*/
-static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
+static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex,
+ unsigned int flow_ctrl)
{
u32 tx_cnt = priv->plat->tx_queues_to_use;
- stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
- priv->pause, tx_cnt);
+ stmmac_flow_ctrl(priv, priv->hw, duplex, flow_ctrl, priv->pause_time,
+ tx_cnt);
}
static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
@@ -1002,7 +956,9 @@ static void stmmac_mac_link_up(struct phylink_config *config,
bool tx_pause, bool rx_pause)
{
struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+ unsigned int flow_ctrl;
u32 old_ctrl, ctrl;
+ int ret;
if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
priv->plat->serdes_powerup)
@@ -1070,8 +1026,6 @@ static void stmmac_mac_link_up(struct phylink_config *config,
}
}
- priv->speed = speed;
-
if (priv->plat->fix_mac_speed)
priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
@@ -1082,19 +1036,29 @@ static void stmmac_mac_link_up(struct phylink_config *config,
/* Flow Control operation */
if (rx_pause && tx_pause)
- priv->flow_ctrl = FLOW_AUTO;
+ flow_ctrl = FLOW_AUTO;
else if (rx_pause && !tx_pause)
- priv->flow_ctrl = FLOW_RX;
+ flow_ctrl = FLOW_RX;
else if (!rx_pause && tx_pause)
- priv->flow_ctrl = FLOW_TX;
+ flow_ctrl = FLOW_TX;
else
- priv->flow_ctrl = FLOW_OFF;
+ flow_ctrl = FLOW_OFF;
- stmmac_mac_flow_ctrl(priv, duplex);
+ stmmac_mac_flow_ctrl(priv, duplex, flow_ctrl);
if (ctrl != old_ctrl)
writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
+ if (priv->plat->set_clk_tx_rate) {
+ ret = priv->plat->set_clk_tx_rate(priv->plat->bsp_priv,
+ priv->plat->clk_tx_i,
+ interface, speed);
+ if (ret < 0)
+ netdev_err(priv->dev,
+ "failed to configure transmit clock for %dMbps: %pe\n",
+ speed, ERR_PTR(ret));
+ }
+
stmmac_mac_set(priv, priv->ioaddr, true);
if (priv->dma_cap.eee)
stmmac_set_eee_pls(priv, priv->hw, true);
@@ -1110,16 +1074,70 @@ static void stmmac_mac_disable_tx_lpi(struct phylink_config *config)
{
struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
- stmmac_eee_init(priv, false);
+ priv->eee_active = false;
+
+ mutex_lock(&priv->lock);
+
+ priv->eee_enabled = false;
+
+ netdev_dbg(priv->dev, "disable EEE\n");
+ priv->eee_sw_timer_en = false;
+ del_timer_sync(&priv->eee_ctrl_timer);
+ stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
+ priv->tx_path_in_lpi_mode = false;
+
+ stmmac_set_eee_timer(priv, priv->hw, 0, STMMAC_DEFAULT_TWT_LS);
+ mutex_unlock(&priv->lock);
}
static int stmmac_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
bool tx_clk_stop)
{
struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+ int ret;
priv->tx_lpi_timer = timer;
- stmmac_eee_init(priv, true);
+ priv->eee_active = true;
+
+ mutex_lock(&priv->lock);
+
+ priv->eee_enabled = true;
+
+ /* Update the transmit clock stop according to PHY capability if
+ * the platform allows
+ */
+ if (priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP)
+ priv->tx_lpi_clk_stop = tx_clk_stop;
+
+ stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
+ STMMAC_DEFAULT_TWT_LS);
+
+ /* Try to configure the hardware timer. */
+ ret = stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_TIMER,
+ priv->tx_lpi_clk_stop, priv->tx_lpi_timer);
+
+ if (ret) {
+ /* Hardware timer mode not supported, or value out of range.
+ * Fall back to using software LPI mode
+ */
+ priv->eee_sw_timer_en = true;
+ stmmac_restart_sw_lpi_timer(priv);
+ }
+
+ mutex_unlock(&priv->lock);
+ netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
+
+ return 0;
+}
+
+static int stmmac_mac_finish(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ if (priv->plat->mac_finish)
+ priv->plat->mac_finish(ndev, priv->plat->bsp_priv, mode, interface);
return 0;
}
@@ -1132,6 +1150,7 @@ static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
.mac_link_up = stmmac_mac_link_up,
.mac_disable_tx_lpi = stmmac_mac_disable_tx_lpi,
.mac_enable_tx_lpi = stmmac_mac_enable_tx_lpi,
+ .mac_finish = stmmac_mac_finish,
};
/**
@@ -1254,6 +1273,10 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI))
priv->phylink_config.eee_rx_clk_stop_enable = true;
+ /* Set the default transmit clock stop bit based on the platform glue */
+ priv->tx_lpi_clk_stop = priv->plat->flags &
+ STMMAC_FLAG_EN_TX_LPI_CLOCKGATING;
+
mdio_bus_data = priv->plat->mdio_bus_data;
if (mdio_bus_data)
priv->phylink_config.default_an_inband =
@@ -2524,9 +2547,20 @@ static u64 stmmac_xsk_fill_timestamp(void *_priv)
return 0;
}
+static void stmmac_xsk_request_launch_time(u64 launch_time, void *_priv)
+{
+ struct timespec64 ts = ns_to_timespec64(launch_time);
+ struct stmmac_metadata_request *meta_req = _priv;
+
+ if (meta_req->tbs & STMMAC_TBS_EN)
+ stmmac_set_desc_tbs(meta_req->priv, meta_req->edesc, ts.tv_sec,
+ ts.tv_nsec);
+}
+
static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
.tmo_request_timestamp = stmmac_xsk_request_timestamp,
.tmo_fill_timestamp = stmmac_xsk_fill_timestamp,
+ .tmo_request_launch_time = stmmac_xsk_request_launch_time,
};
static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
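The new tmo_request_launch_time hook is fed from AF_XDP transmit metadata. A sketch of the producer side, assuming the XDP_TXMD_FLAGS_LAUNCH_TIME flag and launch_time request field that this hook serves; the helper itself is hypothetical:

#include <linux/if_xdp.h>

static void example_request_launch_time(struct xsk_tx_metadata *meta,
					__u64 txtime_ns)
{
	/* txtime_ns: absolute transmit time in the NIC's PTP time base */
	meta->flags |= XDP_TXMD_FLAGS_LAUNCH_TIME;
	meta->request.launch_time = txtime_ns;
}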
@@ -2610,6 +2644,8 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
meta_req.priv = priv;
meta_req.tx_desc = tx_desc;
meta_req.set_ic = &set_ic;
+ meta_req.tbs = tx_q->tbs;
+ meta_req.edesc = &tx_q->dma_entx[entry];
xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
&meta_req);
if (set_ic) {
@@ -3072,7 +3108,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
int ret = 0;
if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
- dev_err(priv->device, "Invalid DMA configuration\n");
+ netdev_err(priv->dev, "Invalid DMA configuration\n");
return -EINVAL;
}
@@ -3081,7 +3117,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
ret = stmmac_reset(priv, priv->ioaddr);
if (ret) {
- dev_err(priv->device, "Failed to reset the dma\n");
+ netdev_err(priv->dev, "Failed to reset the dma\n");
return ret;
}
@@ -3199,8 +3235,7 @@ static void stmmac_init_coalesce(struct stmmac_priv *priv)
priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
- hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- tx_q->txtimer.function = stmmac_tx_timer;
+ hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}
for (chan = 0; chan < rx_channel_count; chan++)
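The hrtimer_setup() conversions in this patch are mechanical; the two forms are equivalent:

	/* before */
	hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	tx_q->txtimer.function = stmmac_tx_timer;

	/* after */
	hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL);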
@@ -3448,9 +3483,18 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
if (priv->hw->phylink_pcs)
phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
+ /* Note that clk_rx_i must be running for reset to complete. This
+ * clock may also be required when setting the MAC address.
+ *
+ * Block the receive clock stop for LPI mode at the PHY in case
+ * the link is established with EEE mode active.
+ */
+ phylink_rx_clk_stop_block(priv->phylink);
+
/* DMA initialization and SW reset */
ret = stmmac_init_dma_engine(priv);
if (ret < 0) {
+ phylink_rx_clk_stop_unblock(priv->phylink);
netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
__func__);
return ret;
@@ -3458,6 +3502,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
/* Copy the MAC addr into the HW */
stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
+ phylink_rx_clk_stop_unblock(priv->phylink);
/* PS and related bits will be programmed according to the speed */
if (priv->hw->pcs) {
@@ -3568,7 +3613,9 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
/* Start the ball rolling... */
stmmac_start_all_dma(priv);
+ phylink_rx_clk_stop_block(priv->phylink);
stmmac_set_hw_vlan_mode(priv, priv->hw);
+ phylink_rx_clk_stop_unblock(priv->phylink);
return 0;
}
@@ -3640,7 +3687,6 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
enum request_irq_err irq_err;
- cpumask_t cpu_mask;
int irq_idx = 0;
char *int_name;
int ret;
@@ -3769,9 +3815,8 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
irq_idx = i;
goto irq_error;
}
- cpumask_clear(&cpu_mask);
- cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
- irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
+ irq_set_affinity_hint(priv->rx_irq[i],
+ cpumask_of(i % num_online_cpus()));
}
/* Request Tx MSI irq */
@@ -3794,9 +3839,8 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
irq_idx = i;
goto irq_error;
}
- cpumask_clear(&cpu_mask);
- cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
- irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
+ irq_set_affinity_hint(priv->tx_irq[i],
+ cpumask_of(i % num_online_cpus()));
}
return 0;
@@ -3997,7 +4041,6 @@ static int __stmmac_open(struct net_device *dev,
}
}
- buf_sz = dma_conf->dma_buf_sz;
for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
@@ -4102,9 +4145,6 @@ static int stmmac_release(struct net_device *dev)
/* Release and free the Rx/Tx resources */
free_dma_desc_resources(priv, &priv->dma_conf);
- /* Disable the MAC Rx/Tx */
- stmmac_mac_set(priv, priv->ioaddr, false);
-
/* Powerdown Serdes if there is */
if (priv->plat->serdes_powerdown)
priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
@@ -5454,10 +5494,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
struct sk_buff *skb = NULL;
struct stmmac_xdp_buff ctx;
int xdp_status = 0;
- int buf_sz;
+ int bufsz;
dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
- buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
+ bufsz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
if (netif_msg_rx_status(priv)) {
@@ -5570,7 +5610,7 @@ read_again:
net_prefetch(page_address(buf->page) +
buf->page_offset);
- xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
+ xdp_init_buff(&ctx.xdp, bufsz, &rx_q->xdp_rxq);
xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
buf->page_offset, buf1_len, true);
@@ -5856,6 +5896,9 @@ static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
* whenever multicast addresses must be enabled/disabled.
* Return value:
* void.
+ *
+ * FIXME: This may need RXC to be running, but it may be called with BH
+ * disabled, which means we can't call phylink_rx_clk_stop*().
*/
static void stmmac_set_rx_mode(struct net_device *dev)
{
@@ -5988,7 +6031,9 @@ static int stmmac_set_features(struct net_device *netdev,
else
priv->hw->hw_vlan_en = false;
+ phylink_rx_clk_stop_block(priv->phylink);
stmmac_set_hw_vlan_mode(priv, priv->hw);
+ phylink_rx_clk_stop_unblock(priv->phylink);
return 0;
}
@@ -6272,7 +6317,9 @@ static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
if (ret)
goto set_mac_error;
+ phylink_rx_clk_stop_block(priv->phylink);
stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
+ phylink_rx_clk_stop_unblock(priv->phylink);
set_mac_error:
pm_runtime_put(priv->device);
@@ -6628,6 +6675,9 @@ static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
}
+/* FIXME: This may need RXC to be running, but it may be called with BH
+ * disabled, which means we can't call phylink_rx_clk_stop*().
+ */
static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
struct stmmac_priv *priv = netdev_priv(ndev);
@@ -6659,6 +6709,9 @@ err_pm_put:
return ret;
}
+/* FIXME: This may need RXC to be running, but it may be called with BH
+ * disabled, which means we can't call phylink_rx_clk_stop*().
+ */
static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
struct stmmac_priv *priv = netdev_priv(ndev);
@@ -6970,8 +7023,7 @@ int stmmac_xdp_open(struct net_device *dev)
stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
tx_q->tx_tail_addr, chan);
- hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- tx_q->txtimer.function = stmmac_tx_timer;
+ hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}
/* Enable the MAC Rx/Tx */
@@ -7444,7 +7496,7 @@ int stmmac_dvr_probe(struct device *device,
return -ENOMEM;
stmmac_set_ethtool_ops(ndev);
- priv->pause = pause;
+ priv->pause_time = pause;
priv->plat = plat_dat;
priv->ioaddr = res->addr;
priv->dev->base_addr = (unsigned long)res->addr;
@@ -7640,9 +7692,6 @@ int stmmac_dvr_probe(struct device *device,
"%s: warning: maxmtu having invalid value (%d)\n",
__func__, priv->plat->maxmtu);
- if (flow_ctrl)
- priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
-
ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
/* Setup channels NAPI */
@@ -7744,8 +7793,6 @@ void stmmac_dvr_remove(struct device *dev)
pm_runtime_get_sync(dev);
- stmmac_stop_all_dma(priv);
- stmmac_mac_set(priv, priv->ioaddr, false);
unregister_netdev(ndev);
#ifdef CONFIG_DEBUG_FS
@@ -7816,19 +7863,16 @@ int stmmac_suspend(struct device *dev)
mutex_unlock(&priv->lock);
rtnl_lock();
- if (device_may_wakeup(priv->device) && priv->plat->pmt) {
- phylink_suspend(priv->phylink, true);
- } else {
- if (device_may_wakeup(priv->device))
- phylink_speed_down(priv->phylink, false);
- phylink_suspend(priv->phylink, false);
- }
+ if (device_may_wakeup(priv->device) && !priv->plat->pmt)
+ phylink_speed_down(priv->phylink, false);
+
+ phylink_suspend(priv->phylink,
+ device_may_wakeup(priv->device) && priv->plat->pmt);
rtnl_unlock();
if (stmmac_fpe_supported(priv))
timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
- priv->speed = SPEED_UNKNOWN;
return 0;
}
EXPORT_SYMBOL_GPL(stmmac_suspend);
@@ -7912,16 +7956,12 @@ int stmmac_resume(struct device *dev)
}
rtnl_lock();
- if (device_may_wakeup(priv->device) && priv->plat->pmt) {
- phylink_resume(priv->phylink);
- } else {
- phylink_resume(priv->phylink);
- if (device_may_wakeup(priv->device))
- phylink_speed_up(priv->phylink);
- }
- rtnl_unlock();
- rtnl_lock();
+ /* Prepare the PHY to resume, ensuring that its clocks, which are
+ * necessary for the MAC DMA reset to complete, are running.
+ */
+ phylink_prepare_resume(priv->phylink);
+
mutex_lock(&priv->lock);
stmmac_reset_queues_param(priv);
@@ -7931,14 +7971,25 @@ int stmmac_resume(struct device *dev)
stmmac_hw_setup(ndev, false);
stmmac_init_coalesce(priv);
+ phylink_rx_clk_stop_block(priv->phylink);
stmmac_set_rx_mode(ndev);
stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
+ phylink_rx_clk_stop_unblock(priv->phylink);
stmmac_enable_all_queues(priv);
stmmac_enable_all_dma_irq(priv);
mutex_unlock(&priv->lock);
+
+ /* phylink_resume() must be called after the hardware has been
+ * initialised because it may bring the link up immediately in a
+ * workqueue thread, which will race with initialisation.
+ */
+ phylink_resume(priv->phylink);
+ if (device_may_wakeup(priv->device) && !priv->plat->pmt)
+ phylink_speed_up(priv->phylink);
+
rtnl_unlock();
netif_device_attach(ndev);
@@ -7961,9 +8012,6 @@ static int __init stmmac_cmdline_opt(char *str)
} else if (!strncmp(opt, "phyaddr:", 8)) {
if (kstrtoint(opt + 8, 0, &phyaddr))
goto err;
- } else if (!strncmp(opt, "buf_sz:", 7)) {
- if (kstrtoint(opt + 7, 0, &buf_sz))
- goto err;
} else if (!strncmp(opt, "tc:", 3)) {
if (kstrtoint(opt + 3, 0, &tc))
goto err;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 0c7d81ddd440..836f2848dfeb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -524,6 +524,9 @@ int stmmac_pcs_setup(struct net_device *ndev)
if (ret)
return dev_err_probe(priv->device, ret, "No xPCS found\n");
+ if (xpcs)
+ xpcs_config_eee_mult_fact(xpcs, priv->plat->mult_fact_100ns);
+
priv->hw->xpcs = xpcs;
return 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 352b01678c22..9c1b54b701f7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -155,9 +155,9 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
{
struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data;
struct plat_stmmacenet_data *plat;
- struct stmmac_resources res;
- int i;
+ struct stmmac_resources res = {};
int ret;
+ int i;
plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
if (!plat)
@@ -192,9 +192,9 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (pci_resource_len(pdev, i) == 0)
continue;
- ret = pcim_iomap_regions(pdev, BIT(i), pci_name(pdev));
- if (ret)
- return ret;
+ res.addr = pcim_iomap_region(pdev, i, STMMAC_RESOURCE_NAME);
+ if (IS_ERR(res.addr))
+ return PTR_ERR(res.addr);
break;
}
@@ -204,8 +204,6 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- memset(&res, 0, sizeof(res));
- res.addr = pcim_iomap_table(pdev)[i];
res.wol_irq = pdev->irq;
res.irq = pdev->irq;
@@ -226,21 +224,11 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
* stmmac_pci_remove
*
* @pdev: platform device pointer
- * Description: this function calls the main to free the net resources
- * and releases the PCI resources.
+ * Description: this function calls the main to free the net resources.
*/
static void stmmac_pci_remove(struct pci_dev *pdev)
{
- int i;
-
stmmac_dvr_remove(&pdev->dev);
-
- for (i = 0; i < PCI_STD_NUM_BARS; i++) {
- if (pci_resource_len(pdev, i) == 0)
- continue;
- pcim_iounmap_regions(pdev, BIT(i));
- break;
- }
}
static int __maybe_unused stmmac_pci_suspend(struct device *dev)
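pcim_iomap_region() hands back a device-managed mapping, which is what lets the manual unmap loop above disappear. A minimal probe-side sketch under the same assumption as this driver (only the first non-empty BAR is mapped; the helper name is illustrative):

static void __iomem *example_map_first_bar(struct pci_dev *pdev)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
		/* unmapped automatically when the driver detaches */
		return pcim_iomap_region(pdev, i, STMMAC_RESOURCE_NAME);
	}
	return IOMEM_ERR_PTR(-ENODEV);
}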
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index d0e61aa1a495..c73eff6a56b8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -405,6 +405,17 @@ static int stmmac_of_get_mac_mode(struct device_node *np)
return -ENODEV;
}
+/* Compatible string array for all gmac4 devices */
+static const char * const stmmac_gmac4_compats[] = {
+ "snps,dwmac-4.00",
+ "snps,dwmac-4.10a",
+ "snps,dwmac-4.20a",
+ "snps,dwmac-5.10a",
+ "snps,dwmac-5.20",
+ "snps,dwmac-5.30a",
+ NULL
+};
+
/**
* stmmac_probe_config_dt - parse device-tree driver parameters
* @pdev: platform_device structure
@@ -486,8 +497,11 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
plat->force_sf_dma_mode =
of_property_read_bool(np, "snps,force_sf_dma_mode");
- if (of_property_read_bool(np, "snps,en-tx-lpi-clockgating"))
+ if (of_property_read_bool(np, "snps,en-tx-lpi-clockgating")) {
+ dev_warn(&pdev->dev,
+ "OF property snps,en-tx-lpi-clockgating is deprecated, please convert driver to use STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP\n");
plat->flags |= STMMAC_FLAG_EN_TX_LPI_CLOCKGATING;
+ }
/* Set the maxmtu to a default of JUMBO_LEN in case the
* parameter is not present in the device tree.
@@ -538,11 +552,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
plat->pmt = 1;
}
- if (of_device_is_compatible(np, "snps,dwmac-4.00") ||
- of_device_is_compatible(np, "snps,dwmac-4.10a") ||
- of_device_is_compatible(np, "snps,dwmac-4.20a") ||
- of_device_is_compatible(np, "snps,dwmac-5.10a") ||
- of_device_is_compatible(np, "snps,dwmac-5.20")) {
+ if (of_device_compatible_match(np, stmmac_gmac4_compats)) {
plat->has_gmac4 = 1;
plat->has_gmac = 0;
plat->pmt = 1;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
index 3ca1c2a816ff..a01bc394d1ac 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -382,14 +382,14 @@ static int stmmac_test_phy_loopback(struct stmmac_priv *priv)
if (!priv->dev->phydev)
return -EOPNOTSUPP;
- ret = phy_loopback(priv->dev->phydev, true);
+ ret = phy_loopback(priv->dev->phydev, true, 0);
if (ret)
return ret;
attr.dst = priv->dev->dev_addr;
ret = __stmmac_test_loopback(priv, &attr);
- phy_loopback(priv->dev->phydev, false);
+ phy_loopback(priv->dev->phydev, false, 0);
return ret;
}
@@ -1985,7 +1985,7 @@ void stmmac_selftest_run(struct net_device *dev,
case STMMAC_LOOPBACK_PHY:
ret = -EOPNOTSUPP;
if (dev->phydev)
- ret = phy_loopback(dev->phydev, true);
+ ret = phy_loopback(dev->phydev, true, 0);
if (!ret)
break;
fallthrough;
@@ -2018,7 +2018,7 @@ void stmmac_selftest_run(struct net_device *dev,
case STMMAC_LOOPBACK_PHY:
ret = -EOPNOTSUPP;
if (dev->phydev)
- ret = phy_loopback(dev->phydev, false);
+ ret = phy_loopback(dev->phydev, false, 0);
if (!ret)
break;
fallthrough;
diff --git a/drivers/net/ethernet/tehuti/tn40.c b/drivers/net/ethernet/tehuti/tn40.c
index 259bdac24cf2..558b791a97ed 100644
--- a/drivers/net/ethernet/tehuti/tn40.c
+++ b/drivers/net/ethernet/tehuti/tn40.c
@@ -1778,7 +1778,7 @@ static int tn40_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ret = tn40_phy_register(priv);
if (ret) {
dev_err(&pdev->dev, "failed to set up PHY.\n");
- goto err_free_irq;
+ goto err_cleanup_swnodes;
}
ret = tn40_priv_init(priv);
@@ -1795,6 +1795,8 @@ static int tn40_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_unregister_phydev:
tn40_phy_unregister(priv);
+err_cleanup_swnodes:
+ tn40_swnodes_cleanup(priv);
err_free_irq:
pci_free_irq_vectors(pdev);
err_unset_drvdata:
@@ -1816,6 +1818,7 @@ static void tn40_remove(struct pci_dev *pdev)
unregister_netdev(ndev);
tn40_phy_unregister(priv);
+ tn40_swnodes_cleanup(priv);
pci_free_irq_vectors(priv->pdev);
pci_set_drvdata(pdev, NULL);
iounmap(priv->regs);
@@ -1832,6 +1835,10 @@ static const struct pci_device_id tn40_id_table[] = {
PCI_VENDOR_ID_ASUSTEK, 0x8709) },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, 0x4022,
PCI_VENDOR_ID_EDIMAX, 0x8103) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, PCI_DEVICE_ID_TEHUTI_TN9510,
+ PCI_VENDOR_ID_TEHUTI, 0x3015) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, PCI_DEVICE_ID_TEHUTI_TN9510,
+ PCI_VENDOR_ID_EDIMAX, 0x8102) },
{ }
};
diff --git a/drivers/net/ethernet/tehuti/tn40.h b/drivers/net/ethernet/tehuti/tn40.h
index 490781fe5120..25da8686d469 100644
--- a/drivers/net/ethernet/tehuti/tn40.h
+++ b/drivers/net/ethernet/tehuti/tn40.h
@@ -4,10 +4,13 @@
#ifndef _TN40_H_
#define _TN40_H_
+#include <linux/property.h>
#include "tn40_regs.h"
#define TN40_DRV_NAME "tn40xx"
+#define PCI_DEVICE_ID_TEHUTI_TN9510 0x4025
+
#define TN40_MDIO_SPEED_1MHZ (1)
#define TN40_MDIO_SPEED_6MHZ (6)
@@ -102,10 +105,39 @@ struct tn40_txdb {
int size; /* Number of elements in the db */
};
+#define NODE_PROP(_NAME, _PROP) ( \
+ (const struct software_node) { \
+ .name = _NAME, \
+ .properties = _PROP, \
+ })
+
+#define NODE_PAR_PROP(_NAME, _PAR, _PROP) ( \
+ (const struct software_node) { \
+ .name = _NAME, \
+ .parent = _PAR, \
+ .properties = _PROP, \
+ })
+
+enum tn40_swnodes {
+ SWNODE_MDIO,
+ SWNODE_PHY,
+ SWNODE_MAX
+};
+
+struct tn40_nodes {
+ char phy_name[32];
+ char mdio_name[32];
+ struct property_entry phy_props[3];
+ struct software_node swnodes[SWNODE_MAX];
+ const struct software_node *group[SWNODE_MAX + 1];
+};
+
struct tn40_priv {
struct net_device *ndev;
struct pci_dev *pdev;
+ struct tn40_nodes nodes;
+
struct napi_struct napi;
/* RX FIFOs: 1 for data (full) descs, and 2 for free descs */
struct tn40_rxd_fifo rxd_fifo0;
@@ -225,6 +257,7 @@ static inline void tn40_write_reg(struct tn40_priv *priv, u32 reg, u32 val)
int tn40_set_link_speed(struct tn40_priv *priv, u32 speed);
+void tn40_swnodes_cleanup(struct tn40_priv *priv);
int tn40_mdiobus_init(struct tn40_priv *priv);
int tn40_phy_register(struct tn40_priv *priv);
diff --git a/drivers/net/ethernet/tehuti/tn40_mdio.c b/drivers/net/ethernet/tehuti/tn40_mdio.c
index af18615d64a8..fb1a4a2e4dbc 100644
--- a/drivers/net/ethernet/tehuti/tn40_mdio.c
+++ b/drivers/net/ethernet/tehuti/tn40_mdio.c
@@ -14,6 +14,8 @@
(FIELD_PREP(TN40_MDIO_PRTAD_MASK, (port))))
#define TN40_MDIO_CMD_READ BIT(15)
+#define AQR105_FIRMWARE "tehuti/aqr105-tn40xx.cld"
+
static void tn40_mdio_set_speed(struct tn40_priv *priv, u32 speed)
{
void __iomem *regs = priv->regs;
@@ -111,6 +113,56 @@ static int tn40_mdio_write_c45(struct mii_bus *mii_bus, int addr, int devnum,
return tn40_mdio_write(mii_bus->priv, addr, devnum, regnum, val);
}
+/* Registers an MDIO node and an AQR105 PHY at address 1:
+ * tn40_mdio-%id {
+ * ethernet-phy@1 {
+ * compatible = "ethernet-phy-id03a1.b4a3";
+ * reg = <1>;
+ * firmware-name = AQR105_FIRMWARE;
+ * };
+ * };
+ */
+static int tn40_swnodes_register(struct tn40_priv *priv)
+{
+ struct tn40_nodes *nodes = &priv->nodes;
+ struct pci_dev *pdev = priv->pdev;
+ struct software_node *swnodes;
+ u32 id;
+
+ id = pci_dev_id(pdev);
+
+ snprintf(nodes->phy_name, sizeof(nodes->phy_name), "ethernet-phy@1");
+ snprintf(nodes->mdio_name, sizeof(nodes->mdio_name), "tn40_mdio-%x",
+ id);
+
+ swnodes = nodes->swnodes;
+
+ swnodes[SWNODE_MDIO] = NODE_PROP(nodes->mdio_name, NULL);
+
+ nodes->phy_props[0] = PROPERTY_ENTRY_STRING("compatible",
+ "ethernet-phy-id03a1.b4a3");
+ nodes->phy_props[1] = PROPERTY_ENTRY_U32("reg", 1);
+ nodes->phy_props[2] = PROPERTY_ENTRY_STRING("firmware-name",
+ AQR105_FIRMWARE);
+ swnodes[SWNODE_PHY] = NODE_PAR_PROP(nodes->phy_name,
+ &swnodes[SWNODE_MDIO],
+ nodes->phy_props);
+
+ nodes->group[SWNODE_PHY] = &swnodes[SWNODE_PHY];
+ nodes->group[SWNODE_MDIO] = &swnodes[SWNODE_MDIO];
+ return software_node_register_node_group(nodes->group);
+}
+
+void tn40_swnodes_cleanup(struct tn40_priv *priv)
+{
+ /* cleanup of swnodes is only needed for AQR105-based cards */
+ if (priv->pdev->device == PCI_DEVICE_ID_TEHUTI_TN9510) {
+ fwnode_handle_put(dev_fwnode(&priv->mdio->dev));
+ device_remove_software_node(&priv->mdio->dev);
+ software_node_unregister_node_group(priv->nodes.group);
+ }
+}
+
int tn40_mdiobus_init(struct tn40_priv *priv)
{
struct pci_dev *pdev = priv->pdev;
@@ -129,14 +181,40 @@ int tn40_mdiobus_init(struct tn40_priv *priv)
bus->read_c45 = tn40_mdio_read_c45;
bus->write_c45 = tn40_mdio_write_c45;
+ priv->mdio = bus;
+ /* provide swnodes for AQR105-based cards only */
+ if (pdev->device == PCI_DEVICE_ID_TEHUTI_TN9510) {
+ ret = tn40_swnodes_register(priv);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register software nodes: %d\n", ret);
+ return ret;
+ }
+
+ ret = device_add_software_node(&bus->dev,
+ priv->nodes.group[SWNODE_MDIO]);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "device_add_software_node failed: %d\n", ret);
+ goto err_swnodes_unregister;
+ }
+ }
+
+ tn40_mdio_set_speed(priv, TN40_MDIO_SPEED_6MHZ);
ret = devm_mdiobus_register(&pdev->dev, bus);
if (ret) {
dev_err(&pdev->dev, "failed to register mdiobus %d %u %u\n",
ret, bus->state, MDIOBUS_UNREGISTERED);
- return ret;
+ goto err_swnodes_cleanup;
}
- tn40_mdio_set_speed(priv, TN40_MDIO_SPEED_6MHZ);
- priv->mdio = bus;
return 0;
+
+err_swnodes_unregister:
+ software_node_unregister_node_group(priv->nodes.group);
+ return ret;
+err_swnodes_cleanup:
+ tn40_swnodes_cleanup(priv);
+ return ret;
}
+
+MODULE_FIRMWARE(AQR105_FIRMWARE);
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 3a13d60a947a..a07c910c497a 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -205,6 +205,7 @@ config TI_ICSSG_PRUETH_SR1
select PHYLIB
select TI_ICSS_IEP
select TI_K3_CPPI_DESC_POOL
+ select PAGE_POOL
depends on PRU_REMOTEPROC
depends on NET_SWITCHDEV
depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 2806238629f8..b3118bf0757e 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -164,6 +164,7 @@
#define AM65_CPSW_CPPI_TX_PKT_TYPE 0x7
/* XDP */
+#define AM65_CPSW_XDP_TX BIT(2)
#define AM65_CPSW_XDP_CONSUMED BIT(1)
#define AM65_CPSW_XDP_REDIRECT BIT(0)
#define AM65_CPSW_XDP_PASS 0
@@ -829,19 +830,19 @@ static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
{
struct am65_cpsw_tx_chn *tx_chn = data;
enum am65_cpsw_tx_buf_type buf_type;
+ struct am65_cpsw_tx_swdata *swdata;
struct cppi5_host_desc_t *desc_tx;
struct xdp_frame *xdpf;
struct sk_buff *skb;
- void **swdata;
desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_tx);
buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
- skb = *(swdata);
+ skb = swdata->skb;
dev_kfree_skb_any(skb);
} else {
- xdpf = *(swdata);
+ xdpf = swdata->xdpf;
xdp_return_frame(xdpf);
}
@@ -1098,10 +1099,10 @@ static int am65_cpsw_xdp_tx_frame(struct net_device *ndev,
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
struct cppi5_host_desc_t *host_desc;
+ struct am65_cpsw_tx_swdata *swdata;
struct netdev_queue *netif_txq;
dma_addr_t dma_desc, dma_buf;
u32 pkt_len = xdpf->len;
- void **swdata;
int ret;
host_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
@@ -1131,7 +1132,8 @@ static int am65_cpsw_xdp_tx_frame(struct net_device *ndev,
cppi5_hdesc_attach_buf(host_desc, dma_buf, pkt_len, dma_buf, pkt_len);
swdata = cppi5_hdesc_get_swdata(host_desc);
- *(swdata) = xdpf;
+ swdata->ndev = ndev;
+ swdata->xdpf = xdpf;
/* Report BQL before sending the packet */
netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
@@ -1167,17 +1169,16 @@ pool_free:
static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
struct am65_cpsw_port *port,
- struct xdp_buff *xdp,
- int cpu, int *len)
+ struct xdp_buff *xdp, int *len)
{
struct am65_cpsw_common *common = flow->common;
struct net_device *ndev = port->ndev;
int ret = AM65_CPSW_XDP_CONSUMED;
struct am65_cpsw_tx_chn *tx_chn;
struct netdev_queue *netif_txq;
+ int cpu = smp_processor_id();
struct xdp_frame *xdpf;
struct bpf_prog *prog;
- struct page *page;
int pkt_len;
u32 act;
int err;
@@ -1193,8 +1194,7 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
switch (act) {
case XDP_PASS:
- ret = AM65_CPSW_XDP_PASS;
- goto out;
+ return AM65_CPSW_XDP_PASS;
case XDP_TX:
tx_chn = &common->tx_chns[cpu % AM65_CPSW_MAX_QUEUES];
netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
@@ -1213,15 +1213,13 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
goto drop;
dev_sw_netstats_rx_add(ndev, pkt_len);
- ret = AM65_CPSW_XDP_CONSUMED;
- goto out;
+ return AM65_CPSW_XDP_TX;
case XDP_REDIRECT:
if (unlikely(xdp_do_redirect(ndev, xdp, prog)))
goto drop;
dev_sw_netstats_rx_add(ndev, pkt_len);
- ret = AM65_CPSW_XDP_REDIRECT;
- goto out;
+ return AM65_CPSW_XDP_REDIRECT;
default:
bpf_warn_invalid_xdp_action(ndev, prog, act);
fallthrough;
@@ -1233,10 +1231,6 @@ drop:
ndev->stats.rx_dropped++;
}
- page = virt_to_head_page(xdp->data);
- am65_cpsw_put_page(flow, page, true);
-
-out:
return ret;
}
@@ -1274,7 +1268,7 @@ static void am65_cpsw_nuss_rx_csum(struct sk_buff *skb, u32 csum_info)
}
static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
- int cpu, int *xdp_state)
+ int *xdp_state)
{
struct am65_cpsw_rx_chn *rx_chn = &flow->common->rx_chns;
u32 buf_dma_len, pkt_len, port_id = 0, csum_info;
@@ -1334,8 +1328,13 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq[flow->id]);
xdp_prepare_buff(&xdp, page_addr, AM65_CPSW_HEADROOM,
pkt_len, false);
- *xdp_state = am65_cpsw_run_xdp(flow, port, &xdp,
- cpu, &pkt_len);
+ *xdp_state = am65_cpsw_run_xdp(flow, port, &xdp, &pkt_len);
+ if (*xdp_state == AM65_CPSW_XDP_CONSUMED) {
+ page = virt_to_head_page(xdp.data);
+ am65_cpsw_put_page(flow, page, true);
+ goto allocate;
+ }
+
if (*xdp_state != AM65_CPSW_XDP_PASS)
goto allocate;
@@ -1401,7 +1400,6 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
{
struct am65_cpsw_rx_flow *flow = am65_cpsw_napi_to_rx_flow(napi_rx);
struct am65_cpsw_common *common = flow->common;
- int cpu = smp_processor_id();
int xdp_state_or = 0;
int cur_budget, ret;
int xdp_state;
@@ -1410,7 +1408,7 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
/* process only this flow */
cur_budget = budget;
while (cur_budget--) {
- ret = am65_cpsw_nuss_rx_packets(flow, cpu, &xdp_state);
+ ret = am65_cpsw_nuss_rx_packets(flow, &xdp_state);
xdp_state_or |= xdp_state;
if (ret)
break;
@@ -1438,52 +1436,6 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
return num_rx;
}
-static struct sk_buff *
-am65_cpsw_nuss_tx_compl_packet_skb(struct am65_cpsw_tx_chn *tx_chn,
- dma_addr_t desc_dma)
-{
- struct cppi5_host_desc_t *desc_tx;
- struct sk_buff *skb;
- void **swdata;
-
- desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
- desc_dma);
- swdata = cppi5_hdesc_get_swdata(desc_tx);
- skb = *(swdata);
- am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
-
- am65_cpts_tx_timestamp(tx_chn->common->cpts, skb);
-
- dev_sw_netstats_tx_add(skb->dev, 1, skb->len);
-
- return skb;
-}
-
-static struct xdp_frame *
-am65_cpsw_nuss_tx_compl_packet_xdp(struct am65_cpsw_common *common,
- struct am65_cpsw_tx_chn *tx_chn,
- dma_addr_t desc_dma,
- struct net_device **ndev)
-{
- struct cppi5_host_desc_t *desc_tx;
- struct am65_cpsw_port *port;
- struct xdp_frame *xdpf;
- u32 port_id = 0;
- void **swdata;
-
- desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
- cppi5_desc_get_tags_ids(&desc_tx->hdr, NULL, &port_id);
- swdata = cppi5_hdesc_get_swdata(desc_tx);
- xdpf = *(swdata);
- am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
-
- port = am65_common_get_port(common, port_id);
- dev_sw_netstats_tx_add(port->ndev, 1, xdpf->len);
- *ndev = port->ndev;
-
- return xdpf;
-}
-
static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn, struct net_device *ndev,
struct netdev_queue *netif_txq)
{
@@ -1504,13 +1456,17 @@ static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn, struct net_d
static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
int chn, unsigned int budget, bool *tdown)
{
+ bool single_port = AM65_CPSW_IS_CPSW2G(common);
enum am65_cpsw_tx_buf_type buf_type;
+ struct am65_cpsw_tx_swdata *swdata;
+ struct cppi5_host_desc_t *desc_tx;
struct device *dev = common->dev;
struct am65_cpsw_tx_chn *tx_chn;
struct netdev_queue *netif_txq;
unsigned int total_bytes = 0;
struct net_device *ndev;
struct xdp_frame *xdpf;
+ unsigned int pkt_len;
struct sk_buff *skb;
dma_addr_t desc_dma;
int res, num_tx = 0;
@@ -1518,9 +1474,12 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
tx_chn = &common->tx_chns[chn];
while (true) {
- spin_lock(&tx_chn->lock);
+ if (!single_port)
+ spin_lock(&tx_chn->lock);
res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
- spin_unlock(&tx_chn->lock);
+ if (!single_port)
+ spin_unlock(&tx_chn->lock);
+
if (res == -ENODATA)
break;
@@ -1531,27 +1490,43 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
break;
}
+ desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
+ desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_tx);
+ ndev = swdata->ndev;
buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
- skb = am65_cpsw_nuss_tx_compl_packet_skb(tx_chn, desc_dma);
- ndev = skb->dev;
- total_bytes = skb->len;
+ skb = swdata->skb;
+ am65_cpts_tx_timestamp(tx_chn->common->cpts, skb);
+ pkt_len = skb->len;
napi_consume_skb(skb, budget);
} else {
- xdpf = am65_cpsw_nuss_tx_compl_packet_xdp(common, tx_chn,
- desc_dma, &ndev);
- total_bytes = xdpf->len;
+ xdpf = swdata->xdpf;
+ pkt_len = xdpf->len;
if (buf_type == AM65_CPSW_TX_BUF_TYPE_XDP_TX)
xdp_return_frame_rx_napi(xdpf);
else
xdp_return_frame(xdpf);
}
+
+ total_bytes += pkt_len;
num_tx++;
+ am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
+ dev_sw_netstats_tx_add(ndev, 1, pkt_len);
+ if (!single_port) {
+ /* As packets from multiple ports can be interleaved
+ * on the same channel, we have to figure out the
+ * port/queue for every packet and report it / wake the queue.
+ */
+ netif_txq = netdev_get_tx_queue(ndev, chn);
+ netdev_tx_completed_queue(netif_txq, 1, pkt_len);
+ am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
+ }
+ }
+ if (single_port) {
netif_txq = netdev_get_tx_queue(ndev, chn);
-
netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
-
am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
}
@@ -1560,66 +1535,6 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
return num_tx;
}
-static int am65_cpsw_nuss_tx_compl_packets_2g(struct am65_cpsw_common *common,
- int chn, unsigned int budget, bool *tdown)
-{
- enum am65_cpsw_tx_buf_type buf_type;
- struct device *dev = common->dev;
- struct am65_cpsw_tx_chn *tx_chn;
- struct netdev_queue *netif_txq;
- unsigned int total_bytes = 0;
- struct net_device *ndev;
- struct xdp_frame *xdpf;
- struct sk_buff *skb;
- dma_addr_t desc_dma;
- int res, num_tx = 0;
-
- tx_chn = &common->tx_chns[chn];
-
- while (true) {
- res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
- if (res == -ENODATA)
- break;
-
- if (cppi5_desc_is_tdcm(desc_dma)) {
- if (atomic_dec_and_test(&common->tdown_cnt))
- complete(&common->tdown_complete);
- *tdown = true;
- break;
- }
-
- buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
- if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
- skb = am65_cpsw_nuss_tx_compl_packet_skb(tx_chn, desc_dma);
- ndev = skb->dev;
- total_bytes += skb->len;
- napi_consume_skb(skb, budget);
- } else {
- xdpf = am65_cpsw_nuss_tx_compl_packet_xdp(common, tx_chn,
- desc_dma, &ndev);
- total_bytes += xdpf->len;
- if (buf_type == AM65_CPSW_TX_BUF_TYPE_XDP_TX)
- xdp_return_frame_rx_napi(xdpf);
- else
- xdp_return_frame(xdpf);
- }
- num_tx++;
- }
-
- if (!num_tx)
- return 0;
-
- netif_txq = netdev_get_tx_queue(ndev, chn);
-
- netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
-
- am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
-
- dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx);
-
- return num_tx;
-}
-
static enum hrtimer_restart am65_cpsw_nuss_tx_timer_callback(struct hrtimer *timer)
{
struct am65_cpsw_tx_chn *tx_chns =
@@ -1635,13 +1550,8 @@ static int am65_cpsw_nuss_tx_poll(struct napi_struct *napi_tx, int budget)
bool tdown = false;
int num_tx;
- if (AM65_CPSW_IS_CPSW2G(tx_chn->common))
- num_tx = am65_cpsw_nuss_tx_compl_packets_2g(tx_chn->common, tx_chn->id,
- budget, &tdown);
- else
- num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common,
- tx_chn->id, budget, &tdown);
-
+ num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common,
+ tx_chn->id, budget, &tdown);
if (num_tx >= budget)
return budget;
@@ -1685,12 +1595,12 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc;
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpsw_tx_swdata *swdata;
struct device *dev = common->dev;
struct am65_cpsw_tx_chn *tx_chn;
struct netdev_queue *netif_txq;
dma_addr_t desc_dma, buf_dma;
int ret, q_idx, i;
- void **swdata;
u32 *psdata;
u32 pkt_len;
@@ -1736,7 +1646,8 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
swdata = cppi5_hdesc_get_swdata(first_desc);
- *(swdata) = skb;
+ swdata->ndev = ndev;
+ swdata->skb = skb;
psdata = cppi5_hdesc_get_psdata(first_desc);
/* HW csum offload if enabled */
@@ -2306,13 +2217,17 @@ static void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
{
struct device *dev = common->dev;
+ struct am65_cpsw_tx_chn *tx_chn;
int i, ret = 0;
for (i = 0; i < common->tx_ch_num; i++) {
- struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
+ tx_chn = &common->tx_chns[i];
+
+ hrtimer_setup(&tx_chn->tx_hrtimer, &am65_cpsw_nuss_tx_timer_callback,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
- hrtimer_init(&tx_chn->tx_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
- tx_chn->tx_hrtimer.function = &am65_cpsw_nuss_tx_timer_callback;
+ netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx,
+ am65_cpsw_nuss_tx_poll);
ret = devm_request_irq(dev, tx_chn->irq,
am65_cpsw_nuss_tx_irq,
@@ -2323,19 +2238,16 @@ static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
tx_chn->id, tx_chn->irq, ret);
goto err;
}
-
- netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx,
- am65_cpsw_nuss_tx_poll);
}
return 0;
err:
- for (--i ; i >= 0 ; i--) {
- struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
-
- netif_napi_del(&tx_chn->napi_tx);
+ netif_napi_del(&tx_chn->napi_tx);
+ for (--i; i >= 0; i--) {
+ tx_chn = &common->tx_chns[i];
devm_free_irq(dev, tx_chn->irq, tx_chn);
+ netif_napi_del(&tx_chn->napi_tx);
}
return ret;
@@ -2565,9 +2477,11 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
snprintf(flow->name,
sizeof(flow->name), "%s-rx%d",
dev_name(dev), i);
- hrtimer_init(&flow->rx_hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED);
- flow->rx_hrtimer.function = &am65_cpsw_nuss_rx_timer_callback;
+ hrtimer_setup(&flow->rx_hrtimer, &am65_cpsw_nuss_rx_timer_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
+
+ netif_napi_add(common->dma_ndev, &flow->napi_rx,
+ am65_cpsw_nuss_rx_poll);
ret = devm_request_irq(dev, flow->irq,
am65_cpsw_nuss_rx_irq,
@@ -2577,11 +2491,8 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
dev_err(dev, "failure requesting rx %d irq %u, %d\n",
i, flow->irq, ret);
flow->irq = -EINVAL;
- goto err_flow;
+ goto err_request_irq;
}
-
- netif_napi_add(common->dma_ndev, &flow->napi_rx,
- am65_cpsw_nuss_rx_poll);
}
/* setup classifier to route priorities to flows */
@@ -2589,11 +2500,14 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
return 0;
+err_request_irq:
+ netif_napi_del(&flow->napi_rx);
+
err_flow:
- for (--i; i >= 0 ; i--) {
+ for (--i; i >= 0; i--) {
flow = &rx_chn->flows[i];
- netif_napi_del(&flow->napi_rx);
devm_free_irq(dev, flow->irq, flow);
+ netif_napi_del(&flow->napi_rx);
}
err:
@@ -3578,6 +3492,10 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
__be64 id_temp;
int ret, i;
+ BUILD_BUG_ON_MSG(sizeof(struct am65_cpsw_tx_swdata) > AM65_CPSW_NAV_SW_DATA_SIZE,
+ "TX SW_DATA size exceeds AM65_CPSW_NAV_SW_DATA_SIZE");
+ BUILD_BUG_ON_MSG(sizeof(struct am65_cpsw_swdata) > AM65_CPSW_NAV_SW_DATA_SIZE,
+ "SW_DATA size exceeds AM65_CPSW_NAV_SW_DATA_SIZE");
common = devm_kzalloc(dev, sizeof(struct am65_cpsw_common), GFP_KERNEL);
if (!common)
return -ENOMEM;
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
index e7832a5cf3cc..917c37e4e89b 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
@@ -104,6 +104,14 @@ struct am65_cpsw_rx_flow {
char name[32];
};
+struct am65_cpsw_tx_swdata {
+ struct net_device *ndev;
+ union {
+ struct sk_buff *skb;
+ struct xdp_frame *xdpf;
+ };
+};
+
struct am65_cpsw_swdata {
u32 flow_id;
struct page *page;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 0cb6fa6e5b7d..a984b7d84e5e 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -351,6 +351,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
int ret = 0, port, ch = xmeta->ch;
int headroom = CPSW_HEADROOM_NA;
struct net_device *ndev = xmeta->ndev;
+ u32 metasize = 0;
struct cpsw_priv *priv;
struct page_pool *pool;
struct sk_buff *skb;
@@ -400,7 +401,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
size -= CPSW_RX_VLAN_ENCAP_HDR_SIZE;
}
- xdp_prepare_buff(&xdp, pa, headroom, size, false);
+ xdp_prepare_buff(&xdp, pa, headroom, size, true);
port = priv->emac_port + cpsw->data.dual_emac;
ret = cpsw_run_xdp(priv, ch, &xdp, page, port, &len);
@@ -408,6 +409,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
goto requeue;
headroom = xdp.data - xdp.data_hard_start;
+ metasize = xdp.data - xdp.data_meta;
/* XDP prog can modify vlan tag, so can't use encap header */
status &= ~CPDMA_RX_VLAN_ENCAP;
@@ -423,6 +425,8 @@ static void cpsw_rx_handler(void *token, int len, int status)
skb_reserve(skb, headroom);
skb_put(skb, len);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
skb->dev = ndev;
if (status & CPDMA_RX_VLAN_ENCAP)
cpsw_rx_vlan_encap(skb);
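The metasize propagated via skb_metadata_set() comes from an XDP program growing the metadata area in front of the packet. A hedged sketch of such a program, using a 4-byte tag with an illustrative value:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int example_set_meta(struct xdp_md *ctx)
{
	void *data, *meta;
	__u32 *tag;

	/* Grow the metadata area by 4 bytes in front of the packet */
	if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*tag)))
		return XDP_PASS;

	data = (void *)(long)ctx->data;
	meta = (void *)(long)ctx->data_meta;
	tag = meta;
	if ((void *)(tag + 1) > data)	/* verifier bounds check */
		return XDP_PASS;

	*tag = 0xcafe;			/* illustrative value */
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";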
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index cec0a90659d9..5b5b52e4e7a7 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -293,6 +293,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
struct page_pool *pool;
struct sk_buff *skb;
struct xdp_buff xdp;
+ u32 metasize = 0;
int ret = 0;
dma_addr_t dma;
@@ -345,13 +346,14 @@ static void cpsw_rx_handler(void *token, int len, int status)
size -= CPSW_RX_VLAN_ENCAP_HDR_SIZE;
}
- xdp_prepare_buff(&xdp, pa, headroom, size, false);
+ xdp_prepare_buff(&xdp, pa, headroom, size, true);
ret = cpsw_run_xdp(priv, ch, &xdp, page, priv->emac_port, &len);
if (ret != CPSW_XDP_PASS)
goto requeue;
headroom = xdp.data - xdp.data_hard_start;
+ metasize = xdp.data - xdp.data_meta;
/* XDP prog can modify vlan tag, so can't use encap header */
status &= ~CPDMA_RX_VLAN_ENCAP;
@@ -368,6 +370,8 @@ static void cpsw_rx_handler(void *token, int len, int status)
skb->offload_fwd_mark = priv->offload_fwd_mark;
skb_reserve(skb, headroom);
skb_put(skb, len);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
skb->dev = ndev;
if (status & CPDMA_RX_VLAN_ENCAP)
cpsw_rx_vlan_encap(skb);
@@ -1409,7 +1413,7 @@ static int cpsw_create_ports(struct cpsw_common *cpsw)
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_TC;
- ndev->netns_local = true;
+ ndev->netns_immutable = true;
ndev->xdp_features = NETDEV_XDP_ACT_BASIC |
NETDEV_XDP_ACT_REDIRECT |
@@ -1418,6 +1422,7 @@ static int cpsw_create_ports(struct cpsw_common *cpsw)
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
SET_NETDEV_DEV(ndev, dev);
+ ndev->dev.of_node = slave_data->slave_node;
if (!napi_ndev) {
/* CPSW Host port CPDMA interface is shared between
diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c
index d59c1744840a..b4a34c57b7b4 100644
--- a/drivers/net/ethernet/ti/icssg/icss_iep.c
+++ b/drivers/net/ethernet/ti/icssg/icss_iep.c
@@ -406,9 +406,25 @@ static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns)
static int icss_iep_perout_enable_hw(struct icss_iep *iep,
struct ptp_perout_request *req, int on)
{
+ struct timespec64 ts;
+ u64 ns_start;
+ u64 ns_width;
int ret;
u64 cmp;
+ /* Calculate width of the signal for PPS/PEROUT handling */
+ ts.tv_sec = req->on.sec;
+ ts.tv_nsec = req->on.nsec;
+ ns_width = timespec64_to_ns(&ts);
+
+ if (req->flags & PTP_PEROUT_PHASE) {
+ ts.tv_sec = req->phase.sec;
+ ts.tv_nsec = req->phase.nsec;
+ ns_start = timespec64_to_ns(&ts);
+ } else {
+ ns_start = 0;
+ }
+
if (iep->ops && iep->ops->perout_enable) {
ret = iep->ops->perout_enable(iep->clockops_data, req, on, &cmp);
if (ret)
@@ -419,10 +435,12 @@ static int icss_iep_perout_enable_hw(struct icss_iep *iep,
regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(cmp));
if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(cmp));
- /* Configure SYNC, 1ms pulse width */
- regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, 1000000);
+ /* Configure SYNC, based on req on width */
+ regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG,
+ div_u64(ns_width, iep->def_inc));
regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
- regmap_write(iep->map, ICSS_IEP_SYNC_START_REG, 0);
+ regmap_write(iep->map, ICSS_IEP_SYNC_START_REG,
+ div_u64(ns_start, iep->def_inc));
regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0); /* one-shot mode */
/* Enable CMP 1 */
regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
@@ -447,6 +465,10 @@ static int icss_iep_perout_enable_hw(struct icss_iep *iep,
+ req->period.nsec;
icss_iep_update_to_next_boundary(iep, start_ns);
+ regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG,
+ div_u64(ns_width, iep->def_inc));
+ regmap_write(iep->map, ICSS_IEP_SYNC_START_REG,
+ div_u64(ns_start, iep->def_inc));
/* Enable Sync in single shot mode */
regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG,
IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN);
@@ -474,7 +496,37 @@ static int icss_iep_perout_enable_hw(struct icss_iep *iep,
static int icss_iep_perout_enable(struct icss_iep *iep,
struct ptp_perout_request *req, int on)
{
- return -EOPNOTSUPP;
+ int ret = 0;
+
+ /* Reject requests with unsupported flags */
+ if (req->flags & ~(PTP_PEROUT_DUTY_CYCLE |
+ PTP_PEROUT_PHASE))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&iep->ptp_clk_mutex);
+
+ if (iep->pps_enabled) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ if (iep->perout_enabled == !!on)
+ goto exit;
+
+ /* Set default "on" time (1ms) for the signal if not passed by the app */
+ if (!(req->flags & PTP_PEROUT_DUTY_CYCLE)) {
+ req->on.sec = 0;
+ req->on.nsec = NSEC_PER_MSEC;
+ }
+
+ ret = icss_iep_perout_enable_hw(iep, req, on);
+ if (!ret)
+ iep->perout_enabled = !!on;
+
+exit:
+ mutex_unlock(&iep->ptp_clk_mutex);
+
+ return ret;
}
static void icss_iep_cap_cmp_work(struct work_struct *work)
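With the flag check above in place, the handler accepts PTP_PEROUT_DUTY_CYCLE and PTP_PEROUT_PHASE. A sketch of a userspace request exercising both through the standard PTP character device; the device path and timing values are examples:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int example_enable_perout(void)
{
	struct ptp_perout_request req = {
		.index  = 0,
		.flags  = PTP_PEROUT_DUTY_CYCLE | PTP_PEROUT_PHASE,
		.period = { .sec = 1, .nsec = 0 },		/* 1 Hz */
		.phase  = { .sec = 0, .nsec = 250000000 },	/* 250 ms offset */
		.on     = { .sec = 0, .nsec = 100000000 },	/* 100 ms high */
	};
	int fd = open("/dev/ptp0", O_RDWR);

	if (fd < 0)
		return -1;
	return ioctl(fd, PTP_PEROUT_REQUEST2, &req);
}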
@@ -549,10 +601,13 @@ static int icss_iep_pps_enable(struct icss_iep *iep, int on)
if (on) {
ns = icss_iep_gettime(iep, NULL);
ts = ns_to_timespec64(ns);
+ rq.perout.flags = 0;
rq.perout.period.sec = 1;
rq.perout.period.nsec = 0;
rq.perout.start.sec = ts.tv_sec + 2;
rq.perout.start.nsec = 0;
+ rq.perout.on.sec = 0;
+ rq.perout.on.nsec = NSEC_PER_MSEC;
ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
} else {
ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c
index 74f0f200a89d..46f500b90b17 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_common.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_common.c
@@ -45,6 +45,11 @@ void prueth_cleanup_rx_chns(struct prueth_emac *emac,
struct prueth_rx_chn *rx_chn,
int max_rflows)
{
+ if (rx_chn->pg_pool) {
+ page_pool_destroy(rx_chn->pg_pool);
+ rx_chn->pg_pool = NULL;
+ }
+
if (rx_chn->desc_pool)
k3_cppi_desc_pool_destroy(rx_chn->desc_pool);
@@ -93,11 +98,20 @@ void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
{
struct cppi5_host_desc_t *first_desc, *next_desc;
dma_addr_t buf_dma, next_desc_dma;
+ struct prueth_swdata *swdata;
+ struct page *page;
u32 buf_dma_len;
first_desc = desc;
next_desc = first_desc;
+ swdata = cppi5_hdesc_get_swdata(desc);
+ if (swdata->type == PRUETH_SWDATA_PAGE) {
+ page = swdata->data.page;
+ page_pool_recycle_direct(page->pp, swdata->data.page);
+ goto free_desc;
+ }
+
cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
@@ -121,6 +135,7 @@ void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
}
+free_desc:
k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
}
EXPORT_SYMBOL_GPL(prueth_xmit_free);
@@ -131,12 +146,13 @@ int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
struct net_device *ndev = emac->ndev;
struct cppi5_host_desc_t *desc_tx;
struct netdev_queue *netif_txq;
+ struct prueth_swdata *swdata;
struct prueth_tx_chn *tx_chn;
unsigned int total_bytes = 0;
+ struct xdp_frame *xdpf;
struct sk_buff *skb;
dma_addr_t desc_dma;
int res, num_tx = 0;
- void **swdata;
tx_chn = &emac->tx_chns[chn];
@@ -157,20 +173,27 @@ int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_tx);
- /* was this command's TX complete? */
- if (emac->is_sr1 && *(swdata) == emac->cmd_data) {
+ switch (swdata->type) {
+ case PRUETH_SWDATA_SKB:
+ skb = swdata->data.skb;
+ dev_sw_netstats_tx_add(skb->dev, 1, skb->len);
+ total_bytes += skb->len;
+ napi_consume_skb(skb, budget);
+ break;
+ case PRUETH_SWDATA_XDPF:
+ xdpf = swdata->data.xdpf;
+ dev_sw_netstats_tx_add(ndev, 1, xdpf->len);
+ total_bytes += xdpf->len;
+ xdp_return_frame(xdpf);
+ break;
+ default:
+ netdev_err(ndev, "tx_complete: invalid swdata type %d\n", swdata->type);
prueth_xmit_free(tx_chn, desc_tx);
+ ndev->stats.tx_dropped++;
continue;
}
- skb = *(swdata);
prueth_xmit_free(tx_chn, desc_tx);
-
- ndev = skb->dev;
- ndev->stats.tx_packets++;
- ndev->stats.tx_bytes += skb->len;
- total_bytes += skb->len;
- napi_consume_skb(skb, budget);
num_tx++;
}
@@ -249,9 +272,8 @@ int prueth_ndev_add_tx_napi(struct prueth_emac *emac)
struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];
netif_napi_add_tx(emac->ndev, &tx_chn->napi_tx, emac_napi_tx_poll);
- hrtimer_init(&tx_chn->tx_hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED);
- tx_chn->tx_hrtimer.function = &emac_tx_timer_callback;
+ hrtimer_setup(&tx_chn->tx_hrtimer, &emac_tx_timer_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
ret = request_irq(tx_chn->irq, prueth_tx_irq,
IRQF_TRIGGER_HIGH, tx_chn->name,
tx_chn);
@@ -461,17 +483,17 @@ fail:
}
EXPORT_SYMBOL_GPL(prueth_init_rx_chns);
-int prueth_dma_rx_push(struct prueth_emac *emac,
- struct sk_buff *skb,
- struct prueth_rx_chn *rx_chn)
+int prueth_dma_rx_push_mapped(struct prueth_emac *emac,
+ struct prueth_rx_chn *rx_chn,
+ struct page *page, u32 buf_len)
{
struct net_device *ndev = emac->ndev;
struct cppi5_host_desc_t *desc_rx;
- u32 pkt_len = skb_tailroom(skb);
+ struct prueth_swdata *swdata;
dma_addr_t desc_dma;
dma_addr_t buf_dma;
- void **swdata;
+ buf_dma = page_pool_get_dma_addr(page) + PRUETH_HEADROOM;
desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
if (!desc_rx) {
netdev_err(ndev, "rx push: failed to allocate descriptor\n");
@@ -479,25 +501,19 @@ int prueth_dma_rx_push(struct prueth_emac *emac,
}
desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);
- buf_dma = dma_map_single(rx_chn->dma_dev, skb->data, pkt_len, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(rx_chn->dma_dev, buf_dma))) {
- k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
- netdev_err(ndev, "rx push: failed to map rx pkt buffer\n");
- return -EINVAL;
- }
-
cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
PRUETH_NAV_PS_DATA_SIZE);
k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
- cppi5_hdesc_attach_buf(desc_rx, buf_dma, skb_tailroom(skb), buf_dma, skb_tailroom(skb));
+ cppi5_hdesc_attach_buf(desc_rx, buf_dma, buf_len, buf_dma, buf_len);
swdata = cppi5_hdesc_get_swdata(desc_rx);
- *swdata = skb;
+ swdata->type = PRUETH_SWDATA_PAGE;
+ swdata->data.page = page;
- return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0,
+ return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, PRUETH_RX_FLOW_DATA,
desc_rx, desc_dma);
}
-EXPORT_SYMBOL_GPL(prueth_dma_rx_push);
+EXPORT_SYMBOL_GPL(prueth_dma_rx_push_mapped);
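Because the pool hands out pages that are already DMA mapped (buf_dma above comes straight from page_pool_get_dma_addr()), a caller's refill step reduces to allocate-and-push. A minimal sketch of that pattern (illustrative, not verbatim driver code):

static int prueth_refill_one(struct prueth_emac *emac,
			     struct prueth_rx_chn *chn)
{
	struct page *page = page_pool_dev_alloc_pages(chn->pg_pool);

	if (!page)
		return -ENOMEM;

	/* the push derives buf_dma from the page's pool mapping */
	return prueth_dma_rx_push_mapped(emac, chn, page, PRUETH_MAX_PKT_SIZE);
}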
u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns)
{
@@ -535,18 +551,170 @@ void emac_rx_timestamp(struct prueth_emac *emac,
ssh->hwtstamp = ns_to_ktime(ns);
}
-static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id)
+/**
+ * emac_xmit_xdp_frame - transmits an XDP frame
+ * @emac: emac device
+ * @xdpf: data to transmit
+ * @page: page from page pool if already DMA mapped
+ * @q_idx: queue id
+ *
+ * Return: XDP state
+ */
+u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
+ struct xdp_frame *xdpf,
+ struct page *page,
+ unsigned int q_idx)
+{
+ struct cppi5_host_desc_t *first_desc;
+ struct net_device *ndev = emac->ndev;
+ struct prueth_tx_chn *tx_chn;
+ dma_addr_t desc_dma, buf_dma;
+ struct prueth_swdata *swdata;
+ u32 *epib;
+ int ret;
+
+ if (q_idx >= PRUETH_MAX_TX_QUEUES) {
+ netdev_err(ndev, "xdp tx: invalid q_id %d\n", q_idx);
+ return ICSSG_XDP_CONSUMED; /* drop */
+ }
+
+ tx_chn = &emac->tx_chns[q_idx];
+
+ first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
+ if (!first_desc) {
+ netdev_dbg(ndev, "xdp tx: failed to allocate descriptor\n");
+ /* nothing mapped or allocated yet, so don't go through
+ * drop_free_descs, which would dereference a NULL descriptor
+ */
+ return ICSSG_XDP_CONSUMED; /* drop */
+ }
+
+ if (page) { /* already DMA mapped by page_pool */
+ buf_dma = page_pool_get_dma_addr(page);
+ buf_dma += xdpf->headroom + sizeof(struct xdp_frame);
+ } else { /* Map the linear buffer */
+ buf_dma = dma_map_single(tx_chn->dma_dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
+ netdev_err(ndev, "xdp tx: failed to map data buffer\n");
+ goto drop_free_descs; /* drop */
+ }
+ }
+
+ cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
+ PRUETH_NAV_PS_DATA_SIZE);
+ cppi5_hdesc_set_pkttype(first_desc, 0);
+ epib = first_desc->epib;
+ epib[0] = 0;
+ epib[1] = 0;
+
+ /* set the dst tag to indicate the internal queue id used by the
+ * firmware, which sits at bits 8..15; bits 0..7 carry the port
+ * number for directed packets in switch mode operation
+ */
+ cppi5_desc_set_tags_ids(&first_desc->hdr, 0, (emac->port_id | (q_idx << 8)));
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
+ cppi5_hdesc_attach_buf(first_desc, buf_dma, xdpf->len, buf_dma, xdpf->len);
+ swdata = cppi5_hdesc_get_swdata(first_desc);
+ if (page) {
+ swdata->type = PRUETH_SWDATA_PAGE;
+ swdata->data.page = page;
+ } else {
+ swdata->type = PRUETH_SWDATA_XDPF;
+ swdata->data.xdpf = xdpf;
+ }
+
+ cppi5_hdesc_set_pktlen(first_desc, xdpf->len);
+ desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
+
+ ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
+ if (ret) {
+ netdev_err(ndev, "xdp tx: push failed: %d\n", ret);
+ goto drop_free_descs;
+ }
+
+ return ICSSG_XDP_TX;
+
+drop_free_descs:
+ prueth_xmit_free(tx_chn, first_desc);
+ return ICSSG_XDP_CONSUMED;
+}
+EXPORT_SYMBOL_GPL(emac_xmit_xdp_frame);
+
+/**
+ * emac_run_xdp - run an XDP program
+ * @emac: emac device
+ * @xdp: XDP buffer containing the frame
+ * @page: page with RX data if already DMA mapped
+ * @len: Rx descriptor packet length
+ *
+ * Return: XDP state
+ */
+static u32 emac_run_xdp(struct prueth_emac *emac, struct xdp_buff *xdp,
+ struct page *page, u32 *len)
+{
+ struct net_device *ndev = emac->ndev;
+ struct bpf_prog *xdp_prog;
+ struct xdp_frame *xdpf;
+ u32 pkt_len = *len;
+ u32 act, result;
+ int q_idx, err;
+
+ xdp_prog = READ_ONCE(emac->xdp_prog);
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ return ICSSG_XDP_PASS;
+ case XDP_TX:
+ /* Send packet to TX ring for immediate transmission */
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf)) {
+ ndev->stats.tx_dropped++;
+ goto drop;
+ }
+
+ q_idx = smp_processor_id() % emac->tx_ch_num;
+ result = emac_xmit_xdp_frame(emac, xdpf, page, q_idx);
+ if (result == ICSSG_XDP_CONSUMED)
+ goto drop;
+
+ dev_sw_netstats_rx_add(ndev, xdpf->len);
+ return result;
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(emac->ndev, xdp, xdp_prog);
+ if (err)
+ goto drop;
+
+ dev_sw_netstats_rx_add(ndev, pkt_len);
+ return ICSSG_XDP_REDIR;
+ default:
+ bpf_warn_invalid_xdp_action(emac->ndev, xdp_prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+drop:
+ trace_xdp_exception(emac->ndev, xdp_prog, act);
+ fallthrough; /* handle aborts by dropping packet */
+ case XDP_DROP:
+ ndev->stats.rx_dropped++;
+ page_pool_recycle_direct(emac->rx_chns.pg_pool, page);
+ return ICSSG_XDP_CONSUMED;
+ }
+}
+
+static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state)
{
struct prueth_rx_chn *rx_chn = &emac->rx_chns;
u32 buf_dma_len, pkt_len, port_id = 0;
struct net_device *ndev = emac->ndev;
struct cppi5_host_desc_t *desc_rx;
- struct sk_buff *skb, *new_skb;
+ struct prueth_swdata *swdata;
dma_addr_t desc_dma, buf_dma;
- void **swdata;
+ struct page *page, *new_page;
+ struct page_pool *pool;
+ struct sk_buff *skb;
+ struct xdp_buff xdp;
u32 *psdata;
+ void *pa;
int ret;
+ *xdp_state = 0;
+ pool = rx_chn->pg_pool;
ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
if (ret) {
if (ret != -ENODATA)
@@ -558,15 +726,15 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id)
return 0;
desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
-
swdata = cppi5_hdesc_get_swdata(desc_rx);
- skb = *swdata;
-
- psdata = cppi5_hdesc_get_psdata(desc_rx);
- /* RX HW timestamp */
- if (emac->rx_ts_enabled)
- emac_rx_timestamp(emac, skb, psdata);
+ if (swdata->type != PRUETH_SWDATA_PAGE) {
+ netdev_err(ndev, "rx_pkt: invalid swdata->type %d\n", swdata->type);
+ k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+ return 0;
+ }
+ page = swdata->data.page;
+ page_pool_dma_sync_for_cpu(pool, page, 0, PAGE_SIZE);
cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
@@ -574,32 +742,63 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id)
pkt_len -= 4;
cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
- dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
- skb->dev = ndev;
- new_skb = netdev_alloc_skb_ip_align(ndev, PRUETH_MAX_PKT_SIZE);
/* if allocation fails we drop the packet but push the
- * descriptor back to the ring with old skb to prevent a stall
+ * descriptor back to the ring with old page to prevent a stall
*/
- if (!new_skb) {
+ new_page = page_pool_dev_alloc_pages(pool);
+ if (unlikely(!new_page)) {
+ new_page = page;
ndev->stats.rx_dropped++;
- new_skb = skb;
+ goto requeue;
+ }
+
+ pa = page_address(page);
+ if (emac->xdp_prog) {
+ xdp_init_buff(&xdp, PAGE_SIZE, &rx_chn->xdp_rxq);
+ xdp_prepare_buff(&xdp, pa, PRUETH_HEADROOM, pkt_len, false);
+
+ *xdp_state = emac_run_xdp(emac, &xdp, page, &pkt_len);
+ if (*xdp_state == ICSSG_XDP_PASS)
+ skb = xdp_build_skb_from_buff(&xdp);
+ else
+ goto requeue;
} else {
- /* send the filled skb up the n/w stack */
- skb_put(skb, pkt_len);
- if (emac->prueth->is_switch_mode)
- skb->offload_fwd_mark = emac->offload_fwd_mark;
- skb->protocol = eth_type_trans(skb, ndev);
- napi_gro_receive(&emac->napi_rx, skb);
- ndev->stats.rx_bytes += pkt_len;
- ndev->stats.rx_packets++;
+ /* prepare skb and send to n/w stack */
+ skb = napi_build_skb(pa, PAGE_SIZE);
+ if (skb) {
+ /* XDP_PASS skbs get data/len set up by
+ * xdp_build_skb_from_buff() above, so reserve
+ * and put only for the non-XDP skb
+ */
+ skb_reserve(skb, PRUETH_HEADROOM);
+ skb_put(skb, pkt_len);
+ }
+ }
+
+ if (!skb) {
+ ndev->stats.rx_dropped++;
+ page_pool_recycle_direct(pool, page);
+ goto requeue;
 }
+ skb->dev = ndev;
+
+ psdata = cppi5_hdesc_get_psdata(desc_rx);
+ /* RX HW timestamp */
+ if (emac->rx_ts_enabled)
+ emac_rx_timestamp(emac, skb, psdata);
+
+ if (emac->prueth->is_switch_mode)
+ skb->offload_fwd_mark = emac->offload_fwd_mark;
+ skb->protocol = eth_type_trans(skb, ndev);
+
+ skb_mark_for_recycle(skb);
+ napi_gro_receive(&emac->napi_rx, skb);
+ ndev->stats.rx_bytes += pkt_len;
+ ndev->stats.rx_packets++;
+
+requeue:
/* queue another RX DMA */
- ret = prueth_dma_rx_push(emac, new_skb, &emac->rx_chns);
+ ret = prueth_dma_rx_push_mapped(emac, &emac->rx_chns, new_page,
+ PRUETH_MAX_PKT_SIZE);
if (WARN_ON(ret < 0)) {
- dev_kfree_skb_any(new_skb);
+ page_pool_recycle_direct(pool, new_page);
ndev->stats.rx_errors++;
ndev->stats.rx_dropped++;
}
@@ -611,22 +810,19 @@ static void prueth_rx_cleanup(void *data, dma_addr_t desc_dma)
{
struct prueth_rx_chn *rx_chn = data;
struct cppi5_host_desc_t *desc_rx;
- struct sk_buff *skb;
- dma_addr_t buf_dma;
- u32 buf_dma_len;
- void **swdata;
+ struct prueth_swdata *swdata;
+ struct page_pool *pool;
+ struct page *page;
+ pool = rx_chn->pg_pool;
desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_rx);
- skb = *swdata;
- cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
- k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
+ if (swdata->type == PRUETH_SWDATA_PAGE) {
+ page = swdata->data.page;
+ page_pool_recycle_direct(pool, page);
+ }
- dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len,
- DMA_FROM_DEVICE);
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
-
- dev_kfree_skb_any(skb);
}
static int prueth_tx_ts_cookie_get(struct prueth_emac *emac)
@@ -662,13 +858,13 @@ enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev
struct prueth_emac *emac = netdev_priv(ndev);
struct prueth *prueth = emac->prueth;
struct netdev_queue *netif_txq;
+ struct prueth_swdata *swdata;
struct prueth_tx_chn *tx_chn;
dma_addr_t desc_dma, buf_dma;
u32 pkt_len, dst_tag_id;
int i, ret = 0, q_idx;
bool in_tx_ts = 0;
int tx_ts_cookie;
- void **swdata;
u32 *epib;
pkt_len = skb_headlen(skb);
@@ -730,7 +926,8 @@ enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev
k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
swdata = cppi5_hdesc_get_swdata(first_desc);
- *swdata = skb;
+ swdata->type = PRUETH_SWDATA_SKB;
+ swdata->data.skb = skb;
/* Handle the case where skb is fragmented in pages */
cur_desc = first_desc;
@@ -833,15 +1030,27 @@ static void prueth_tx_cleanup(void *data, dma_addr_t desc_dma)
{
struct prueth_tx_chn *tx_chn = data;
struct cppi5_host_desc_t *desc_tx;
+ struct prueth_swdata *swdata;
+ struct xdp_frame *xdpf;
struct sk_buff *skb;
- void **swdata;
desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_tx);
- skb = *(swdata);
- prueth_xmit_free(tx_chn, desc_tx);
- dev_kfree_skb_any(skb);
+ switch (swdata->type) {
+ case PRUETH_SWDATA_SKB:
+ skb = swdata->data.skb;
+ dev_kfree_skb_any(skb);
+ break;
+ case PRUETH_SWDATA_XDPF:
+ xdpf = swdata->data.xdpf;
+ xdp_return_frame(xdpf);
+ break;
+ default:
+ break;
+ }
+
+ prueth_xmit_free(tx_chn, desc_tx);
}
irqreturn_t prueth_rx_irq(int irq, void *dev_id)
@@ -875,15 +1084,18 @@ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget)
PRUETH_RX_FLOW_DATA_SR1 : PRUETH_RX_FLOW_DATA;
int flow = emac->is_sr1 ?
PRUETH_MAX_RX_FLOWS_SR1 : PRUETH_MAX_RX_FLOWS;
+ int xdp_state_or = 0;
int num_rx = 0;
int cur_budget;
+ u32 xdp_state;
int ret;
while (flow--) {
cur_budget = budget - num_rx;
while (cur_budget--) {
- ret = emac_rx_packet(emac, flow);
+ ret = emac_rx_packet(emac, flow, &xdp_state);
+ xdp_state_or |= xdp_state;
if (ret)
break;
num_rx++;
@@ -893,6 +1105,9 @@ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget)
break;
}
+ if (xdp_state_or & ICSSG_XDP_REDIR)
+ xdp_do_flush();
+
if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) {
if (unlikely(emac->rx_pace_timeout_ns)) {
hrtimer_start(&emac->rx_hrtimer,
@@ -907,29 +1122,71 @@ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget)
}
EXPORT_SYMBOL_GPL(icssg_napi_rx_poll);
+static struct page_pool *prueth_create_page_pool(struct prueth_emac *emac,
+ struct device *dma_dev,
+ int size)
+{
+ struct page_pool_params pp_params = { 0 };
+ struct page_pool *pool;
+
+ pp_params.order = 0;
+ pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pp_params.pool_size = size;
+ pp_params.nid = dev_to_node(emac->prueth->dev);
+ pp_params.dma_dir = DMA_BIDIRECTIONAL;
+ pp_params.dev = dma_dev;
+ pp_params.napi = &emac->napi_rx;
+ pp_params.max_len = PAGE_SIZE;
+
+ pool = page_pool_create(&pp_params);
+ if (IS_ERR(pool))
+ netdev_err(emac->ndev, "cannot create rx page pool\n");
+
+ return pool;
+}
+
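Two parameter choices above matter for the XDP paths: dma_dir is DMA_BIDIRECTIONAL rather than DMA_FROM_DEVICE because XDP_TX transmits frames directly out of RX pages, and PP_FLAG_DMA_SYNC_DEV lets the pool sync recycled pages for the device so the driver only needs the explicit page_pool_dma_sync_for_cpu() call on the receive side.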
int prueth_prepare_rx_chan(struct prueth_emac *emac,
struct prueth_rx_chn *chn,
int buf_size)
{
- struct sk_buff *skb;
+ struct page_pool *pool;
+ struct page *page;
int i, ret;
+ pool = prueth_create_page_pool(emac, chn->dma_dev, chn->descs_num);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+
+ chn->pg_pool = pool;
+
for (i = 0; i < chn->descs_num; i++) {
- skb = __netdev_alloc_skb_ip_align(NULL, buf_size, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
+ /* NOTE: memory is not used efficiently here:
+ * a full page (typically 4 KB) is used per buffer
+ * instead of PRUETH_MAX_PKT_SIZE (~1.5 KB)
+ */
+ page = page_pool_dev_alloc_pages(pool);
+ if (!page) {
+ netdev_err(emac->ndev, "couldn't allocate rx page\n");
+ ret = -ENOMEM;
+ goto recycle_alloc_pg;
+ }
- ret = prueth_dma_rx_push(emac, skb, chn);
+ ret = prueth_dma_rx_push_mapped(emac, chn, page, buf_size);
if (ret < 0) {
netdev_err(emac->ndev,
- "cannot submit skb for rx chan %s ret %d\n",
+ "cannot submit page for rx chan %s ret %d\n",
chn->name, ret);
- kfree_skb(skb);
- return ret;
+ page_pool_recycle_direct(pool, page);
+ goto recycle_alloc_pg;
}
}
return 0;
+
+recycle_alloc_pg:
+ prueth_reset_rx_chan(&emac->rx_chns, PRUETH_MAX_RX_FLOWS, false);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(prueth_prepare_rx_chan);
@@ -958,6 +1215,9 @@ void prueth_reset_rx_chan(struct prueth_rx_chn *chn,
prueth_rx_cleanup, !!i);
if (disable)
k3_udma_glue_disable_rx_chn(chn->rx_chn);
+
+ page_pool_destroy(chn->pg_pool);
+ chn->pg_pool = NULL;
}
EXPORT_SYMBOL_GPL(prueth_reset_rx_chan);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index 00ed97860547..443f90fa6557 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -559,6 +559,33 @@ const struct icss_iep_clockops prueth_iep_clockops = {
.perout_enable = prueth_perout_enable,
};
+static int prueth_create_xdp_rxqs(struct prueth_emac *emac)
+{
+ struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
+ struct page_pool *pool = emac->rx_chns.pg_pool;
+ int ret;
+
+ ret = xdp_rxq_info_reg(rxq, emac->ndev, 0, emac->napi_rx.napi_id);
+ if (ret)
+ return ret;
+
+ ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
+ if (ret)
+ xdp_rxq_info_unreg(rxq);
+
+ return ret;
+}
+
+static void prueth_destroy_xdp_rxqs(struct prueth_emac *emac)
+{
+ struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
+
+ if (!xdp_rxq_info_is_reg(rxq))
+ return;
+
+ xdp_rxq_info_unreg(rxq);
+}
+
static int icssg_prueth_add_mcast(struct net_device *ndev, const u8 *addr)
{
struct net_device *real_dev;
@@ -780,10 +807,14 @@ static int emac_ndo_open(struct net_device *ndev)
if (ret)
goto free_tx_ts_irq;
- ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
+ ret = prueth_create_xdp_rxqs(emac);
if (ret)
goto reset_rx_chn;
+ ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
+ if (ret)
+ goto destroy_xdp_rxqs;
+
for (i = 0; i < emac->tx_ch_num; i++) {
ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn);
if (ret)
@@ -809,6 +840,8 @@ reset_tx_chan:
* any SKB for completion. So set false to free_skb
*/
prueth_reset_tx_chan(emac, i, false);
+destroy_xdp_rxqs:
+ prueth_destroy_xdp_rxqs(emac);
reset_rx_chn:
prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, false);
free_tx_ts_irq:
@@ -879,7 +912,7 @@ static int emac_ndo_stop(struct net_device *ndev)
k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);
prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true);
-
+ prueth_destroy_xdp_rxqs(emac);
napi_disable(&emac->napi_rx);
hrtimer_cancel(&emac->rx_hrtimer);
@@ -1024,6 +1057,93 @@ static int emac_ndo_vlan_rx_del_vid(struct net_device *ndev,
return 0;
}
+/**
+ * emac_xdp_xmit - Implements ndo_xdp_xmit
+ * @dev: netdev
+ * @n: number of frames
+ * @frames: array of XDP frame pointers
+ * @flags: XDP extra info
+ *
+ * Return: number of frames successfully sent. Failed frames
+ * will be freed by the XDP core.
+ *
+ * For error cases, a negative errno code is returned and no frames
+ * are transmitted (the caller must handle freeing frames).
+ **/
+static int emac_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags)
+{
+ struct prueth_emac *emac = netdev_priv(dev);
+ struct net_device *ndev = emac->ndev;
+ struct xdp_frame *xdpf;
+ unsigned int q_idx;
+ int nxmit = 0;
+ u32 err;
+ int i;
+
+ q_idx = smp_processor_id() % emac->tx_ch_num;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ for (i = 0; i < n; i++) {
+ xdpf = frames[i];
+ err = emac_xmit_xdp_frame(emac, xdpf, NULL, q_idx);
+ if (err != ICSSG_XDP_TX) {
+ ndev->stats.tx_dropped++;
+ break;
+ }
+ nxmit++;
+ }
+
+ return nxmit;
+}
+
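Frames land in this handler not only from locally attached programs but whenever an XDP program on another device redirects into this netdev. A hedged BPF-side sketch of such a producer (the map layout and section names are assumptions):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u32);
} tx_port SEC(".maps");

SEC("xdp")
int redirect_to_icssg(struct xdp_md *ctx)
{
	/* slot 0 is assumed to hold the icssg ifindex; drop on miss */
	return bpf_redirect_map(&tx_port, 0, XDP_DROP);
}

char _license[] SEC("license") = "GPL";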
+/**
+ * emac_xdp_setup - add/remove an XDP program
+ * @emac: emac device
+ * @bpf: XDP program
+ *
+ * Return: Always 0 (Success)
+ **/
+static int emac_xdp_setup(struct prueth_emac *emac, struct netdev_bpf *bpf)
+{
+ struct bpf_prog *prog = bpf->prog;
+ xdp_features_t val;
+
+ val = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
+ xdp_set_features_flag(emac->ndev, val);
+
+ if (!emac->xdpi.prog && !prog)
+ return 0;
+
+ WRITE_ONCE(emac->xdp_prog, prog);
+
+ xdp_attachment_setup(&emac->xdpi, bpf);
+
+ return 0;
+}
+
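On the userspace side of emac_ndo_bpf(), attachment is the ordinary netlink XDP flow; a hedged libbpf sketch (assumes libbpf >= 0.8; the object file and interface names are placeholders, error handling trimmed):

#include <net/if.h>
#include <linux/if_link.h>
#include <bpf/libbpf.h>

static int attach_xdp(const char *ifname)
{
	struct bpf_object *obj = bpf_object__open_file("xdp_prog.o", NULL);
	struct bpf_program *prog;
	int ifindex = if_nametoindex(ifname);

	if (!obj || !ifindex || bpf_object__load(obj))
		return -1;

	prog = bpf_object__next_program(obj, NULL);
	return bpf_xdp_attach(ifindex, bpf_program__fd(prog),
			      XDP_FLAGS_DRV_MODE, NULL);
}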
+/**
+ * emac_ndo_bpf - implements ndo_bpf for icssg_prueth
+ * @ndev: network adapter device
+ * @bpf: XDP program
+ *
+ * Return: 0 on success, error code on failure.
+ **/
+static int emac_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ return emac_xdp_setup(emac, bpf);
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct net_device_ops emac_netdev_ops = {
.ndo_open = emac_ndo_open,
.ndo_stop = emac_ndo_stop,
@@ -1038,6 +1158,8 @@ static const struct net_device_ops emac_netdev_ops = {
.ndo_fix_features = emac_ndo_fix_features,
.ndo_vlan_rx_add_vid = emac_ndo_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = emac_ndo_vlan_rx_del_vid,
+ .ndo_bpf = emac_ndo_bpf,
+ .ndo_xdp_xmit = emac_xdp_xmit,
};
static int prueth_netdev_init(struct prueth *prueth,
@@ -1066,6 +1188,8 @@ static int prueth_netdev_init(struct prueth *prueth,
emac->prueth = prueth;
emac->ndev = ndev;
emac->port_id = port;
+ emac->xdp_prog = NULL;
+ emac->ndev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
emac->cmd_wq = create_singlethread_workqueue("icssg_cmd_wq");
if (!emac->cmd_wq) {
ret = -ENOMEM;
@@ -1169,9 +1293,8 @@ static int prueth_netdev_init(struct prueth *prueth,
ndev->hw_features |= NETIF_PRUETH_HSR_OFFLOAD_FEATURES;
netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll);
- hrtimer_init(&emac->rx_hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED);
- emac->rx_hrtimer.function = &emac_rx_timer_callback;
+ hrtimer_setup(&emac->rx_hrtimer, &emac_rx_timer_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
prueth->emac[mac] = emac;
return 0;
@@ -1522,6 +1645,9 @@ static int prueth_probe(struct platform_device *pdev)
np = dev->of_node;
+ BUILD_BUG_ON_MSG((sizeof(struct prueth_swdata) > PRUETH_NAV_SW_DATA_SIZE),
+ "insufficient SW_DATA size");
+
prueth = devm_kzalloc(dev, sizeof(*prueth), GFP_KERNEL);
if (!prueth)
return -ENOMEM;
@@ -1679,6 +1805,7 @@ static int prueth_probe(struct platform_device *pdev)
}
spin_lock_init(&prueth->vtbl_lock);
+ spin_lock_init(&prueth->stats_lock);
/* setup netdev interfaces */
if (eth0_node) {
ret = prueth_netdev_init(prueth, eth0_node);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
index 329b46e9ee53..b6be4aa57a61 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
@@ -8,6 +8,8 @@
#ifndef __NET_TI_ICSSG_PRUETH_H
#define __NET_TI_ICSSG_PRUETH_H
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
#include <linux/etherdevice.h>
#include <linux/genalloc.h>
#include <linux/if_vlan.h>
@@ -33,6 +35,8 @@
#include <linux/dma/k3-udma-glue.h>
#include <net/devlink.h>
+#include <net/xdp.h>
+#include <net/page_pool/helpers.h>
#include "icssg_config.h"
#include "icss_iep.h"
@@ -131,6 +135,26 @@ struct prueth_rx_chn {
u32 descs_num;
unsigned int irq[ICSSG_MAX_RFLOWS]; /* separate irq per flow */
char name[32];
+ struct page_pool *pg_pool;
+ struct xdp_rxq_info xdp_rxq;
+};
+
+enum prueth_swdata_type {
+ PRUETH_SWDATA_INVALID = 0,
+ PRUETH_SWDATA_SKB,
+ PRUETH_SWDATA_PAGE,
+ PRUETH_SWDATA_CMD,
+ PRUETH_SWDATA_XDPF,
+};
+
+struct prueth_swdata {
+ enum prueth_swdata_type type;
+ union prueth_data {
+ struct sk_buff *skb;
+ struct page *page;
+ u32 cmd;
+ struct xdp_frame *xdpf;
+ } data;
};
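The tagged union replaces the old untyped void ** slot in the descriptor's software data, so every completion and teardown path can dispatch on what the descriptor actually holds. A minimal sketch of that dispatch, consolidating what the TX paths above do (illustrative, not driver code):

static void prueth_swdata_release(struct prueth_swdata *swdata)
{
	switch (swdata->type) {
	case PRUETH_SWDATA_SKB:
		dev_kfree_skb_any(swdata->data.skb);
		break;
	case PRUETH_SWDATA_XDPF:
		xdp_return_frame(swdata->data.xdpf);
		break;
	case PRUETH_SWDATA_PAGE:
		page_pool_recycle_direct(swdata->data.page->pp,
					 swdata->data.page);
		break;
	default:	/* PRUETH_SWDATA_CMD needs no release */
		break;
	}
}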
/* There are 4 Tx DMA channels, but the highest priority is CH3 (thread 3)
@@ -140,6 +164,12 @@ struct prueth_rx_chn {
#define PRUETH_MAX_TX_TS_REQUESTS 50 /* Max simultaneous TX_TS requests */
+/* XDP BPF state */
+#define ICSSG_XDP_PASS 0
+#define ICSSG_XDP_CONSUMED BIT(0)
+#define ICSSG_XDP_TX BIT(1)
+#define ICSSG_XDP_REDIR BIT(2)
+
/* Minimum coalesce time in usecs for both Tx and Rx */
#define ICSSG_MIN_COALESCE_USECS 20
@@ -208,8 +238,14 @@ struct prueth_emac {
unsigned long rx_pace_timeout_ns;
struct netdev_hw_addr_list vlan_mcast_list[MAX_VLAN_ID];
+ struct bpf_prog *xdp_prog;
+ struct xdp_attachment_info xdpi;
};
+/* The buf includes headroom compatible with both skb and xdpf */
+#define PRUETH_HEADROOM_NA (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + NET_IP_ALIGN)
+#define PRUETH_HEADROOM ALIGN(PRUETH_HEADROOM_NA, sizeof(long))
+
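Worked through with typical, config-dependent values: XDP_PACKET_HEADROOM = 256, NET_SKB_PAD = 64 and NET_IP_ALIGN = 2 give PRUETH_HEADROOM_NA = max(256, 64) + 2 = 258, which ALIGN(258, sizeof(long)) rounds up to 264 on a 64-bit machine; the rest of each 4 KB page is left for frame data and the skb_shared_info tail.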
/**
* struct prueth_pdata - PRUeth platform data
* @fdqring_mode: Free desc queue mode
@@ -305,6 +341,8 @@ struct prueth {
int default_vlan;
/** @vtbl_lock: Lock for vtbl in shared memory */
spinlock_t vtbl_lock;
+ /** @stats_lock: Lock for reading icssg stats */
+ spinlock_t stats_lock;
};
struct emac_tx_ts_response {
@@ -410,9 +448,10 @@ int prueth_init_rx_chns(struct prueth_emac *emac,
struct prueth_rx_chn *rx_chn,
char *name, u32 max_rflows,
u32 max_desc_num);
-int prueth_dma_rx_push(struct prueth_emac *emac,
- struct sk_buff *skb,
- struct prueth_rx_chn *rx_chn);
+int prueth_dma_rx_push_mapped(struct prueth_emac *emac,
+ struct prueth_rx_chn *rx_chn,
+ struct page *page, u32 buf_len);
+unsigned int prueth_rxbuf_total_len(unsigned int len);
void emac_rx_timestamp(struct prueth_emac *emac,
struct sk_buff *skb, u32 *psdata);
enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev);
@@ -441,5 +480,9 @@ void prueth_put_cores(struct prueth *prueth, int slice);
/* Revision specific helper */
u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns);
+u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
+ struct xdp_frame *xdpf,
+ struct page *page,
+ unsigned int q_idx);
#endif /* __NET_TI_ICSSG_PRUETH_H */
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
index 64a19ff39562..ff5f41bf499e 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
@@ -84,7 +84,7 @@ static int emac_send_command_sr1(struct prueth_emac *emac, u32 cmd)
__le32 *data = emac->cmd_data;
dma_addr_t desc_dma, buf_dma;
struct prueth_tx_chn *tx_chn;
- void **swdata;
+ struct prueth_swdata *swdata;
int ret = 0;
u32 *epib;
@@ -122,7 +122,8 @@ static int emac_send_command_sr1(struct prueth_emac *emac, u32 cmd)
cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
swdata = cppi5_hdesc_get_swdata(first_desc);
- *swdata = data;
+ swdata->type = PRUETH_SWDATA_CMD;
+ swdata->data.cmd = le32_to_cpu(data[0]);
cppi5_hdesc_set_pktlen(first_desc, pkt_len);
desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
@@ -268,16 +269,16 @@ static int emac_phy_connect(struct prueth_emac *emac)
 * Returns a page pointer if a packet is found, else NULL
 * Caller must recycle the returned page.
*/
-static struct sk_buff *prueth_process_rx_mgm(struct prueth_emac *emac,
- u32 flow_id)
+static struct page *prueth_process_rx_mgm(struct prueth_emac *emac,
+ u32 flow_id)
{
struct prueth_rx_chn *rx_chn = &emac->rx_mgm_chn;
struct net_device *ndev = emac->ndev;
struct cppi5_host_desc_t *desc_rx;
- struct sk_buff *skb, *new_skb;
+ struct page *page, *new_page;
+ struct prueth_swdata *swdata;
dma_addr_t desc_dma, buf_dma;
- u32 buf_dma_len, pkt_len;
- void **swdata;
+ u32 buf_dma_len;
int ret;
ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
@@ -299,34 +300,31 @@ static struct sk_buff *prueth_process_rx_mgm(struct prueth_emac *emac,
}
swdata = cppi5_hdesc_get_swdata(desc_rx);
- skb = *swdata;
+ page = swdata->data.page;
cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
- pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
- new_skb = netdev_alloc_skb_ip_align(ndev, PRUETH_MAX_PKT_SIZE);
+ new_page = page_pool_dev_alloc_pages(rx_chn->pg_pool);
/* if allocation fails we drop the packet but push the
 * descriptor back to the ring with the old page to prevent a stall
*/
- if (!new_skb) {
+ if (!new_page) {
netdev_err(ndev,
- "skb alloc failed, dropped mgm pkt from flow %d\n",
+ "page alloc failed, dropped mgm pkt from flow %d\n",
flow_id);
- new_skb = skb;
- skb = NULL; /* return NULL */
- } else {
- /* return the filled skb */
- skb_put(skb, pkt_len);
+ new_page = page;
+ page = NULL; /* return NULL */
}
/* queue another DMA */
- ret = prueth_dma_rx_push(emac, new_skb, &emac->rx_mgm_chn);
+ ret = prueth_dma_rx_push_mapped(emac, &emac->rx_mgm_chn, new_page,
+ PRUETH_MAX_PKT_SIZE);
if (WARN_ON(ret < 0))
- dev_kfree_skb_any(new_skb);
+ page_pool_recycle_direct(rx_chn->pg_pool, new_page);
- return skb;
+ return page;
}
static void prueth_tx_ts_sr1(struct prueth_emac *emac,
@@ -362,14 +360,14 @@ static void prueth_tx_ts_sr1(struct prueth_emac *emac,
static irqreturn_t prueth_rx_mgm_ts_thread_sr1(int irq, void *dev_id)
{
struct prueth_emac *emac = dev_id;
- struct sk_buff *skb;
+ struct page *page;
- skb = prueth_process_rx_mgm(emac, PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1);
- if (!skb)
+ page = prueth_process_rx_mgm(emac, PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1);
+ if (!page)
return IRQ_NONE;
- prueth_tx_ts_sr1(emac, (void *)skb->data);
- dev_kfree_skb_any(skb);
+ prueth_tx_ts_sr1(emac, (void *)page_address(page));
+ page_pool_recycle_direct(page->pp, page);
return IRQ_HANDLED;
}
@@ -377,15 +375,15 @@ static irqreturn_t prueth_rx_mgm_ts_thread_sr1(int irq, void *dev_id)
static irqreturn_t prueth_rx_mgm_rsp_thread(int irq, void *dev_id)
{
struct prueth_emac *emac = dev_id;
- struct sk_buff *skb;
+ struct page *page;
u32 rsp;
- skb = prueth_process_rx_mgm(emac, PRUETH_RX_MGM_FLOW_RESPONSE_SR1);
- if (!skb)
+ page = prueth_process_rx_mgm(emac, PRUETH_RX_MGM_FLOW_RESPONSE_SR1);
+ if (!page)
return IRQ_NONE;
/* Process command response */
- rsp = le32_to_cpu(*(__le32 *)skb->data) & 0xffff0000;
+ rsp = le32_to_cpu(*(__le32 *)page_address(page)) & 0xffff0000;
if (rsp == ICSSG_SHUTDOWN_CMD_SR1) {
netdev_dbg(emac->ndev, "f/w Shutdown cmd resp %x\n", rsp);
complete(&emac->cmd_complete);
@@ -394,7 +392,7 @@ static irqreturn_t prueth_rx_mgm_rsp_thread(int irq, void *dev_id)
complete(&emac->cmd_complete);
}
- dev_kfree_skb_any(skb);
+ page_pool_recycle_direct(page->pp, page);
return IRQ_HANDLED;
}
diff --git a/drivers/net/ethernet/ti/icssg/icssg_stats.c b/drivers/net/ethernet/ti/icssg/icssg_stats.c
index 8800bd3a8d07..6f0edae38ea2 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_stats.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_stats.c
@@ -26,6 +26,8 @@ void emac_update_hardware_stats(struct prueth_emac *emac)
u32 val, reg;
int i;
+ spin_lock(&prueth->stats_lock);
+
for (i = 0; i < ARRAY_SIZE(icssg_all_miig_stats); i++) {
regmap_read(prueth->miig_rt,
base + icssg_all_miig_stats[i].offset,
@@ -51,6 +53,8 @@ void emac_update_hardware_stats(struct prueth_emac *emac)
emac->pa_stats[i] += val;
}
}
+
+ spin_unlock(&prueth->stats_lock);
}
void icssg_stats_work_handler(struct work_struct *work)
diff --git a/drivers/net/ethernet/toshiba/Kconfig b/drivers/net/ethernet/toshiba/Kconfig
index 701e9b7c1c3b..b1e27e3b99eb 100644
--- a/drivers/net/ethernet/toshiba/Kconfig
+++ b/drivers/net/ethernet/toshiba/Kconfig
@@ -6,7 +6,7 @@
config NET_VENDOR_TOSHIBA
bool "Toshiba devices"
default y
- depends on PCI && (PPC_IBM_CELL_BLADE || MIPS) || PPC_PS3
+ depends on PCI && MIPS || PPC_PS3
help
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -39,15 +39,6 @@ config GELIC_WIRELESS
the driver automatically distinguishes the models, you can
safely enable this option even if you have a wireless-less model.
-config SPIDER_NET
- tristate "Spider Gigabit Ethernet driver"
- depends on PCI && PPC_IBM_CELL_BLADE
- select FW_LOADER
- select SUNGEM_PHY
- help
- This driver supports the Gigabit Ethernet chips present on the
- Cell Processor-Based Blades from IBM.
-
config TC35815
tristate "TOSHIBA TC35815 Ethernet support"
depends on PCI && MIPS
diff --git a/drivers/net/ethernet/toshiba/Makefile b/drivers/net/ethernet/toshiba/Makefile
index f434fd0f429e..27e2164cf7e9 100644
--- a/drivers/net/ethernet/toshiba/Makefile
+++ b/drivers/net/ethernet/toshiba/Makefile
@@ -6,6 +6,4 @@
obj-$(CONFIG_GELIC_NET) += ps3_gelic.o
gelic_wireless-$(CONFIG_GELIC_WIRELESS) += ps3_gelic_wireless.o
ps3_gelic-objs += ps3_gelic_net.o $(gelic_wireless-y)
-spidernet-y += spider_net.o spider_net_ethtool.o
-obj-$(CONFIG_SPIDER_NET) += spidernet.o
obj-$(CONFIG_TC35815) += tc35815.o
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
deleted file mode 100644
index a4937c18d7cb..000000000000
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ /dev/null
@@ -1,2556 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Network device driver for Cell Processor-Based Blade and Celleb platform
- *
- * (C) Copyright IBM Corp. 2005
- * (C) Copyright 2006 TOSHIBA CORPORATION
- *
- * Authors : Utz Bacher <utz.bacher@de.ibm.com>
- * Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
- */
-
-#include <linux/compiler.h>
-#include <linux/crc32.h>
-#include <linux/delay.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/firmware.h>
-#include <linux/if_vlan.h>
-#include <linux/in.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/gfp.h>
-#include <linux/ioport.h>
-#include <linux/ip.h>
-#include <linux/kernel.h>
-#include <linux/mii.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/device.h>
-#include <linux/pci.h>
-#include <linux/skbuff.h>
-#include <linux/tcp.h>
-#include <linux/types.h>
-#include <linux/vmalloc.h>
-#include <linux/wait.h>
-#include <linux/workqueue.h>
-#include <linux/bitops.h>
-#include <linux/of.h>
-#include <net/checksum.h>
-
-#include "spider_net.h"
-
-MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com> and Jens Osterkamp " \
- "<Jens.Osterkamp@de.ibm.com>");
-MODULE_DESCRIPTION("Spider Southbridge Gigabit Ethernet driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(VERSION);
-MODULE_FIRMWARE(SPIDER_NET_FIRMWARE_NAME);
-
-static int rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_DEFAULT;
-static int tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_DEFAULT;
-
-module_param(rx_descriptors, int, 0444);
-module_param(tx_descriptors, int, 0444);
-
-MODULE_PARM_DESC(rx_descriptors, "number of descriptors used " \
- "in rx chains");
-MODULE_PARM_DESC(tx_descriptors, "number of descriptors used " \
- "in tx chain");
-
-char spider_net_driver_name[] = "spidernet";
-
-static const struct pci_device_id spider_net_pci_tbl[] = {
- { PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SPIDER_NET,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { 0, }
-};
-
-MODULE_DEVICE_TABLE(pci, spider_net_pci_tbl);
-
-/**
- * spider_net_read_reg - reads an SMMIO register of a card
- * @card: device structure
- * @reg: register to read from
- *
- * returns the content of the specified SMMIO register.
- */
-static inline u32
-spider_net_read_reg(struct spider_net_card *card, u32 reg)
-{
- /* We use the powerpc specific variants instead of readl_be() because
- * we know spidernet is not a real PCI device and we can thus avoid the
- * performance hit caused by the PCI workarounds.
- */
- return in_be32(card->regs + reg);
-}
-
-/**
- * spider_net_write_reg - writes to an SMMIO register of a card
- * @card: device structure
- * @reg: register to write to
- * @value: value to write into the specified SMMIO register
- */
-static inline void
-spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
-{
- /* We use the powerpc specific variants instead of writel_be() because
- * we know spidernet is not a real PCI device and we can thus avoid the
- * performance hit caused by the PCI workarounds.
- */
- out_be32(card->regs + reg, value);
-}
-
-/**
- * spider_net_write_phy - write to phy register
- * @netdev: adapter to be written to
- * @mii_id: id of MII
- * @reg: PHY register
- * @val: value to be written to phy register
- *
- * spider_net_write_phy_register writes to an arbitrary PHY
- * register via the spider GPCWOPCMD register. We assume the queue does
- * not run full (not more than 15 commands outstanding).
- **/
-static void
-spider_net_write_phy(struct net_device *netdev, int mii_id,
- int reg, int val)
-{
- struct spider_net_card *card = netdev_priv(netdev);
- u32 writevalue;
-
- writevalue = ((u32)mii_id << 21) |
- ((u32)reg << 16) | ((u32)val);
-
- spider_net_write_reg(card, SPIDER_NET_GPCWOPCMD, writevalue);
-}
-
-/**
- * spider_net_read_phy - read from phy register
- * @netdev: network device to be read from
- * @mii_id: id of MII
- * @reg: PHY register
- *
- * Returns value read from PHY register
- *
- * spider_net_write_phy reads from an arbitrary PHY
- * register via the spider GPCROPCMD register
- **/
-static int
-spider_net_read_phy(struct net_device *netdev, int mii_id, int reg)
-{
- struct spider_net_card *card = netdev_priv(netdev);
- u32 readvalue;
-
- readvalue = ((u32)mii_id << 21) | ((u32)reg << 16);
- spider_net_write_reg(card, SPIDER_NET_GPCROPCMD, readvalue);
-
- /* we don't use semaphores to wait for an SPIDER_NET_GPROPCMPINT
- * interrupt, as we poll for the completion of the read operation
- * in spider_net_read_phy. Should take about 50 us
- */
- do {
- readvalue = spider_net_read_reg(card, SPIDER_NET_GPCROPCMD);
- } while (readvalue & SPIDER_NET_GPREXEC);
-
- readvalue &= SPIDER_NET_GPRDAT_MASK;
-
- return readvalue;
-}
-
-/**
- * spider_net_setup_aneg - initial auto-negotiation setup
- * @card: device structure
- **/
-static void
-spider_net_setup_aneg(struct spider_net_card *card)
-{
- struct mii_phy *phy = &card->phy;
- u32 advertise = 0;
- u16 bmsr, estat;
-
- bmsr = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR);
- estat = spider_net_read_phy(card->netdev, phy->mii_id, MII_ESTATUS);
-
- if (bmsr & BMSR_10HALF)
- advertise |= ADVERTISED_10baseT_Half;
- if (bmsr & BMSR_10FULL)
- advertise |= ADVERTISED_10baseT_Full;
- if (bmsr & BMSR_100HALF)
- advertise |= ADVERTISED_100baseT_Half;
- if (bmsr & BMSR_100FULL)
- advertise |= ADVERTISED_100baseT_Full;
-
- if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_TFULL))
- advertise |= SUPPORTED_1000baseT_Full;
- if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_THALF))
- advertise |= SUPPORTED_1000baseT_Half;
-
- sungem_phy_probe(phy, phy->mii_id);
- phy->def->ops->setup_aneg(phy, advertise);
-
-}
-
-/**
- * spider_net_rx_irq_off - switch off rx irq on this spider card
- * @card: device structure
- *
- * switches off rx irq by masking them out in the GHIINTnMSK register
- */
-static void
-spider_net_rx_irq_off(struct spider_net_card *card)
-{
- u32 regvalue;
-
- regvalue = SPIDER_NET_INT0_MASK_VALUE & (~SPIDER_NET_RXINT);
- spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
-}
-
-/**
- * spider_net_rx_irq_on - switch on rx irq on this spider card
- * @card: device structure
- *
- * switches on rx irq by enabling them in the GHIINTnMSK register
- */
-static void
-spider_net_rx_irq_on(struct spider_net_card *card)
-{
- u32 regvalue;
-
- regvalue = SPIDER_NET_INT0_MASK_VALUE | SPIDER_NET_RXINT;
- spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
-}
-
-/**
- * spider_net_set_promisc - sets the unicast address or the promiscuous mode
- * @card: card structure
- *
- * spider_net_set_promisc sets the unicast destination address filter and
- * thus either allows for non-promisc mode or promisc mode
- */
-static void
-spider_net_set_promisc(struct spider_net_card *card)
-{
- u32 macu, macl;
- struct net_device *netdev = card->netdev;
-
- if (netdev->flags & IFF_PROMISC) {
- /* clear destination entry 0 */
- spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, 0);
- spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, 0);
- spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R,
- SPIDER_NET_PROMISC_VALUE);
- } else {
- macu = netdev->dev_addr[0];
- macu <<= 8;
- macu |= netdev->dev_addr[1];
- memcpy(&macl, &netdev->dev_addr[2], sizeof(macl));
-
- macu |= SPIDER_NET_UA_DESCR_VALUE;
- spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, macu);
- spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, macl);
- spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R,
- SPIDER_NET_NONPROMISC_VALUE);
- }
-}
-
-/**
- * spider_net_get_descr_status -- returns the status of a descriptor
- * @hwdescr: descriptor to look at
- *
- * returns the status as in the dmac_cmd_status field of the descriptor
- */
-static inline int
-spider_net_get_descr_status(struct spider_net_hw_descr *hwdescr)
-{
- return hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK;
-}
-
-/**
- * spider_net_free_chain - free descriptor chain
- * @card: card structure
- * @chain: address of chain
- *
- */
-static void
-spider_net_free_chain(struct spider_net_card *card,
- struct spider_net_descr_chain *chain)
-{
- struct spider_net_descr *descr;
-
- descr = chain->ring;
- do {
- descr->bus_addr = 0;
- descr->hwdescr->next_descr_addr = 0;
- descr = descr->next;
- } while (descr != chain->ring);
-
- dma_free_coherent(&card->pdev->dev, chain->num_desc * sizeof(struct spider_net_hw_descr),
- chain->hwring, chain->dma_addr);
-}
-
-/**
- * spider_net_init_chain - alloc and link descriptor chain
- * @card: card structure
- * @chain: address of chain
- *
- * We manage a circular list that mirrors the hardware structure,
- * except that the hardware uses bus addresses.
- *
- * Returns 0 on success, <0 on failure
- */
-static int
-spider_net_init_chain(struct spider_net_card *card,
- struct spider_net_descr_chain *chain)
-{
- int i;
- struct spider_net_descr *descr;
- struct spider_net_hw_descr *hwdescr;
- dma_addr_t buf;
- size_t alloc_size;
-
- alloc_size = chain->num_desc * sizeof(struct spider_net_hw_descr);
-
- chain->hwring = dma_alloc_coherent(&card->pdev->dev, alloc_size,
- &chain->dma_addr, GFP_KERNEL);
- if (!chain->hwring)
- return -ENOMEM;
-
- /* Set up the hardware pointers in each descriptor */
- descr = chain->ring;
- hwdescr = chain->hwring;
- buf = chain->dma_addr;
- for (i=0; i < chain->num_desc; i++, descr++, hwdescr++) {
- hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
- hwdescr->next_descr_addr = 0;
-
- descr->hwdescr = hwdescr;
- descr->bus_addr = buf;
- descr->next = descr + 1;
- descr->prev = descr - 1;
-
- buf += sizeof(struct spider_net_hw_descr);
- }
- /* do actual circular list */
- (descr-1)->next = chain->ring;
- chain->ring->prev = descr-1;
-
- spin_lock_init(&chain->lock);
- chain->head = chain->ring;
- chain->tail = chain->ring;
- return 0;
-}
-
-/**
- * spider_net_free_rx_chain_contents - frees descr contents in rx chain
- * @card: card structure
- *
- * returns 0 on success, <0 on failure
- */
-static void
-spider_net_free_rx_chain_contents(struct spider_net_card *card)
-{
- struct spider_net_descr *descr;
-
- descr = card->rx_chain.head;
- do {
- if (descr->skb) {
- dma_unmap_single(&card->pdev->dev,
- descr->hwdescr->buf_addr,
- SPIDER_NET_MAX_FRAME,
- DMA_BIDIRECTIONAL);
- dev_kfree_skb(descr->skb);
- descr->skb = NULL;
- }
- descr = descr->next;
- } while (descr != card->rx_chain.head);
-}
-
-/**
- * spider_net_prepare_rx_descr - Reinitialize RX descriptor
- * @card: card structure
- * @descr: descriptor to re-init
- *
- * Return 0 on success, <0 on failure.
- *
- * Allocates a new rx skb, iommu-maps it and attaches it to the
- * descriptor. Mark the descriptor as activated, ready-to-use.
- */
-static int
-spider_net_prepare_rx_descr(struct spider_net_card *card,
- struct spider_net_descr *descr)
-{
- struct spider_net_hw_descr *hwdescr = descr->hwdescr;
- dma_addr_t buf;
- int offset;
- int bufsize;
-
- /* we need to round up the buffer size to a multiple of 128 */
- bufsize = (SPIDER_NET_MAX_FRAME + SPIDER_NET_RXBUF_ALIGN - 1) &
- (~(SPIDER_NET_RXBUF_ALIGN - 1));
-
- /* and we need to have it 128 byte aligned, therefore we allocate a
- * bit more
- */
- /* allocate an skb */
- descr->skb = netdev_alloc_skb(card->netdev,
- bufsize + SPIDER_NET_RXBUF_ALIGN - 1);
- if (!descr->skb) {
- if (netif_msg_rx_err(card) && net_ratelimit())
- dev_err(&card->netdev->dev,
- "Not enough memory to allocate rx buffer\n");
- card->spider_stats.alloc_rx_skb_error++;
- return -ENOMEM;
- }
- hwdescr->buf_size = bufsize;
- hwdescr->result_size = 0;
- hwdescr->valid_size = 0;
- hwdescr->data_status = 0;
- hwdescr->data_error = 0;
-
- offset = ((unsigned long)descr->skb->data) &
- (SPIDER_NET_RXBUF_ALIGN - 1);
- if (offset)
- skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset);
- /* iommu-map the skb */
- buf = dma_map_single(&card->pdev->dev, descr->skb->data,
- SPIDER_NET_MAX_FRAME, DMA_FROM_DEVICE);
- if (dma_mapping_error(&card->pdev->dev, buf)) {
- dev_kfree_skb_any(descr->skb);
- descr->skb = NULL;
- if (netif_msg_rx_err(card) && net_ratelimit())
- dev_err(&card->netdev->dev, "Could not iommu-map rx buffer\n");
- card->spider_stats.rx_iommu_map_error++;
- hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
- } else {
- hwdescr->buf_addr = buf;
- wmb();
- hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED |
- SPIDER_NET_DMAC_NOINTR_COMPLETE;
- }
-
- return 0;
-}
-
-/**
- * spider_net_enable_rxchtails - sets RX dmac chain tail addresses
- * @card: card structure
- *
- * spider_net_enable_rxchtails sets the RX DMAC chain tail addresses in the
- * chip by writing to the appropriate register. DMA is enabled in
- * spider_net_enable_rxdmac.
- */
-static inline void
-spider_net_enable_rxchtails(struct spider_net_card *card)
-{
- /* assume chain is aligned correctly */
- spider_net_write_reg(card, SPIDER_NET_GDADCHA ,
- card->rx_chain.tail->bus_addr);
-}
-
-/**
- * spider_net_enable_rxdmac - enables a receive DMA controller
- * @card: card structure
- *
- * spider_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN
- * in the GDADMACCNTR register
- */
-static inline void
-spider_net_enable_rxdmac(struct spider_net_card *card)
-{
- wmb();
- spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
- SPIDER_NET_DMA_RX_VALUE);
-}
-
-/**
- * spider_net_disable_rxdmac - disables the receive DMA controller
- * @card: card structure
- *
- * spider_net_disable_rxdmac terminates processing on the DMA controller
- * by turing off the DMA controller, with the force-end flag set.
- */
-static inline void
-spider_net_disable_rxdmac(struct spider_net_card *card)
-{
- spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
- SPIDER_NET_DMA_RX_FEND_VALUE);
-}
-
-/**
- * spider_net_refill_rx_chain - refills descriptors/skbs in the rx chains
- * @card: card structure
- *
- * refills descriptors in the rx chain: allocates skbs and iommu-maps them.
- */
-static void
-spider_net_refill_rx_chain(struct spider_net_card *card)
-{
- struct spider_net_descr_chain *chain = &card->rx_chain;
- unsigned long flags;
-
- /* one context doing the refill (and a second context seeing that
- * and omitting it) is ok. If called by NAPI, we'll be called again
- * as spider_net_decode_one_descr is called several times. If some
- * interrupt calls us, the NAPI is about to clean up anyway.
- */
- if (!spin_trylock_irqsave(&chain->lock, flags))
- return;
-
- while (spider_net_get_descr_status(chain->head->hwdescr) ==
- SPIDER_NET_DESCR_NOT_IN_USE) {
- if (spider_net_prepare_rx_descr(card, chain->head))
- break;
- chain->head = chain->head->next;
- }
-
- spin_unlock_irqrestore(&chain->lock, flags);
-}
-
-/**
- * spider_net_alloc_rx_skbs - Allocates rx skbs in rx descriptor chains
- * @card: card structure
- *
- * Returns 0 on success, <0 on failure.
- */
-static int
-spider_net_alloc_rx_skbs(struct spider_net_card *card)
-{
- struct spider_net_descr_chain *chain = &card->rx_chain;
- struct spider_net_descr *start = chain->tail;
- struct spider_net_descr *descr = start;
-
- /* Link up the hardware chain pointers */
- do {
- descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
- descr = descr->next;
- } while (descr != start);
-
- /* Put at least one buffer into the chain. if this fails,
- * we've got a problem. If not, spider_net_refill_rx_chain
- * will do the rest at the end of this function.
- */
- if (spider_net_prepare_rx_descr(card, chain->head))
- goto error;
- else
- chain->head = chain->head->next;
-
- /* This will allocate the rest of the rx buffers;
- * if not, it's business as usual later on.
- */
- spider_net_refill_rx_chain(card);
- spider_net_enable_rxdmac(card);
- return 0;
-
-error:
- spider_net_free_rx_chain_contents(card);
- return -ENOMEM;
-}
-
-/**
- * spider_net_get_multicast_hash - generates hash for multicast filter table
- * @netdev: interface device structure
- * @addr: multicast address
- *
- * returns the hash value.
- *
- * spider_net_get_multicast_hash calculates a hash value for a given multicast
- * address, that is used to set the multicast filter tables
- */
-static u8
-spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr)
-{
- u32 crc;
- u8 hash;
- char addr_for_crc[ETH_ALEN] = { 0, };
- int i, bit;
-
- for (i = 0; i < ETH_ALEN * 8; i++) {
- bit = (addr[i / 8] >> (i % 8)) & 1;
- addr_for_crc[ETH_ALEN - 1 - i / 8] += bit << (7 - (i % 8));
- }
-
- crc = crc32_be(~0, addr_for_crc, netdev->addr_len);
-
- hash = (crc >> 27);
- hash <<= 3;
- hash |= crc & 7;
- hash &= 0xff;
-
- return hash;
-}
-
-/**
- * spider_net_set_multi - sets multicast addresses and promisc flags
- * @netdev: interface device structure
- *
- * spider_net_set_multi configures multicast addresses as needed for the
- * netdev interface. It also sets up multicast, allmulti and promisc
- * flags appropriately
- */
-static void
-spider_net_set_multi(struct net_device *netdev)
-{
- struct netdev_hw_addr *ha;
- u8 hash;
- int i;
- u32 reg;
- struct spider_net_card *card = netdev_priv(netdev);
- DECLARE_BITMAP(bitmask, SPIDER_NET_MULTICAST_HASHES);
-
- spider_net_set_promisc(card);
-
- if (netdev->flags & IFF_ALLMULTI) {
- bitmap_fill(bitmask, SPIDER_NET_MULTICAST_HASHES);
- goto write_hash;
- }
-
- bitmap_zero(bitmask, SPIDER_NET_MULTICAST_HASHES);
-
- /* well, we know, what the broadcast hash value is: it's xfd
- hash = spider_net_get_multicast_hash(netdev, netdev->broadcast); */
- __set_bit(0xfd, bitmask);
-
- netdev_for_each_mc_addr(ha, netdev) {
- hash = spider_net_get_multicast_hash(netdev, ha->addr);
- __set_bit(hash, bitmask);
- }
-
-write_hash:
- for (i = 0; i < SPIDER_NET_MULTICAST_HASHES / 4; i++) {
- reg = 0;
- if (test_bit(i * 4, bitmask))
- reg += 0x08;
- reg <<= 8;
- if (test_bit(i * 4 + 1, bitmask))
- reg += 0x08;
- reg <<= 8;
- if (test_bit(i * 4 + 2, bitmask))
- reg += 0x08;
- reg <<= 8;
- if (test_bit(i * 4 + 3, bitmask))
- reg += 0x08;
-
- spider_net_write_reg(card, SPIDER_NET_GMRMHFILnR + i * 4, reg);
- }
-}
-
-/**
- * spider_net_prepare_tx_descr - fill tx descriptor with skb data
- * @card: card structure
- * @skb: packet to use
- *
- * returns 0 on success, <0 on failure.
- *
- * fills out the descriptor structure with skb data and len. Copies data,
- * if needed (32bit DMA!)
- */
-static int
-spider_net_prepare_tx_descr(struct spider_net_card *card,
- struct sk_buff *skb)
-{
- struct spider_net_descr_chain *chain = &card->tx_chain;
- struct spider_net_descr *descr;
- struct spider_net_hw_descr *hwdescr;
- dma_addr_t buf;
- unsigned long flags;
-
- buf = dma_map_single(&card->pdev->dev, skb->data, skb->len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(&card->pdev->dev, buf)) {
- if (netif_msg_tx_err(card) && net_ratelimit())
- dev_err(&card->netdev->dev, "could not iommu-map packet (%p, %i). "
- "Dropping packet\n", skb->data, skb->len);
- card->spider_stats.tx_iommu_map_error++;
- return -ENOMEM;
- }
-
- spin_lock_irqsave(&chain->lock, flags);
- descr = card->tx_chain.head;
- if (descr->next == chain->tail->prev) {
- spin_unlock_irqrestore(&chain->lock, flags);
- dma_unmap_single(&card->pdev->dev, buf, skb->len,
- DMA_TO_DEVICE);
- return -ENOMEM;
- }
- hwdescr = descr->hwdescr;
- chain->head = descr->next;
-
- descr->skb = skb;
- hwdescr->buf_addr = buf;
- hwdescr->buf_size = skb->len;
- hwdescr->next_descr_addr = 0;
- hwdescr->data_status = 0;
-
- hwdescr->dmac_cmd_status =
- SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_TXFRMTL;
- spin_unlock_irqrestore(&chain->lock, flags);
-
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- switch (ip_hdr(skb)->protocol) {
- case IPPROTO_TCP:
- hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP;
- break;
- case IPPROTO_UDP:
- hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_UDP;
- break;
- }
-
- /* Chain the bus address, so that the DMA engine finds this descr. */
- wmb();
- descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
-
- netif_trans_update(card->netdev); /* set netdev watchdog timer */
- return 0;
-}
-
-static int
-spider_net_set_low_watermark(struct spider_net_card *card)
-{
- struct spider_net_descr *descr = card->tx_chain.tail;
- struct spider_net_hw_descr *hwdescr;
- unsigned long flags;
- int status;
- int cnt=0;
- int i;
-
- /* Measure the length of the queue. Measurement does not
- * need to be precise -- does not need a lock.
- */
- while (descr != card->tx_chain.head) {
- status = descr->hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_NOT_IN_USE;
- if (status == SPIDER_NET_DESCR_NOT_IN_USE)
- break;
- descr = descr->next;
- cnt++;
- }
-
- /* If TX queue is short, don't even bother with interrupts */
- if (cnt < card->tx_chain.num_desc/4)
- return cnt;
-
- /* Set low-watermark 3/4th's of the way into the queue. */
- descr = card->tx_chain.tail;
- cnt = (cnt*3)/4;
- for (i=0;i<cnt; i++)
- descr = descr->next;
-
- /* Set the new watermark, clear the old watermark */
- spin_lock_irqsave(&card->tx_chain.lock, flags);
- descr->hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_TXDESFLG;
- if (card->low_watermark && card->low_watermark != descr) {
- hwdescr = card->low_watermark->hwdescr;
- hwdescr->dmac_cmd_status =
- hwdescr->dmac_cmd_status & ~SPIDER_NET_DESCR_TXDESFLG;
- }
- card->low_watermark = descr;
- spin_unlock_irqrestore(&card->tx_chain.lock, flags);
- return cnt;
-}
-
-/**
- * spider_net_release_tx_chain - processes sent tx descriptors
- * @card: adapter structure
- * @brutal: if set, don't care about whether descriptor seems to be in use
- *
- * returns 0 if the tx ring is empty, otherwise 1.
- *
- * spider_net_release_tx_chain releases the tx descriptors that spider has
- * finished with (if non-brutal) or simply release tx descriptors (if brutal).
- * If some other context is calling this function, we return 1 so that we're
- * scheduled again (if we were scheduled) and will not lose initiative.
- */
-static int
-spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
-{
- struct net_device *dev = card->netdev;
- struct spider_net_descr_chain *chain = &card->tx_chain;
- struct spider_net_descr *descr;
- struct spider_net_hw_descr *hwdescr;
- struct sk_buff *skb;
- u32 buf_addr;
- unsigned long flags;
- int status;
-
- while (1) {
- spin_lock_irqsave(&chain->lock, flags);
- if (chain->tail == chain->head) {
- spin_unlock_irqrestore(&chain->lock, flags);
- return 0;
- }
- descr = chain->tail;
- hwdescr = descr->hwdescr;
-
- status = spider_net_get_descr_status(hwdescr);
- switch (status) {
- case SPIDER_NET_DESCR_COMPLETE:
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += descr->skb->len;
- break;
-
- case SPIDER_NET_DESCR_CARDOWNED:
- if (!brutal) {
- spin_unlock_irqrestore(&chain->lock, flags);
- return 1;
- }
-
- /* fallthrough, if we release the descriptors
- * brutally (then we don't care about
- * SPIDER_NET_DESCR_CARDOWNED)
- */
- fallthrough;
-
- case SPIDER_NET_DESCR_RESPONSE_ERROR:
- case SPIDER_NET_DESCR_PROTECTION_ERROR:
- case SPIDER_NET_DESCR_FORCE_END:
- if (netif_msg_tx_err(card))
- dev_err(&card->netdev->dev, "forcing end of tx descriptor "
- "with status x%02x\n", status);
- dev->stats.tx_errors++;
- break;
-
- default:
- dev->stats.tx_dropped++;
- if (!brutal) {
- spin_unlock_irqrestore(&chain->lock, flags);
- return 1;
- }
- }
-
- chain->tail = descr->next;
- hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE;
- skb = descr->skb;
- descr->skb = NULL;
- buf_addr = hwdescr->buf_addr;
- spin_unlock_irqrestore(&chain->lock, flags);
-
- /* unmap the skb */
- if (skb) {
- dma_unmap_single(&card->pdev->dev, buf_addr, skb->len,
- DMA_TO_DEVICE);
- dev_consume_skb_any(skb);
- }
- }
- return 0;
-}
-
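The reclaim loop above keeps the spinlock scope tight: the state needed to free a buffer is snapshotted under the lock, and the unmap/free happens unlocked. A small pthread model of that pattern (types and names are illustrative):

#include <pthread.h>
#include <stdlib.h>

struct slot { void *buf; };

static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;

static void reclaim_one(struct slot *s)
{
        void *buf;

        pthread_mutex_lock(&ring_lock);
        buf = s->buf;           /* snapshot under the lock */
        s->buf = NULL;          /* mark the slot reusable */
        pthread_mutex_unlock(&ring_lock);

        free(buf);              /* the slow work happens unlocked */
}
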
-/**
- * spider_net_kick_tx_dma - enables TX DMA processing
- * @card: card structure
- *
- * This routine will start the transmit DMA running if
- * it is not already running. This routine need only be
- * called when queueing a new packet to an empty tx queue.
- * Writes the current tx chain head as start address
- * of the tx descriptor chain and enables the transmission
- * DMA engine.
- */
-static inline void
-spider_net_kick_tx_dma(struct spider_net_card *card)
-{
- struct spider_net_descr *descr;
-
- if (spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR) &
- SPIDER_NET_TX_DMA_EN)
- goto out;
-
- descr = card->tx_chain.tail;
- for (;;) {
- if (spider_net_get_descr_status(descr->hwdescr) ==
- SPIDER_NET_DESCR_CARDOWNED) {
- spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
- descr->bus_addr);
- spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
- SPIDER_NET_DMA_TX_VALUE);
- break;
- }
- if (descr == card->tx_chain.head)
- break;
- descr = descr->next;
- }
-
-out:
- mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
-}
-
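A standalone model of the scan above: starting at the tail, find the first descriptor still owned by the card and use it as the DMA start point. The status code is a stand-in for SPIDER_NET_DESCR_CARDOWNED; the array stands in for the descriptor ring:

#define CARDOWNED 0xA           /* stand-in for SPIDER_NET_DESCR_CARDOWNED */

static int find_dma_start(const int *status, int tail, int head, int n)
{
        int i = tail;

        for (;;) {
                if (status[i] == CARDOWNED)
                        return i;       /* kick DMA from this descriptor */
                if (i == head)
                        return -1;      /* nothing pending */
                i = (i + 1) % n;
        }
}
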
-/**
- * spider_net_xmit - transmits a frame over the device
- * @skb: packet to send out
- * @netdev: interface device structure
- *
- * returns NETDEV_TX_OK on success, NETDEV_TX_BUSY on failure
- */
-static netdev_tx_t
-spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
-{
- int cnt;
- struct spider_net_card *card = netdev_priv(netdev);
-
- spider_net_release_tx_chain(card, 0);
-
- if (spider_net_prepare_tx_descr(card, skb) != 0) {
- netdev->stats.tx_dropped++;
- netif_stop_queue(netdev);
- return NETDEV_TX_BUSY;
- }
-
- cnt = spider_net_set_low_watermark(card);
- if (cnt < 5)
- spider_net_kick_tx_dma(card);
- return NETDEV_TX_OK;
-}
-
-/**
- * spider_net_cleanup_tx_ring - cleans up the TX ring
- * @t: timer context used to obtain the pointer to net card data structure
- *
- * spider_net_cleanup_tx_ring is called by either the tx_timer
- * or from the NAPI polling routine.
- * This routine releases resources associated with transmitted
- * packets, including updating the queue tail pointer.
- */
-static void
-spider_net_cleanup_tx_ring(struct timer_list *t)
-{
- struct spider_net_card *card = from_timer(card, t, tx_timer);
- if ((spider_net_release_tx_chain(card, 0) != 0) &&
- (card->netdev->flags & IFF_UP)) {
- spider_net_kick_tx_dma(card);
- netif_wake_queue(card->netdev);
- }
-}
-
-/**
- * spider_net_do_ioctl - called for device ioctls
- * @netdev: interface device structure
- * @ifr: request parameter structure for ioctl
- * @cmd: command code for ioctl
- *
- * returns 0 on success, <0 on failure. Currently, we have no special ioctls;
- * -EOPNOTSUPP is returned for any unknown ioctl.
- */
-static int
-spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- switch (cmd) {
- default:
- return -EOPNOTSUPP;
- }
-}
-
-/**
- * spider_net_pass_skb_up - takes an skb from a descriptor and passes it on
- * @descr: descriptor to process
- * @card: card structure
- *
- * Fills out skb structure and passes the data to the stack.
- * The descriptor state is not changed.
- */
-static void
-spider_net_pass_skb_up(struct spider_net_descr *descr,
- struct spider_net_card *card)
-{
- struct spider_net_hw_descr *hwdescr = descr->hwdescr;
- struct sk_buff *skb = descr->skb;
- struct net_device *netdev = card->netdev;
- u32 data_status = hwdescr->data_status;
- u32 data_error = hwdescr->data_error;
-
- skb_put(skb, hwdescr->valid_size);
-
- /* the card seems to add 2 bytes of junk in front
- * of the ethernet frame
- */
-#define SPIDER_MISALIGN 2
- skb_pull(skb, SPIDER_MISALIGN);
- skb->protocol = eth_type_trans(skb, netdev);
-
- /* checksum offload */
- skb_checksum_none_assert(skb);
- if (netdev->features & NETIF_F_RXCSUM) {
- if ( ( (data_status & SPIDER_NET_DATA_STATUS_CKSUM_MASK) ==
- SPIDER_NET_DATA_STATUS_CKSUM_MASK) &&
- !(data_error & SPIDER_NET_DATA_ERR_CKSUM_MASK))
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- }
-
- if (data_status & SPIDER_NET_VLAN_PACKET) {
- /* further enhancements: HW-accel VLAN */
- }
-
- /* update netdevice statistics */
- netdev->stats.rx_packets++;
- netdev->stats.rx_bytes += skb->len;
-
- /* pass skb up to stack */
- netif_receive_skb(skb);
-}
-
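The RX checksum decision above reduces to one predicate: the status bits must flag a checksummed protocol and the error bits must report no checksum fault. A compact sketch with stand-in mask values:

#include <stdbool.h>

#define CKSUM_STATUS_MASK 0x3   /* illustrative stand-in values */
#define CKSUM_ERR_MASK    0x1

static bool rx_csum_ok(unsigned int data_status, unsigned int data_error)
{
        return (data_status & CKSUM_STATUS_MASK) == CKSUM_STATUS_MASK &&
               !(data_error & CKSUM_ERR_MASK);
}
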
-static void show_rx_chain(struct spider_net_card *card)
-{
- struct spider_net_descr_chain *chain = &card->rx_chain;
- struct spider_net_descr *start= chain->tail;
- struct spider_net_descr *descr= start;
- struct spider_net_hw_descr *hwd = start->hwdescr;
- struct device *dev = &card->netdev->dev;
- u32 curr_desc, next_desc;
- int status;
-
- int tot = 0;
- int cnt = 0;
- int off = start - chain->ring;
- int cstat = hwd->dmac_cmd_status;
-
- dev_info(dev, "Total number of descrs=%d\n",
- chain->num_desc);
- dev_info(dev, "Chain tail located at descr=%d, status=0x%x\n",
- off, cstat);
-
- curr_desc = spider_net_read_reg(card, SPIDER_NET_GDACTDPA);
- next_desc = spider_net_read_reg(card, SPIDER_NET_GDACNEXTDA);
-
- status = cstat;
- do
- {
- hwd = descr->hwdescr;
- off = descr - chain->ring;
- status = hwd->dmac_cmd_status;
-
- if (descr == chain->head)
- dev_info(dev, "Chain head is at %d, head status=0x%x\n",
- off, status);
-
- if (curr_desc == descr->bus_addr)
- dev_info(dev, "HW curr desc (GDACTDPA) is at %d, status=0x%x\n",
- off, status);
-
- if (next_desc == descr->bus_addr)
- dev_info(dev, "HW next desc (GDACNEXTDA) is at %d, status=0x%x\n",
- off, status);
-
- if (hwd->next_descr_addr == 0)
- dev_info(dev, "chain is cut at %d\n", off);
-
- if (cstat != status) {
- int from = (chain->num_desc + off - cnt) % chain->num_desc;
- int to = (chain->num_desc + off - 1) % chain->num_desc;
- dev_info(dev, "Have %d (from %d to %d) descrs "
- "with stat=0x%08x\n", cnt, from, to, cstat);
- cstat = status;
- cnt = 0;
- }
-
- cnt++;
- tot++;
- descr = descr->next;
- } while (descr != start);
-
- dev_info(dev, "Last %d descrs with stat=0x%08x "
- "for a total of %d descrs\n", cnt, cstat, tot);
-
-#ifdef DEBUG
- /* Now dump the whole ring */
- descr = start;
- do
- {
- struct spider_net_hw_descr *hwd = descr->hwdescr;
- status = spider_net_get_descr_status(hwd);
- cnt = descr - chain->ring;
- dev_info(dev, "Descr %d stat=0x%08x skb=%p\n",
- cnt, status, descr->skb);
- dev_info(dev, "bus addr=%08x buf addr=%08x sz=%d\n",
- descr->bus_addr, hwd->buf_addr, hwd->buf_size);
- dev_info(dev, "next=%08x result sz=%d valid sz=%d\n",
- hwd->next_descr_addr, hwd->result_size,
- hwd->valid_size);
- dev_info(dev, "dmac=%08x data stat=%08x data err=%08x\n",
- hwd->dmac_cmd_status, hwd->data_status,
- hwd->data_error);
- dev_info(dev, "\n");
-
- descr = descr->next;
- } while (descr != start);
-#endif
-
-}
-
-/**
- * spider_net_resync_head_ptr - Advance head ptr past empty descrs
- * @card: card structure
- *
- * If the driver fails to keep up and empty the queue, then the
- * hardware will run out of room to put incoming packets. This
- * will cause the hardware to skip descrs that are full (instead
- * of halting/retrying). Thus, once the driver runs, it will need
- * to "catch up" to where the hardware chain pointer is at.
- */
-static void spider_net_resync_head_ptr(struct spider_net_card *card)
-{
- unsigned long flags;
- struct spider_net_descr_chain *chain = &card->rx_chain;
- struct spider_net_descr *descr;
- int i, status;
-
- /* Advance head pointer past any empty descrs */
- descr = chain->head;
- status = spider_net_get_descr_status(descr->hwdescr);
-
- if (status == SPIDER_NET_DESCR_NOT_IN_USE)
- return;
-
- spin_lock_irqsave(&chain->lock, flags);
-
- descr = chain->head;
- status = spider_net_get_descr_status(descr->hwdescr);
- for (i=0; i<chain->num_desc; i++) {
- if (status != SPIDER_NET_DESCR_CARDOWNED) break;
- descr = descr->next;
- status = spider_net_get_descr_status(descr->hwdescr);
- }
- chain->head = descr;
-
- spin_unlock_irqrestore(&chain->lock, flags);
-}
-
-static int spider_net_resync_tail_ptr(struct spider_net_card *card)
-{
- struct spider_net_descr_chain *chain = &card->rx_chain;
- struct spider_net_descr *descr;
- int i, status;
-
- /* Advance tail pointer past any empty and reaped descrs */
- descr = chain->tail;
- status = spider_net_get_descr_status(descr->hwdescr);
-
- for (i=0; i<chain->num_desc; i++) {
- if ((status != SPIDER_NET_DESCR_CARDOWNED) &&
- (status != SPIDER_NET_DESCR_NOT_IN_USE)) break;
- descr = descr->next;
- status = spider_net_get_descr_status(descr->hwdescr);
- }
- chain->tail = descr;
-
- if ((i == chain->num_desc) || (i == 0))
- return 1;
- return 0;
-}
-
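A sketch of the tail-resync walk above, with illustrative status codes: descriptors that are still card-owned or already reaped are skipped, and the walk stops at the first one holding real work:

#define CARDOWNED  0xA          /* illustrative stand-in status codes */
#define NOT_IN_USE 0xF

static int resync_tail(const int *status, int tail, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                int s = status[(tail + i) % n];

                if (s != CARDOWNED && s != NOT_IN_USE)
                        break;  /* first descriptor holding real work */
        }
        return (tail + i) % n;
}
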
-/**
- * spider_net_decode_one_descr - processes an RX descriptor
- * @card: card structure
- *
- * Returns 1 if a packet has been sent to the stack, otherwise 0.
- *
- * Processes an RX descriptor by iommu-unmapping the data buffer
- * and passing the packet up to the stack. This function is called
- * in softirq context, e.g. either bottom half from interrupt or
- * NAPI polling context.
- */
-static int
-spider_net_decode_one_descr(struct spider_net_card *card)
-{
- struct net_device *dev = card->netdev;
- struct spider_net_descr_chain *chain = &card->rx_chain;
- struct spider_net_descr *descr = chain->tail;
- struct spider_net_hw_descr *hwdescr = descr->hwdescr;
- u32 hw_buf_addr;
- int status;
-
- status = spider_net_get_descr_status(hwdescr);
-
- /* Nothing in the descriptor, or the ring is empty */
- if ((status == SPIDER_NET_DESCR_CARDOWNED) ||
- (status == SPIDER_NET_DESCR_NOT_IN_USE))
- return 0;
-
- /* descriptor definitively used -- move on tail */
- chain->tail = descr->next;
-
- /* unmap descriptor */
- hw_buf_addr = hwdescr->buf_addr;
- hwdescr->buf_addr = 0xffffffff;
- dma_unmap_single(&card->pdev->dev, hw_buf_addr, SPIDER_NET_MAX_FRAME,
- DMA_FROM_DEVICE);
-
- if ( (status == SPIDER_NET_DESCR_RESPONSE_ERROR) ||
- (status == SPIDER_NET_DESCR_PROTECTION_ERROR) ||
- (status == SPIDER_NET_DESCR_FORCE_END) ) {
- if (netif_msg_rx_err(card))
- dev_err(&dev->dev,
- "dropping RX descriptor with state %d\n", status);
- dev->stats.rx_dropped++;
- goto bad_desc;
- }
-
- if ( (status != SPIDER_NET_DESCR_COMPLETE) &&
- (status != SPIDER_NET_DESCR_FRAME_END) ) {
- if (netif_msg_rx_err(card))
- dev_err(&card->netdev->dev,
- "RX descriptor with unknown state %d\n", status);
- card->spider_stats.rx_desc_unk_state++;
- goto bad_desc;
- }
-
- /* The cases we'll throw away the packet immediately */
- if (hwdescr->data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
- if (netif_msg_rx_err(card))
- dev_err(&card->netdev->dev,
- "error in received descriptor found, "
- "data_status=x%08x, data_error=x%08x\n",
- hwdescr->data_status, hwdescr->data_error);
- goto bad_desc;
- }
-
- if (hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_BAD_STATUS) {
- dev_err(&card->netdev->dev, "bad status, cmd_status=x%08x\n",
- hwdescr->dmac_cmd_status);
- pr_err("buf_addr=x%08x\n", hw_buf_addr);
- pr_err("buf_size=x%08x\n", hwdescr->buf_size);
- pr_err("next_descr_addr=x%08x\n", hwdescr->next_descr_addr);
- pr_err("result_size=x%08x\n", hwdescr->result_size);
- pr_err("valid_size=x%08x\n", hwdescr->valid_size);
- pr_err("data_status=x%08x\n", hwdescr->data_status);
- pr_err("data_error=x%08x\n", hwdescr->data_error);
- pr_err("which=%ld\n", descr - card->rx_chain.ring);
-
- card->spider_stats.rx_desc_error++;
- goto bad_desc;
- }
-
- /* Ok, we've got a packet in descr */
- spider_net_pass_skb_up(descr, card);
- descr->skb = NULL;
- hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
- return 1;
-
-bad_desc:
- if (netif_msg_rx_err(card))
- show_rx_chain(card);
- dev_kfree_skb_irq(descr->skb);
- descr->skb = NULL;
- hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
- return 0;
-}
-
-/**
- * spider_net_poll - NAPI poll function called by the stack to return packets
- * @napi: napi device structure
- * @budget: number of packets we can pass to the stack at most
- *
- * returns the number of packets processed, which is at most @budget.
- *
- * spider_net_poll passes all available packets from the rx descriptors to
- * the stack (using netif_receive_skb). If fewer than @budget packets were
- * processed, NAPI is completed and interrupts are re-enabled.
- */
-static int spider_net_poll(struct napi_struct *napi, int budget)
-{
- struct spider_net_card *card = container_of(napi, struct spider_net_card, napi);
- int packets_done = 0;
-
- while (packets_done < budget) {
- if (!spider_net_decode_one_descr(card))
- break;
-
- packets_done++;
- }
-
- if ((packets_done == 0) && (card->num_rx_ints != 0)) {
- if (!spider_net_resync_tail_ptr(card))
- packets_done = budget;
- spider_net_resync_head_ptr(card);
- }
- card->num_rx_ints = 0;
-
- spider_net_refill_rx_chain(card);
- spider_net_enable_rxdmac(card);
-
- spider_net_cleanup_tx_ring(&card->tx_timer);
-
- /* if all packets are in the stack, enable interrupts and return 0 */
- /* if not, return 1 */
- if (packets_done < budget) {
- napi_complete_done(napi, packets_done);
- spider_net_rx_irq_on(card);
- card->ignore_rx_ramfull = 0;
- }
-
- return packets_done;
-}
-
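The poll routine above follows the standard NAPI budget shape. A plain-C model of that control flow (the function pointers are illustrative stand-ins, not kernel API):

static int poll_model(int budget, int (*decode_one)(void),
                      void (*irq_on)(void))
{
        int done = 0;

        while (done < budget && decode_one())
                done++;

        if (done < budget)
                irq_on();       /* drained early: fall back to interrupts */

        return done;
}
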
-/**
- * spider_net_set_mac - sets the MAC of an interface
- * @netdev: interface device structure
- * @p: pointer to new MAC address
- *
- * Returns 0 on success, -EADDRNOTAVAIL if the new address is invalid;
- * on success the address is also written to the MAC registers.
- */
-static int
-spider_net_set_mac(struct net_device *netdev, void *p)
-{
- struct spider_net_card *card = netdev_priv(netdev);
- u32 macl, macu, regvalue;
- struct sockaddr *addr = p;
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
-
- eth_hw_addr_set(netdev, addr->sa_data);
-
- /* switch off GMACTPE and GMACRPE */
- regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
- regvalue &= ~((1 << 5) | (1 << 6));
- spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue);
-
- /* write mac */
- macu = (netdev->dev_addr[0]<<24) + (netdev->dev_addr[1]<<16) +
- (netdev->dev_addr[2]<<8) + (netdev->dev_addr[3]);
- macl = (netdev->dev_addr[4]<<8) + (netdev->dev_addr[5]);
- spider_net_write_reg(card, SPIDER_NET_GMACUNIMACU, macu);
- spider_net_write_reg(card, SPIDER_NET_GMACUNIMACL, macl);
-
- /* switch GMACTPE and GMACRPE back on */
- regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
- regvalue |= ((1 << 5) | (1 << 6));
- spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue);
-
- spider_net_set_promisc(card);
-
- return 0;
-}
-
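The register packing above splits the six MAC bytes across two registers: bytes 0..3 into the upper, bytes 4..5 into the lower. A standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
        const unsigned char mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        unsigned int macu, macl;

        macu = ((unsigned int)mac[0] << 24) | (mac[1] << 16) |
               (mac[2] << 8) | mac[3];
        macl = (mac[4] << 8) | mac[5];

        /* prints GMACUNIMACU=0x00112233 GMACUNIMACL=0x4455 */
        printf("GMACUNIMACU=0x%08x GMACUNIMACL=0x%04x\n", macu, macl);
        return 0;
}
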
-/**
- * spider_net_link_reset
- * @netdev: net device structure
- *
- * This is called when the PHY_LINK signal is asserted. For the blade this is
- * not connected so we should never get here.
- *
- */
-static void
-spider_net_link_reset(struct net_device *netdev)
-{
-
- struct spider_net_card *card = netdev_priv(netdev);
-
- del_timer_sync(&card->aneg_timer);
-
- /* clear interrupt, block further interrupts */
- spider_net_write_reg(card, SPIDER_NET_GMACST,
- spider_net_read_reg(card, SPIDER_NET_GMACST));
- spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0);
-
- /* reset phy and setup aneg */
- card->aneg_count = 0;
- card->medium = BCM54XX_COPPER;
- spider_net_setup_aneg(card);
- mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
-
-}
-
-/**
- * spider_net_handle_error_irq - handles errors raised by an interrupt
- * @card: card structure
- * @status_reg: interrupt status register 0 (GHIINT0STS)
- * @error_reg1: interrupt status register 1 (GHIINT1STS)
- * @error_reg2: interrupt status register 2 (GHIINT2STS)
- *
- * spider_net_handle_error_irq treats or ignores all error conditions
- * found when an interrupt is presented
- */
-static void
-spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg,
- u32 error_reg1, u32 error_reg2)
-{
- u32 i;
- int show_error = 1;
-
- /* check GHIINT0STS ************************************/
- if (status_reg)
- for (i = 0; i < 32; i++)
- if (status_reg & (1<<i))
- switch (i)
- {
- /* let error_reg1 and error_reg2 evaluation decide, what to do
- case SPIDER_NET_PHYINT:
- case SPIDER_NET_GMAC2INT:
- case SPIDER_NET_GMAC1INT:
- case SPIDER_NET_GFIFOINT:
- case SPIDER_NET_DMACINT:
- case SPIDER_NET_GSYSINT:
- break; */
-
- case SPIDER_NET_GIPSINT:
- show_error = 0;
- break;
-
- case SPIDER_NET_GPWOPCMPINT:
- /* PHY write operation completed */
- show_error = 0;
- break;
- case SPIDER_NET_GPROPCMPINT:
- /* PHY read operation completed */
- /* we don't use semaphores, as we poll for the completion
- * of the read operation in spider_net_read_phy. Should take
- * about 50 us
- */
- show_error = 0;
- break;
- case SPIDER_NET_GPWFFINT:
- /* PHY command queue full */
- if (netif_msg_intr(card))
- dev_err(&card->netdev->dev, "PHY write queue full\n");
- show_error = 0;
- break;
-
- /* case SPIDER_NET_GRMDADRINT: not used. print a message */
- /* case SPIDER_NET_GRMARPINT: not used. print a message */
- /* case SPIDER_NET_GRMMPINT: not used. print a message */
-
- case SPIDER_NET_GDTDEN0INT:
- /* someone has set TX_DMA_EN to 0 */
- show_error = 0;
- break;
-
- case SPIDER_NET_GDDDEN0INT:
- case SPIDER_NET_GDCDEN0INT:
- case SPIDER_NET_GDBDEN0INT:
- case SPIDER_NET_GDADEN0INT:
- /* someone has set RX_DMA_EN to 0 */
- show_error = 0;
- break;
-
- /* RX interrupts */
- case SPIDER_NET_GDDFDCINT:
- case SPIDER_NET_GDCFDCINT:
- case SPIDER_NET_GDBFDCINT:
- case SPIDER_NET_GDAFDCINT:
- /* case SPIDER_NET_GDNMINT: not used. print a message */
- /* case SPIDER_NET_GCNMINT: not used. print a message */
- /* case SPIDER_NET_GBNMINT: not used. print a message */
- /* case SPIDER_NET_GANMINT: not used. print a message */
- /* case SPIDER_NET_GRFNMINT: not used. print a message */
- show_error = 0;
- break;
-
- /* TX interrupts */
- case SPIDER_NET_GDTFDCINT:
- show_error = 0;
- break;
- case SPIDER_NET_GTTEDINT:
- show_error = 0;
- break;
- case SPIDER_NET_GDTDCEINT:
- /* chain end. If a descriptor should be sent, kick off
- * tx dma
- if (card->tx_chain.tail != card->tx_chain.head)
- spider_net_kick_tx_dma(card);
- */
- show_error = 0;
- break;
-
- /* case SPIDER_NET_G1TMCNTINT: not used. print a message */
- /* case SPIDER_NET_GFREECNTINT: not used. print a message */
- }
-
- /* check GHIINT1STS ************************************/
- if (error_reg1)
- for (i = 0; i < 32; i++)
- if (error_reg1 & (1<<i))
- switch (i)
- {
- case SPIDER_NET_GTMFLLINT:
- /* TX RAM full may happen in normal operation.
- * Logging is not needed.
- */
- show_error = 0;
- break;
- case SPIDER_NET_GRFDFLLINT:
- case SPIDER_NET_GRFCFLLINT:
- case SPIDER_NET_GRFBFLLINT:
- case SPIDER_NET_GRFAFLLINT:
- case SPIDER_NET_GRMFLLINT:
- /* Could happen when rx chain is full */
- if (card->ignore_rx_ramfull == 0) {
- card->ignore_rx_ramfull = 1;
- spider_net_resync_head_ptr(card);
- spider_net_refill_rx_chain(card);
- spider_net_enable_rxdmac(card);
- card->num_rx_ints++;
- napi_schedule(&card->napi);
- }
- show_error = 0;
- break;
-
- /* case SPIDER_NET_GTMSHTINT: problem, print a message */
- case SPIDER_NET_GDTINVDINT:
- /* all right; tx from previous descr ok */
- show_error = 0;
- break;
-
- /* chain end */
- case SPIDER_NET_GDDDCEINT:
- case SPIDER_NET_GDCDCEINT:
- case SPIDER_NET_GDBDCEINT:
- case SPIDER_NET_GDADCEINT:
- spider_net_resync_head_ptr(card);
- spider_net_refill_rx_chain(card);
- spider_net_enable_rxdmac(card);
- card->num_rx_ints++;
- napi_schedule(&card->napi);
- show_error = 0;
- break;
-
- /* invalid descriptor */
- case SPIDER_NET_GDDINVDINT:
- case SPIDER_NET_GDCINVDINT:
- case SPIDER_NET_GDBINVDINT:
- case SPIDER_NET_GDAINVDINT:
- /* Could happen when rx chain is full */
- spider_net_resync_head_ptr(card);
- spider_net_refill_rx_chain(card);
- spider_net_enable_rxdmac(card);
- card->num_rx_ints++;
- napi_schedule(&card->napi);
- show_error = 0;
- break;
-
- /* case SPIDER_NET_GDTRSERINT: problem, print a message */
- /* case SPIDER_NET_GDDRSERINT: problem, print a message */
- /* case SPIDER_NET_GDCRSERINT: problem, print a message */
- /* case SPIDER_NET_GDBRSERINT: problem, print a message */
- /* case SPIDER_NET_GDARSERINT: problem, print a message */
- /* case SPIDER_NET_GDSERINT: problem, print a message */
- /* case SPIDER_NET_GDTPTERINT: problem, print a message */
- /* case SPIDER_NET_GDDPTERINT: problem, print a message */
- /* case SPIDER_NET_GDCPTERINT: problem, print a message */
- /* case SPIDER_NET_GDBPTERINT: problem, print a message */
- /* case SPIDER_NET_GDAPTERINT: problem, print a message */
- default:
- show_error = 1;
- break;
- }
-
- /* check GHIINT2STS ************************************/
- if (error_reg2)
- for (i = 0; i < 32; i++)
- if (error_reg2 & (1<<i))
- switch (i)
- {
- /* there is nothing we can (want to) do at this time. Log a
- * message, we can switch on and off the specific values later on
- case SPIDER_NET_GPROPERINT:
- case SPIDER_NET_GMCTCRSNGINT:
- case SPIDER_NET_GMCTLCOLINT:
- case SPIDER_NET_GMCTTMOTINT:
- case SPIDER_NET_GMCRCAERINT:
- case SPIDER_NET_GMCRCALERINT:
- case SPIDER_NET_GMCRALNERINT:
- case SPIDER_NET_GMCROVRINT:
- case SPIDER_NET_GMCRRNTINT:
- case SPIDER_NET_GMCRRXERINT:
- case SPIDER_NET_GTITCSERINT:
- case SPIDER_NET_GTIFMTERINT:
- case SPIDER_NET_GTIPKTRVKINT:
- case SPIDER_NET_GTISPINGINT:
- case SPIDER_NET_GTISADNGINT:
- case SPIDER_NET_GTISPDNGINT:
- case SPIDER_NET_GRIFMTERINT:
- case SPIDER_NET_GRIPKTRVKINT:
- case SPIDER_NET_GRISPINGINT:
- case SPIDER_NET_GRISADNGINT:
- case SPIDER_NET_GRISPDNGINT:
- break;
- */
- default:
- break;
- }
-
- if ((show_error) && (netif_msg_intr(card)) && net_ratelimit())
- dev_err(&card->netdev->dev, "Error interrupt, GHIINT0STS = 0x%08x, "
- "GHIINT1STS = 0x%08x, GHIINT2STS = 0x%08x\n",
- status_reg, error_reg1, error_reg2);
-
- /* clear interrupt sources */
- spider_net_write_reg(card, SPIDER_NET_GHIINT1STS, error_reg1);
- spider_net_write_reg(card, SPIDER_NET_GHIINT2STS, error_reg2);
-}
-
-/**
- * spider_net_interrupt - interrupt handler for spider_net
- * @irq: interrupt number
- * @ptr: pointer to net_device
- *
- * returns IRQ_HANDLED if the interrupt was raised by the card, or
- * IRQ_NONE otherwise.
- *
- * This is the interrupt handler, that turns off
- * interrupts for this device and makes the stack poll the driver
- */
-static irqreturn_t
-spider_net_interrupt(int irq, void *ptr)
-{
- struct net_device *netdev = ptr;
- struct spider_net_card *card = netdev_priv(netdev);
- u32 status_reg, error_reg1, error_reg2;
-
- status_reg = spider_net_read_reg(card, SPIDER_NET_GHIINT0STS);
- error_reg1 = spider_net_read_reg(card, SPIDER_NET_GHIINT1STS);
- error_reg2 = spider_net_read_reg(card, SPIDER_NET_GHIINT2STS);
-
- if (!(status_reg & SPIDER_NET_INT0_MASK_VALUE) &&
- !(error_reg1 & SPIDER_NET_INT1_MASK_VALUE) &&
- !(error_reg2 & SPIDER_NET_INT2_MASK_VALUE))
- return IRQ_NONE;
-
- if (status_reg & SPIDER_NET_RXINT ) {
- spider_net_rx_irq_off(card);
- napi_schedule(&card->napi);
- card->num_rx_ints++;
- }
- if (status_reg & SPIDER_NET_TXINT)
- napi_schedule(&card->napi);
-
- if (status_reg & SPIDER_NET_LINKINT)
- spider_net_link_reset(netdev);
-
- if (status_reg & SPIDER_NET_ERRINT )
- spider_net_handle_error_irq(card, status_reg,
- error_reg1, error_reg2);
-
- /* clear interrupt sources */
- spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg);
-
- return IRQ_HANDLED;
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/**
- * spider_net_poll_controller - artificial interrupt for netconsole etc.
- * @netdev: interface device structure
- *
- * see Documentation/networking/netconsole.rst
- */
-static void
-spider_net_poll_controller(struct net_device *netdev)
-{
- disable_irq(netdev->irq);
- spider_net_interrupt(netdev->irq, netdev);
- enable_irq(netdev->irq);
-}
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-
-/**
- * spider_net_enable_interrupts - enable interrupts
- * @card: card structure
- *
- * spider_net_enable_interrupts enables several interrupts
- */
-static void
-spider_net_enable_interrupts(struct spider_net_card *card)
-{
- spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK,
- SPIDER_NET_INT0_MASK_VALUE);
- spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK,
- SPIDER_NET_INT1_MASK_VALUE);
- spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK,
- SPIDER_NET_INT2_MASK_VALUE);
-}
-
-/**
- * spider_net_disable_interrupts - disable interrupts
- * @card: card structure
- *
- * spider_net_disable_interrupts disables all the interrupts
- */
-static void
-spider_net_disable_interrupts(struct spider_net_card *card)
-{
- spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
- spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0);
- spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0);
- spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0);
-}
-
-/**
- * spider_net_init_card - initializes the card
- * @card: card structure
- *
- * spider_net_init_card initializes the card so that other registers can
- * be used
- */
-static void
-spider_net_init_card(struct spider_net_card *card)
-{
- spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
- SPIDER_NET_CKRCTRL_STOP_VALUE);
-
- spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
- SPIDER_NET_CKRCTRL_RUN_VALUE);
-
- /* trigger ETOMOD signal */
- spider_net_write_reg(card, SPIDER_NET_GMACOPEMD,
- spider_net_read_reg(card, SPIDER_NET_GMACOPEMD) | 0x4);
-
- spider_net_disable_interrupts(card);
-}
-
-/**
- * spider_net_enable_card - enables the card by setting all kinds of regs
- * @card: card structure
- *
- * spider_net_enable_card sets a lot of SMMIO registers to enable the device
- */
-static void
-spider_net_enable_card(struct spider_net_card *card)
-{
- int i;
- /* the following array consists of (register),(value) pairs
- * that are set in this function. A register of 0 ends the list
- */
- u32 regs[][2] = {
- { SPIDER_NET_GRESUMINTNUM, 0 },
- { SPIDER_NET_GREINTNUM, 0 },
-
- /* set interrupt frame number registers */
- /* clear the single DMA engine registers first */
- { SPIDER_NET_GFAFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
- { SPIDER_NET_GFBFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
- { SPIDER_NET_GFCFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
- { SPIDER_NET_GFDFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
- /* then set, what we really need */
- { SPIDER_NET_GFFRMNUM, SPIDER_NET_FRAMENUM_VALUE },
-
- /* timer counter registers and stuff */
- { SPIDER_NET_GFREECNNUM, 0 },
- { SPIDER_NET_GONETIMENUM, 0 },
- { SPIDER_NET_GTOUTFRMNUM, 0 },
-
- /* RX mode setting */
- { SPIDER_NET_GRXMDSET, SPIDER_NET_RXMODE_VALUE },
- /* TX mode setting */
- { SPIDER_NET_GTXMDSET, SPIDER_NET_TXMODE_VALUE },
- /* IPSEC mode setting */
- { SPIDER_NET_GIPSECINIT, SPIDER_NET_IPSECINIT_VALUE },
-
- { SPIDER_NET_GFTRESTRT, SPIDER_NET_RESTART_VALUE },
-
- { SPIDER_NET_GMRWOLCTRL, 0 },
- { SPIDER_NET_GTESTMD, 0x10000000 },
- { SPIDER_NET_GTTQMSK, 0x00400040 },
-
- { SPIDER_NET_GMACINTEN, 0 },
-
- /* flow control stuff */
- { SPIDER_NET_GMACAPAUSE, SPIDER_NET_MACAPAUSE_VALUE },
- { SPIDER_NET_GMACTXPAUSE, SPIDER_NET_TXPAUSE_VALUE },
-
- { SPIDER_NET_GMACBSTLMT, SPIDER_NET_BURSTLMT_VALUE },
- { 0, 0}
- };
-
- i = 0;
- while (regs[i][0]) {
- spider_net_write_reg(card, regs[i][0], regs[i][1]);
- i++;
- }
-
- /* clear unicast filter table entries 1 to 14 */
- for (i = 1; i <= 14; i++) {
- spider_net_write_reg(card,
- SPIDER_NET_GMRUAFILnR + i * 8,
- 0x00080000);
- spider_net_write_reg(card,
- SPIDER_NET_GMRUAFILnR + i * 8 + 4,
- 0x00000000);
- }
-
- spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R, 0x08080000);
-
- spider_net_write_reg(card, SPIDER_NET_ECMODE, SPIDER_NET_ECMODE_VALUE);
-
- /* set chain tail address for RX chains and
- * enable DMA
- */
- spider_net_enable_rxchtails(card);
- spider_net_enable_rxdmac(card);
-
- spider_net_write_reg(card, SPIDER_NET_GRXDMAEN, SPIDER_NET_WOL_VALUE);
-
- spider_net_write_reg(card, SPIDER_NET_GMACLENLMT,
- SPIDER_NET_LENLMT_VALUE);
- spider_net_write_reg(card, SPIDER_NET_GMACOPEMD,
- SPIDER_NET_OPMODE_VALUE);
-
- spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
- SPIDER_NET_GDTBSTA);
-}
-
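The zero-terminated (register, value) table above is a common bring-up idiom; the same loop against a stubbed write function (offsets and values are illustrative):

#include <stdio.h>

static void write_reg(unsigned int reg, unsigned int val)
{
        printf("reg 0x%04x <- 0x%08x\n", reg, val);     /* stub */
}

int main(void)
{
        unsigned int regs[][2] = {
                { 0x0050, 0x00010000 }, /* illustrative offsets/values */
                { 0x0058, 0x00000011 },
                { 0, 0 }                /* a register of 0 ends the list */
        };
        int i;

        for (i = 0; regs[i][0]; i++)
                write_reg(regs[i][0], regs[i][1]);
        return 0;
}
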
-/**
- * spider_net_download_firmware - loads firmware into the adapter
- * @card: card structure
- * @firmware_ptr: pointer to firmware data
- *
- * spider_net_download_firmware loads the firmware data into the
- * adapter. It assumes the length etc. to be correct.
- */
-static int
-spider_net_download_firmware(struct spider_net_card *card,
- const void *firmware_ptr)
-{
- int sequencer, i;
- const u32 *fw_ptr = firmware_ptr;
-
- /* stop sequencers */
- spider_net_write_reg(card, SPIDER_NET_GSINIT,
- SPIDER_NET_STOP_SEQ_VALUE);
-
- for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
- sequencer++) {
- spider_net_write_reg(card,
- SPIDER_NET_GSnPRGADR + sequencer * 8, 0);
- for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
- spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
- sequencer * 8, *fw_ptr);
- fw_ptr++;
- }
- }
-
- if (spider_net_read_reg(card, SPIDER_NET_GSINIT))
- return -EIO;
-
- spider_net_write_reg(card, SPIDER_NET_GSINIT,
- SPIDER_NET_RUN_SEQ_VALUE);
-
- return 0;
-}
-
-/**
- * spider_net_init_firmware - reads in firmware parts
- * @card: card structure
- *
- * Returns 0 on success, <0 on failure
- *
- * spider_net_init_firmware opens the sequencer firmware and does some basic
- * checks. This function opens and releases the firmware structure. A call
- * to download the firmware is performed before the release.
- *
- * Firmware format
- * ===============
- * spider_fw.bin is expected to be a file containing 6*1024*4 bytes, 4k being
- * the program for each sequencer. Use the command
- * tail -q -n +2 Seq_code1_0x088.txt Seq_code2_0x090.txt \
- * Seq_code3_0x098.txt Seq_code4_0x0A0.txt Seq_code5_0x0A8.txt \
- * Seq_code6_0x0B0.txt | xxd -r -p -c4 > spider_fw.bin
- *
- * to generate spider_fw.bin, if you have sequencer programs with something
- * like the following contents for each sequencer:
- * <ONE LINE COMMENT>
- * <FIRST 4-BYTES-WORD FOR SEQUENCER>
- * <SECOND 4-BYTES-WORD FOR SEQUENCER>
- * ...
- * <1024th 4-BYTES-WORD FOR SEQUENCER>
- */
-static int
-spider_net_init_firmware(struct spider_net_card *card)
-{
- struct firmware *firmware = NULL;
- struct device_node *dn;
- const u8 *fw_prop = NULL;
- int err = -ENOENT;
- int fw_size;
-
- if (request_firmware((const struct firmware **)&firmware,
- SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) == 0) {
- if ( (firmware->size != SPIDER_NET_FIRMWARE_LEN) &&
- netif_msg_probe(card) ) {
- dev_err(&card->netdev->dev,
- "Incorrect size of spidernet firmware in " \
- "filesystem. Looking in host firmware...\n");
- goto try_host_fw;
- }
- err = spider_net_download_firmware(card, firmware->data);
-
- release_firmware(firmware);
- if (err)
- goto try_host_fw;
-
- goto done;
- }
-
-try_host_fw:
- dn = pci_device_to_OF_node(card->pdev);
- if (!dn)
- goto out_err;
-
- fw_prop = of_get_property(dn, "firmware", &fw_size);
- if (!fw_prop)
- goto out_err;
-
- if ( (fw_size != SPIDER_NET_FIRMWARE_LEN) &&
- netif_msg_probe(card) ) {
- dev_err(&card->netdev->dev,
- "Incorrect size of spidernet firmware in host firmware\n");
- goto done;
- }
-
- err = spider_net_download_firmware(card, fw_prop);
-
-done:
- return err;
-out_err:
- if (netif_msg_probe(card))
- dev_err(&card->netdev->dev,
- "Couldn't find spidernet firmware in filesystem " \
- "or host firmware\n");
- return err;
-}
-
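The firmware layout described above fixes the expected file size: 6 sequencers of 1024 32-bit words each. A quick arithmetic check of SPIDER_NET_FIRMWARE_LEN:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define SEQS     6              /* SPIDER_NET_FIRMWARE_SEQS */
#define SEQWORDS 1024           /* SPIDER_NET_FIRMWARE_SEQWORDS */

int main(void)
{
        size_t fw_len = SEQS * SEQWORDS * sizeof(uint32_t);

        assert(fw_len == 24576);        /* expected spider_fw.bin size */
        return 0;
}
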
-/**
- * spider_net_open - called upon ifconfig up
- * @netdev: interface device structure
- *
- * returns 0 on success, <0 on failure
- *
- * spider_net_open allocates all the descriptors and memory needed for
- * operation, sets up multicast list and enables interrupts
- */
-int
-spider_net_open(struct net_device *netdev)
-{
- struct spider_net_card *card = netdev_priv(netdev);
- int result;
-
- result = spider_net_init_firmware(card);
- if (result)
- goto init_firmware_failed;
-
- /* start probing with copper */
- card->aneg_count = 0;
- card->medium = BCM54XX_COPPER;
- spider_net_setup_aneg(card);
- if (card->phy.def->phy_id)
- mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
-
- result = spider_net_init_chain(card, &card->tx_chain);
- if (result)
- goto alloc_tx_failed;
- card->low_watermark = NULL;
-
- result = spider_net_init_chain(card, &card->rx_chain);
- if (result)
- goto alloc_rx_failed;
-
- /* Allocate rx skbs */
- result = spider_net_alloc_rx_skbs(card);
- if (result)
- goto alloc_skbs_failed;
-
- spider_net_set_multi(netdev);
-
- /* further enhancement: setup hw vlan, if needed */
-
- result = -EBUSY;
- if (request_irq(netdev->irq, spider_net_interrupt,
- IRQF_SHARED, netdev->name, netdev))
- goto register_int_failed;
-
- spider_net_enable_card(card);
-
- netif_start_queue(netdev);
- netif_carrier_on(netdev);
- napi_enable(&card->napi);
-
- spider_net_enable_interrupts(card);
-
- return 0;
-
-register_int_failed:
- spider_net_free_rx_chain_contents(card);
-alloc_skbs_failed:
- spider_net_free_chain(card, &card->rx_chain);
-alloc_rx_failed:
- spider_net_free_chain(card, &card->tx_chain);
-alloc_tx_failed:
- del_timer_sync(&card->aneg_timer);
-init_firmware_failed:
- return result;
-}
-
-/**
- * spider_net_link_phy
- * @t: timer context used to obtain the pointer to net card data structure
- */
-static void spider_net_link_phy(struct timer_list *t)
-{
- struct spider_net_card *card = from_timer(card, t, aneg_timer);
- struct mii_phy *phy = &card->phy;
-
- /* if link didn't come up after SPIDER_NET_ANEG_TIMEOUT tries, setup phy again */
- if (card->aneg_count > SPIDER_NET_ANEG_TIMEOUT) {
-
- pr_debug("%s: link is down trying to bring it up\n",
- card->netdev->name);
-
- switch (card->medium) {
- case BCM54XX_COPPER:
- /* enable fiber with autonegotiation first */
- if (phy->def->ops->enable_fiber)
- phy->def->ops->enable_fiber(phy, 1);
- card->medium = BCM54XX_FIBER;
- break;
-
- case BCM54XX_FIBER:
- /* fiber didn't come up, try to disable fiber autoneg */
- if (phy->def->ops->enable_fiber)
- phy->def->ops->enable_fiber(phy, 0);
- card->medium = BCM54XX_UNKNOWN;
- break;
-
- case BCM54XX_UNKNOWN:
- /* copper, and fiber with and without autoneg, failed;
- * retry from the beginning
- */
- spider_net_setup_aneg(card);
- card->medium = BCM54XX_COPPER;
- break;
- }
-
- card->aneg_count = 0;
- mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
- return;
- }
-
- /* link still not up, try again later */
- if (!(phy->def->ops->poll_link(phy))) {
- card->aneg_count++;
- mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
- return;
- }
-
- /* link came up, get abilities */
- phy->def->ops->read_link(phy);
-
- spider_net_write_reg(card, SPIDER_NET_GMACST,
- spider_net_read_reg(card, SPIDER_NET_GMACST));
- spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0x4);
-
- if (phy->speed == 1000)
- spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0x00000001);
- else
- spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0);
-
- card->aneg_count = 0;
-
- pr_info("%s: link up, %i Mbps, %s-duplex %sautoneg.\n",
- card->netdev->name, phy->speed,
- phy->duplex == 1 ? "Full" : "Half",
- phy->autoneg == 1 ? "" : "no ");
-}
-
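The medium fallback above cycles through copper, fiber with autonegotiation, fiber without, and back to copper. A state-machine sketch of that progression (enum values are illustrative):

enum medium { COPPER, FIBER, UNKNOWN };

static enum medium next_medium(enum medium m)
{
        switch (m) {
        case COPPER:
                return FIBER;   /* copper timed out: try fiber w/ autoneg */
        case FIBER:
                return UNKNOWN; /* fiber autoneg failed: fiber w/o autoneg */
        default:
                return COPPER;  /* everything failed: restart the cycle */
        }
}
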
-/**
- * spider_net_setup_phy - setup PHY
- * @card: card structure
- *
- * returns 0 on success, <0 on failure
- *
- * spider_net_setup_phy is used as part of spider_net_probe.
- **/
-static int
-spider_net_setup_phy(struct spider_net_card *card)
-{
- struct mii_phy *phy = &card->phy;
-
- spider_net_write_reg(card, SPIDER_NET_GDTDMASEL,
- SPIDER_NET_DMASEL_VALUE);
- spider_net_write_reg(card, SPIDER_NET_GPCCTRL,
- SPIDER_NET_PHY_CTRL_VALUE);
-
- phy->dev = card->netdev;
- phy->mdio_read = spider_net_read_phy;
- phy->mdio_write = spider_net_write_phy;
-
- for (phy->mii_id = 1; phy->mii_id <= 31; phy->mii_id++) {
- unsigned short id;
- id = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR);
- if (id != 0x0000 && id != 0xffff) {
- if (!sungem_phy_probe(phy, phy->mii_id)) {
- pr_info("Found %s.\n", phy->def->name);
- break;
- }
- }
- }
-
- return 0;
-}
-
-/**
- * spider_net_workaround_rxramfull - work around firmware bug
- * @card: card structure
- *
- * no return value
- **/
-static void
-spider_net_workaround_rxramfull(struct spider_net_card *card)
-{
- int i, sequencer = 0;
-
- /* cancel reset */
- spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
- SPIDER_NET_CKRCTRL_RUN_VALUE);
-
- /* empty sequencer data */
- for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
- sequencer++) {
- spider_net_write_reg(card, SPIDER_NET_GSnPRGADR +
- sequencer * 8, 0x0);
- for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
- spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
- sequencer * 8, 0x0);
- }
- }
-
- /* set sequencer operation */
- spider_net_write_reg(card, SPIDER_NET_GSINIT, 0x000000fe);
-
- /* reset */
- spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
- SPIDER_NET_CKRCTRL_STOP_VALUE);
-}
-
-/**
- * spider_net_stop - called upon ifconfig down
- * @netdev: interface device structure
- *
- * always returns 0
- */
-int
-spider_net_stop(struct net_device *netdev)
-{
- struct spider_net_card *card = netdev_priv(netdev);
-
- napi_disable(&card->napi);
- netif_carrier_off(netdev);
- netif_stop_queue(netdev);
- del_timer_sync(&card->tx_timer);
- del_timer_sync(&card->aneg_timer);
-
- spider_net_disable_interrupts(card);
-
- free_irq(netdev->irq, netdev);
-
- spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
- SPIDER_NET_DMA_TX_FEND_VALUE);
-
- /* turn off DMA, force end */
- spider_net_disable_rxdmac(card);
-
- /* release chains */
- spider_net_release_tx_chain(card, 1);
- spider_net_free_rx_chain_contents(card);
-
- spider_net_free_chain(card, &card->tx_chain);
- spider_net_free_chain(card, &card->rx_chain);
-
- return 0;
-}
-
-/**
- * spider_net_tx_timeout_task - task scheduled by the watchdog timeout
- * function (not to be called in interrupt context)
- * @work: work context used to obtain the pointer to net card data structure
- *
- * called as a task when tx hangs; resets the interface (if it is up)
- */
-static void
-spider_net_tx_timeout_task(struct work_struct *work)
-{
- struct spider_net_card *card =
- container_of(work, struct spider_net_card, tx_timeout_task);
- struct net_device *netdev = card->netdev;
-
- if (!(netdev->flags & IFF_UP))
- goto out;
-
- netif_device_detach(netdev);
- spider_net_stop(netdev);
-
- spider_net_workaround_rxramfull(card);
- spider_net_init_card(card);
-
- if (spider_net_setup_phy(card))
- goto out;
-
- spider_net_open(netdev);
- spider_net_kick_tx_dma(card);
- netif_device_attach(netdev);
-
-out:
- atomic_dec(&card->tx_timeout_task_counter);
-}
-
-/**
- * spider_net_tx_timeout - called when the tx timeout watchdog kicks in.
- * @netdev: interface device structure
- * @txqueue: unused
- *
- * called if tx hangs. Schedules a task that resets the interface
- */
-static void
-spider_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
-{
- struct spider_net_card *card;
-
- card = netdev_priv(netdev);
- atomic_inc(&card->tx_timeout_task_counter);
- if (netdev->flags & IFF_UP)
- schedule_work(&card->tx_timeout_task);
- else
- atomic_dec(&card->tx_timeout_task_counter);
- card->spider_stats.tx_timeouts++;
-}
-
-static const struct net_device_ops spider_net_ops = {
- .ndo_open = spider_net_open,
- .ndo_stop = spider_net_stop,
- .ndo_start_xmit = spider_net_xmit,
- .ndo_set_rx_mode = spider_net_set_multi,
- .ndo_set_mac_address = spider_net_set_mac,
- .ndo_eth_ioctl = spider_net_do_ioctl,
- .ndo_tx_timeout = spider_net_tx_timeout,
- .ndo_validate_addr = eth_validate_addr,
- /* HW VLAN */
-#ifdef CONFIG_NET_POLL_CONTROLLER
- /* poll controller */
- .ndo_poll_controller = spider_net_poll_controller,
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-};
-
-/**
- * spider_net_setup_netdev_ops - initialization of net_device operations
- * @netdev: net_device structure
- *
- * fills out function pointers in the net_device structure
- */
-static void
-spider_net_setup_netdev_ops(struct net_device *netdev)
-{
- netdev->netdev_ops = &spider_net_ops;
- netdev->watchdog_timeo = SPIDER_NET_WATCHDOG_TIMEOUT;
- /* ethtool ops */
- netdev->ethtool_ops = &spider_net_ethtool_ops;
-}
-
-/**
- * spider_net_setup_netdev - initialization of net_device
- * @card: card structure
- *
- * Returns 0 on success or <0 on failure
- *
- * spider_net_setup_netdev initializes the net_device structure
- **/
-static int
-spider_net_setup_netdev(struct spider_net_card *card)
-{
- int result;
- struct net_device *netdev = card->netdev;
- struct device_node *dn;
- struct sockaddr addr;
- const u8 *mac;
-
- SET_NETDEV_DEV(netdev, &card->pdev->dev);
-
- pci_set_drvdata(card->pdev, netdev);
-
- timer_setup(&card->tx_timer, spider_net_cleanup_tx_ring, 0);
- netdev->irq = card->pdev->irq;
-
- card->aneg_count = 0;
- timer_setup(&card->aneg_timer, spider_net_link_phy, 0);
-
- netif_napi_add(netdev, &card->napi, spider_net_poll);
-
- spider_net_setup_netdev_ops(netdev);
-
- netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
- if (SPIDER_NET_RX_CSUM_DEFAULT)
- netdev->features |= NETIF_F_RXCSUM;
- netdev->features |= NETIF_F_IP_CSUM;
- /* some time: NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
- * NETIF_F_HW_VLAN_CTAG_FILTER
- */
- netdev->lltx = true;
-
- /* MTU range: 64 - 2294 */
- netdev->min_mtu = SPIDER_NET_MIN_MTU;
- netdev->max_mtu = SPIDER_NET_MAX_MTU;
-
- netdev->irq = card->pdev->irq;
- card->num_rx_ints = 0;
- card->ignore_rx_ramfull = 0;
-
- dn = pci_device_to_OF_node(card->pdev);
- if (!dn)
- return -EIO;
-
- mac = of_get_property(dn, "local-mac-address", NULL);
- if (!mac)
- return -EIO;
- memcpy(addr.sa_data, mac, ETH_ALEN);
-
- result = spider_net_set_mac(netdev, &addr);
- if ((result) && (netif_msg_probe(card)))
- dev_err(&card->netdev->dev,
- "Failed to set MAC address: %i\n", result);
-
- result = register_netdev(netdev);
- if (result) {
- if (netif_msg_probe(card))
- dev_err(&card->netdev->dev,
- "Couldn't register net_device: %i\n", result);
- return result;
- }
-
- if (netif_msg_probe(card))
- pr_info("Initialized device %s.\n", netdev->name);
-
- return 0;
-}
-
-/**
- * spider_net_alloc_card - allocates net_device and card structure
- *
- * returns the card structure or NULL in case of errors
- *
- * the card and net_device structures are linked to each other
- */
-static struct spider_net_card *
-spider_net_alloc_card(void)
-{
- struct net_device *netdev;
- struct spider_net_card *card;
-
- netdev = alloc_etherdev(struct_size(card, darray,
- size_add(tx_descriptors, rx_descriptors)));
- if (!netdev)
- return NULL;
-
- card = netdev_priv(netdev);
- card->netdev = netdev;
- card->msg_enable = SPIDER_NET_DEFAULT_MSG;
- INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task);
- init_waitqueue_head(&card->waitq);
- atomic_set(&card->tx_timeout_task_counter, 0);
-
- card->rx_chain.num_desc = rx_descriptors;
- card->rx_chain.ring = card->darray;
- card->tx_chain.num_desc = tx_descriptors;
- card->tx_chain.ring = card->darray + rx_descriptors;
-
- return card;
-}
-
-/**
- * spider_net_undo_pci_setup - releases PCI resources
- * @card: card structure
- *
- * spider_net_undo_pci_setup releases the mapped regions
- */
-static void
-spider_net_undo_pci_setup(struct spider_net_card *card)
-{
- iounmap(card->regs);
- pci_release_regions(card->pdev);
-}
-
-/**
- * spider_net_setup_pci_dev - sets up the device in terms of PCI operations
- * @pdev: PCI device
- *
- * Returns the card structure or NULL if any errors occur
- *
- * spider_net_setup_pci_dev initializes pdev and together with the
- * functions called in spider_net_open configures the device so that
- * data can be transferred over it
- * The net_device structure is attached to the card structure, if the
- * function returns without error.
- **/
-static struct spider_net_card *
-spider_net_setup_pci_dev(struct pci_dev *pdev)
-{
- struct spider_net_card *card;
- unsigned long mmio_start, mmio_len;
-
- if (pci_enable_device(pdev)) {
- dev_err(&pdev->dev, "Couldn't enable PCI device\n");
- return NULL;
- }
-
- if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
- dev_err(&pdev->dev,
- "Couldn't find proper PCI device base address.\n");
- goto out_disable_dev;
- }
-
- if (pci_request_regions(pdev, spider_net_driver_name)) {
- dev_err(&pdev->dev,
- "Couldn't obtain PCI resources, aborting.\n");
- goto out_disable_dev;
- }
-
- pci_set_master(pdev);
-
- card = spider_net_alloc_card();
- if (!card) {
- dev_err(&pdev->dev,
- "Couldn't allocate net_device structure, aborting.\n");
- goto out_release_regions;
- }
- card->pdev = pdev;
-
- /* fetch base address and length of first resource */
- mmio_start = pci_resource_start(pdev, 0);
- mmio_len = pci_resource_len(pdev, 0);
-
- card->netdev->mem_start = mmio_start;
- card->netdev->mem_end = mmio_start + mmio_len;
- card->regs = ioremap(mmio_start, mmio_len);
-
- if (!card->regs) {
- dev_err(&pdev->dev,
- "Couldn't obtain PCI resources, aborting.\n");
- goto out_release_regions;
- }
-
- return card;
-
-out_release_regions:
- pci_release_regions(pdev);
-out_disable_dev:
- pci_disable_device(pdev);
- return NULL;
-}
-
-/**
- * spider_net_probe - initialization of a device
- * @pdev: PCI device
- * @ent: entry in the device id list
- *
- * Returns 0 on success, <0 on failure
- *
- * spider_net_probe initializes pdev and registers a net_device
- * structure for it. After that, the device can be ifconfig'ed up
- **/
-static int
-spider_net_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- int err = -EIO;
- struct spider_net_card *card;
-
- card = spider_net_setup_pci_dev(pdev);
- if (!card)
- goto out;
-
- spider_net_workaround_rxramfull(card);
- spider_net_init_card(card);
-
- err = spider_net_setup_phy(card);
- if (err)
- goto out_undo_pci;
-
- err = spider_net_setup_netdev(card);
- if (err)
- goto out_undo_pci;
-
- return 0;
-
-out_undo_pci:
- spider_net_undo_pci_setup(card);
- free_netdev(card->netdev);
-out:
- return err;
-}
-
-/**
- * spider_net_remove - removal of a device
- * @pdev: PCI device
- *
- * spider_net_remove is called to remove the device and unregister the
- * net_device
- **/
-static void
-spider_net_remove(struct pci_dev *pdev)
-{
- struct net_device *netdev;
- struct spider_net_card *card;
-
- netdev = pci_get_drvdata(pdev);
- card = netdev_priv(netdev);
-
- wait_event(card->waitq,
- atomic_read(&card->tx_timeout_task_counter) == 0);
-
- unregister_netdev(netdev);
-
- /* switch off card */
- spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
- SPIDER_NET_CKRCTRL_STOP_VALUE);
- spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
- SPIDER_NET_CKRCTRL_RUN_VALUE);
-
- spider_net_undo_pci_setup(card);
- free_netdev(netdev);
-}
-
-static struct pci_driver spider_net_driver = {
- .name = spider_net_driver_name,
- .id_table = spider_net_pci_tbl,
- .probe = spider_net_probe,
- .remove = spider_net_remove
-};
-
-/**
- * spider_net_init - init function when the driver is loaded
- *
- * spider_net_init registers the device driver
- */
-static int __init spider_net_init(void)
-{
- printk(KERN_INFO "Spidernet version %s.\n", VERSION);
-
- if (rx_descriptors < SPIDER_NET_RX_DESCRIPTORS_MIN) {
- rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MIN;
- pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
- }
- if (rx_descriptors > SPIDER_NET_RX_DESCRIPTORS_MAX) {
- rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MAX;
- pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
- }
- if (tx_descriptors < SPIDER_NET_TX_DESCRIPTORS_MIN) {
- tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_MIN;
- pr_info("adjusting tx descriptors to %i.\n", tx_descriptors);
- }
- if (tx_descriptors > SPIDER_NET_TX_DESCRIPTORS_MAX) {
- tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_MAX;
- pr_info("adjusting tx descriptors to %i.\n", tx_descriptors);
- }
-
- return pci_register_driver(&spider_net_driver);
-}
-
-/**
- * spider_net_cleanup - exit function when driver is unloaded
- *
- * spider_net_cleanup unregisters the device driver
- */
-static void __exit spider_net_cleanup(void)
-{
- pci_unregister_driver(&spider_net_driver);
-}
-
-module_init(spider_net_init);
-module_exit(spider_net_cleanup);
diff --git a/drivers/net/ethernet/toshiba/spider_net.h b/drivers/net/ethernet/toshiba/spider_net.h
deleted file mode 100644
index 51948e2b3a34..000000000000
--- a/drivers/net/ethernet/toshiba/spider_net.h
+++ /dev/null
@@ -1,475 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Network device driver for Cell Processor-Based Blade and Celleb platform
- *
- * (C) Copyright IBM Corp. 2005
- * (C) Copyright 2006 TOSHIBA CORPORATION
- *
- * Authors : Utz Bacher <utz.bacher@de.ibm.com>
- * Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
- */
-
-#ifndef _SPIDER_NET_H
-#define _SPIDER_NET_H
-
-#define VERSION "2.0 B"
-
-#include <linux/sungem_phy.h>
-
-int spider_net_stop(struct net_device *netdev);
-int spider_net_open(struct net_device *netdev);
-
-extern const struct ethtool_ops spider_net_ethtool_ops;
-
-extern char spider_net_driver_name[];
-
-#define SPIDER_NET_MAX_FRAME 2312
-#define SPIDER_NET_MAX_MTU 2294
-#define SPIDER_NET_MIN_MTU 64
-
-#define SPIDER_NET_RXBUF_ALIGN 128
-
-#define SPIDER_NET_RX_DESCRIPTORS_DEFAULT 256
-#define SPIDER_NET_RX_DESCRIPTORS_MIN 16
-#define SPIDER_NET_RX_DESCRIPTORS_MAX 512
-
-#define SPIDER_NET_TX_DESCRIPTORS_DEFAULT 256
-#define SPIDER_NET_TX_DESCRIPTORS_MIN 16
-#define SPIDER_NET_TX_DESCRIPTORS_MAX 512
-
-#define SPIDER_NET_TX_TIMER (HZ/5)
-#define SPIDER_NET_ANEG_TIMER (HZ)
-#define SPIDER_NET_ANEG_TIMEOUT 5
-
-#define SPIDER_NET_RX_CSUM_DEFAULT 1
-
-#define SPIDER_NET_WATCHDOG_TIMEOUT 50*HZ
-
-#define SPIDER_NET_FIRMWARE_SEQS 6
-#define SPIDER_NET_FIRMWARE_SEQWORDS 1024
-#define SPIDER_NET_FIRMWARE_LEN (SPIDER_NET_FIRMWARE_SEQS * \
- SPIDER_NET_FIRMWARE_SEQWORDS * \
- sizeof(u32))
-#define SPIDER_NET_FIRMWARE_NAME "spider_fw.bin"
-
-/** spider_net SMMIO registers */
-#define SPIDER_NET_GHIINT0STS 0x00000000
-#define SPIDER_NET_GHIINT1STS 0x00000004
-#define SPIDER_NET_GHIINT2STS 0x00000008
-#define SPIDER_NET_GHIINT0MSK 0x00000010
-#define SPIDER_NET_GHIINT1MSK 0x00000014
-#define SPIDER_NET_GHIINT2MSK 0x00000018
-
-#define SPIDER_NET_GRESUMINTNUM 0x00000020
-#define SPIDER_NET_GREINTNUM 0x00000024
-
-#define SPIDER_NET_GFFRMNUM 0x00000028
-#define SPIDER_NET_GFAFRMNUM 0x0000002c
-#define SPIDER_NET_GFBFRMNUM 0x00000030
-#define SPIDER_NET_GFCFRMNUM 0x00000034
-#define SPIDER_NET_GFDFRMNUM 0x00000038
-
-/* clear them (don't use it) */
-#define SPIDER_NET_GFREECNNUM 0x0000003c
-#define SPIDER_NET_GONETIMENUM 0x00000040
-
-#define SPIDER_NET_GTOUTFRMNUM 0x00000044
-
-#define SPIDER_NET_GTXMDSET 0x00000050
-#define SPIDER_NET_GPCCTRL 0x00000054
-#define SPIDER_NET_GRXMDSET 0x00000058
-#define SPIDER_NET_GIPSECINIT 0x0000005c
-#define SPIDER_NET_GFTRESTRT 0x00000060
-#define SPIDER_NET_GRXDMAEN 0x00000064
-#define SPIDER_NET_GMRWOLCTRL 0x00000068
-#define SPIDER_NET_GPCWOPCMD 0x0000006c
-#define SPIDER_NET_GPCROPCMD 0x00000070
-#define SPIDER_NET_GTTFRMCNT 0x00000078
-#define SPIDER_NET_GTESTMD 0x0000007c
-
-#define SPIDER_NET_GSINIT 0x00000080
-#define SPIDER_NET_GSnPRGADR 0x00000084
-#define SPIDER_NET_GSnPRGDAT 0x00000088
-
-#define SPIDER_NET_GMACOPEMD 0x00000100
-#define SPIDER_NET_GMACLENLMT 0x00000108
-#define SPIDER_NET_GMACST 0x00000110
-#define SPIDER_NET_GMACINTEN 0x00000118
-#define SPIDER_NET_GMACPHYCTRL 0x00000120
-
-#define SPIDER_NET_GMACAPAUSE 0x00000154
-#define SPIDER_NET_GMACTXPAUSE 0x00000164
-
-#define SPIDER_NET_GMACMODE 0x000001b0
-#define SPIDER_NET_GMACBSTLMT 0x000001b4
-
-#define SPIDER_NET_GMACUNIMACU 0x000001c0
-#define SPIDER_NET_GMACUNIMACL 0x000001c8
-
-#define SPIDER_NET_GMRMHFILnR 0x00000400
-#define SPIDER_NET_MULTICAST_HASHES 256
-
-#define SPIDER_NET_GMRUAFILnR 0x00000500
-#define SPIDER_NET_GMRUA0FIL15R 0x00000578
-
-#define SPIDER_NET_GTTQMSK 0x00000934
-
-/* RX DMA controller registers, all 0x00000a.. are for DMA controller A,
- * 0x00000b.. for DMA controller B, etc. */
-#define SPIDER_NET_GDADCHA 0x00000a00
-#define SPIDER_NET_GDADMACCNTR 0x00000a04
-#define SPIDER_NET_GDACTDPA 0x00000a08
-#define SPIDER_NET_GDACTDCNT 0x00000a0c
-#define SPIDER_NET_GDACDBADDR 0x00000a20
-#define SPIDER_NET_GDACDBSIZE 0x00000a24
-#define SPIDER_NET_GDACNEXTDA 0x00000a28
-#define SPIDER_NET_GDACCOMST 0x00000a2c
-#define SPIDER_NET_GDAWBCOMST 0x00000a30
-#define SPIDER_NET_GDAWBRSIZE 0x00000a34
-#define SPIDER_NET_GDAWBVSIZE 0x00000a38
-#define SPIDER_NET_GDAWBTRST 0x00000a3c
-#define SPIDER_NET_GDAWBTRERR 0x00000a40
-
-/* TX DMA controller registers */
-#define SPIDER_NET_GDTDCHA 0x00000e00
-#define SPIDER_NET_GDTDMACCNTR 0x00000e04
-#define SPIDER_NET_GDTCDPA 0x00000e08
-#define SPIDER_NET_GDTDMASEL 0x00000e14
-
-#define SPIDER_NET_ECMODE 0x00000f00
-/* clock and reset control register */
-#define SPIDER_NET_CKRCTRL 0x00000ff0
-
-/** SCONFIG registers */
-#define SPIDER_NET_SCONFIG_IOACTE 0x00002810
-
-/** interrupt mask registers */
-#define SPIDER_NET_INT0_MASK_VALUE 0x3f7fe2c7
-#define SPIDER_NET_INT1_MASK_VALUE 0x0000fff2
-#define SPIDER_NET_INT2_MASK_VALUE 0x000003f1
-
-/* we rely on flagged descriptor interrupts */
-#define SPIDER_NET_FRAMENUM_VALUE 0x00000000
-/* set this first, then the FRAMENUM_VALUE */
-#define SPIDER_NET_GFXFRAMES_VALUE 0x00000000
-
-#define SPIDER_NET_STOP_SEQ_VALUE 0x00000000
-#define SPIDER_NET_RUN_SEQ_VALUE 0x0000007e
-
-#define SPIDER_NET_PHY_CTRL_VALUE 0x00040040
-/* #define SPIDER_NET_PHY_CTRL_VALUE 0x01070080*/
-#define SPIDER_NET_RXMODE_VALUE 0x00000011
-/* auto retransmission in case of MAC aborts */
-#define SPIDER_NET_TXMODE_VALUE 0x00010000
-#define SPIDER_NET_RESTART_VALUE 0x00000000
-#define SPIDER_NET_WOL_VALUE 0x00001111
-#if 0
-#define SPIDER_NET_WOL_VALUE 0x00000000
-#endif
-#define SPIDER_NET_IPSECINIT_VALUE 0x6f716f71
-
-/* pause frames: automatic, no upper retransmission count */
-/* outside loopback mode: ETOMOD signal doesn't matter, not connected */
-/* ETOMOD signal is brought to PHY reset. bit 2 must be 1 in Celleb */
-#define SPIDER_NET_OPMODE_VALUE 0x00000067
-/*#define SPIDER_NET_OPMODE_VALUE 0x001b0062*/
-#define SPIDER_NET_LENLMT_VALUE 0x00000908
-
-#define SPIDER_NET_MACAPAUSE_VALUE 0x00000800 /* about 1 ms */
-#define SPIDER_NET_TXPAUSE_VALUE 0x00000000
-
-#define SPIDER_NET_MACMODE_VALUE 0x00000001
-#define SPIDER_NET_BURSTLMT_VALUE 0x00000200 /* about 16 us */
-
-/* DMAC control register GDMACCNTR
- *
- * 1(0) enable r/tx dma
- * 0000000 fixed to 0
- *
- * 000000 fixed to 0
- * 0(1) en/disable descr writeback on force end
- * 0(1) force end
- *
- * 000000 fixed to 0
- * 00 burst alignment: 128 bytes
- * 11 burst alignment: 1024 bytes
- *
- * 00000 fixed to 0
- * 0 descr writeback size 32 bytes
- * 0(1) descr chain end interrupt enable
- * 0(1) descr status writeback enable */
-
-/* to set RX_DMA_EN */
-#define SPIDER_NET_DMA_RX_VALUE 0x80000000
-#define SPIDER_NET_DMA_RX_FEND_VALUE 0x00030003
-/* to set TX_DMA_EN */
-#define SPIDER_NET_TX_DMA_EN 0x80000000
-#define SPIDER_NET_GDTBSTA 0x00000300
-#define SPIDER_NET_GDTDCEIDIS 0x00000002
-#define SPIDER_NET_DMA_TX_VALUE SPIDER_NET_TX_DMA_EN | \
- SPIDER_NET_GDTDCEIDIS | \
- SPIDER_NET_GDTBSTA
-
-#define SPIDER_NET_DMA_TX_FEND_VALUE 0x00030003
-
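SPIDER_NET_DMA_TX_VALUE above is the OR of three of these bits. A standalone check of the composed value:

#include <assert.h>

int main(void)
{
        unsigned int v = 0x80000000u    /* SPIDER_NET_TX_DMA_EN */
                       | 0x00000002u    /* SPIDER_NET_GDTDCEIDIS */
                       | 0x00000300u;   /* SPIDER_NET_GDTBSTA */

        assert(v == 0x80000302u);
        return 0;
}
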
-/* SPIDER_NET_UA_DESCR_VALUE is OR'ed with the unicast address */
-#define SPIDER_NET_UA_DESCR_VALUE 0x00080000
-#define SPIDER_NET_PROMISC_VALUE 0x00080000
-#define SPIDER_NET_NONPROMISC_VALUE 0x00000000
-
-#define SPIDER_NET_DMASEL_VALUE 0x00000001
-
-#define SPIDER_NET_ECMODE_VALUE 0x00000000
-
-#define SPIDER_NET_CKRCTRL_RUN_VALUE 0x1fff010f
-#define SPIDER_NET_CKRCTRL_STOP_VALUE 0x0000010f
-
-#define SPIDER_NET_SBIMSTATE_VALUE 0x00000000
-#define SPIDER_NET_SBTMSTATE_VALUE 0x00000000
-
-/* SPIDER_NET_GHIINT0STS bits, in reverse order so that they can be used
- * with 1 << SPIDER_NET_... */
-enum spider_net_int0_status {
- SPIDER_NET_GPHYINT = 0,
- SPIDER_NET_GMAC2INT,
- SPIDER_NET_GMAC1INT,
- SPIDER_NET_GIPSINT,
- SPIDER_NET_GFIFOINT,
- SPIDER_NET_GDMACINT,
- SPIDER_NET_GSYSINT,
- SPIDER_NET_GPWOPCMPINT,
- SPIDER_NET_GPROPCMPINT,
- SPIDER_NET_GPWFFINT,
- SPIDER_NET_GRMDADRINT,
- SPIDER_NET_GRMARPINT,
- SPIDER_NET_GRMMPINT,
- SPIDER_NET_GDTDEN0INT,
- SPIDER_NET_GDDDEN0INT,
- SPIDER_NET_GDCDEN0INT,
- SPIDER_NET_GDBDEN0INT,
- SPIDER_NET_GDADEN0INT,
- SPIDER_NET_GDTFDCINT,
- SPIDER_NET_GDDFDCINT,
- SPIDER_NET_GDCFDCINT,
- SPIDER_NET_GDBFDCINT,
- SPIDER_NET_GDAFDCINT,
- SPIDER_NET_GTTEDINT,
- SPIDER_NET_GDTDCEINT,
- SPIDER_NET_GRFDNMINT,
- SPIDER_NET_GRFCNMINT,
- SPIDER_NET_GRFBNMINT,
- SPIDER_NET_GRFANMINT,
- SPIDER_NET_GRFNMINT,
- SPIDER_NET_G1TMCNTINT,
- SPIDER_NET_GFREECNTINT
-};
-/* GHIINT1STS bits */
-enum spider_net_int1_status {
- SPIDER_NET_GTMFLLINT = 0,
- SPIDER_NET_GRMFLLINT,
- SPIDER_NET_GTMSHTINT,
- SPIDER_NET_GDTINVDINT,
- SPIDER_NET_GRFDFLLINT,
- SPIDER_NET_GDDDCEINT,
- SPIDER_NET_GDDINVDINT,
- SPIDER_NET_GRFCFLLINT,
- SPIDER_NET_GDCDCEINT,
- SPIDER_NET_GDCINVDINT,
- SPIDER_NET_GRFBFLLINT,
- SPIDER_NET_GDBDCEINT,
- SPIDER_NET_GDBINVDINT,
- SPIDER_NET_GRFAFLLINT,
- SPIDER_NET_GDADCEINT,
- SPIDER_NET_GDAINVDINT,
- SPIDER_NET_GDTRSERINT,
- SPIDER_NET_GDDRSERINT,
- SPIDER_NET_GDCRSERINT,
- SPIDER_NET_GDBRSERINT,
- SPIDER_NET_GDARSERINT,
- SPIDER_NET_GDSERINT,
- SPIDER_NET_GDTPTERINT,
- SPIDER_NET_GDDPTERINT,
- SPIDER_NET_GDCPTERINT,
- SPIDER_NET_GDBPTERINT,
- SPIDER_NET_GDAPTERINT
-};
-/* GHIINT2STS bits */
-enum spider_net_int2_status {
- SPIDER_NET_GPROPERINT = 0,
- SPIDER_NET_GMCTCRSNGINT,
- SPIDER_NET_GMCTLCOLINT,
- SPIDER_NET_GMCTTMOTINT,
- SPIDER_NET_GMCRCAERINT,
- SPIDER_NET_GMCRCALERINT,
- SPIDER_NET_GMCRALNERINT,
- SPIDER_NET_GMCROVRINT,
- SPIDER_NET_GMCRRNTINT,
- SPIDER_NET_GMCRRXERINT,
- SPIDER_NET_GTITCSERINT,
- SPIDER_NET_GTIFMTERINT,
- SPIDER_NET_GTIPKTRVKINT,
- SPIDER_NET_GTISPINGINT,
- SPIDER_NET_GTISADNGINT,
- SPIDER_NET_GTISPDNGINT,
- SPIDER_NET_GRIFMTERINT,
- SPIDER_NET_GRIPKTRVKINT,
- SPIDER_NET_GRISPINGINT,
- SPIDER_NET_GRISADNGINT,
- SPIDER_NET_GRISPDNGINT
-};
-
-#define SPIDER_NET_TXINT (1 << SPIDER_NET_GDTFDCINT)
-
-/* We rely on flagged descriptor interrupts */
-#define SPIDER_NET_RXINT ( (1 << SPIDER_NET_GDAFDCINT) )
-
-#define SPIDER_NET_LINKINT ( 1 << SPIDER_NET_GMAC2INT )
-
-#define SPIDER_NET_ERRINT ( 0xffffffff & \
- (~SPIDER_NET_TXINT) & \
- (~SPIDER_NET_RXINT) & \
- (~SPIDER_NET_LINKINT) )
-
-#define SPIDER_NET_GPREXEC 0x80000000
-#define SPIDER_NET_GPRDAT_MASK 0x0000ffff
-
-#define SPIDER_NET_DMAC_NOINTR_COMPLETE 0x00800000
-#define SPIDER_NET_DMAC_TXFRMTL 0x00040000
-#define SPIDER_NET_DMAC_TCP 0x00020000
-#define SPIDER_NET_DMAC_UDP 0x00030000
-#define SPIDER_NET_TXDCEST 0x08000000
-
-#define SPIDER_NET_DESCR_RXFDIS 0x00000001
-#define SPIDER_NET_DESCR_RXDCEIS 0x00000002
-#define SPIDER_NET_DESCR_RXDEN0IS 0x00000004
-#define SPIDER_NET_DESCR_RXINVDIS 0x00000008
-#define SPIDER_NET_DESCR_RXRERRIS 0x00000010
-#define SPIDER_NET_DESCR_RXFDCIMS 0x00000100
-#define SPIDER_NET_DESCR_RXDCEIMS 0x00000200
-#define SPIDER_NET_DESCR_RXDEN0IMS 0x00000400
-#define SPIDER_NET_DESCR_RXINVDIMS 0x00000800
-#define SPIDER_NET_DESCR_RXRERRMIS 0x00001000
-#define SPIDER_NET_DESCR_UNUSED 0x077fe0e0
-
-#define SPIDER_NET_DESCR_IND_PROC_MASK 0xF0000000
-#define SPIDER_NET_DESCR_COMPLETE 0x00000000 /* used in rx and tx */
-#define SPIDER_NET_DESCR_RESPONSE_ERROR 0x10000000 /* used in rx and tx */
-#define SPIDER_NET_DESCR_PROTECTION_ERROR 0x20000000 /* used in rx and tx */
-#define SPIDER_NET_DESCR_FRAME_END 0x40000000 /* used in rx */
-#define SPIDER_NET_DESCR_FORCE_END 0x50000000 /* used in rx and tx */
-#define SPIDER_NET_DESCR_CARDOWNED 0xA0000000 /* used in rx and tx */
-#define SPIDER_NET_DESCR_NOT_IN_USE 0xF0000000
-#define SPIDER_NET_DESCR_TXDESFLG 0x00800000
-
-#define SPIDER_NET_DESCR_BAD_STATUS (SPIDER_NET_DESCR_RXDEN0IS | \
- SPIDER_NET_DESCR_RXRERRIS | \
- SPIDER_NET_DESCR_RXDEN0IMS | \
- SPIDER_NET_DESCR_RXINVDIMS | \
- SPIDER_NET_DESCR_RXRERRMIS | \
- SPIDER_NET_DESCR_UNUSED)
-
-/* Descriptor, as defined by the hardware */
-struct spider_net_hw_descr {
- u32 buf_addr;
- u32 buf_size;
- u32 next_descr_addr;
- u32 dmac_cmd_status;
- u32 result_size;
- u32 valid_size; /* all zeroes for tx */
- u32 data_status;
- u32 data_error; /* all zeroes for tx */
-} __attribute__((aligned(32)));
-
-struct spider_net_descr {
- struct spider_net_hw_descr *hwdescr;
- struct sk_buff *skb;
- u32 bus_addr;
- struct spider_net_descr *next;
- struct spider_net_descr *prev;
-};
-
-struct spider_net_descr_chain {
- spinlock_t lock;
- struct spider_net_descr *head;
- struct spider_net_descr *tail;
- struct spider_net_descr *ring;
- int num_desc;
- struct spider_net_hw_descr *hwring;
- dma_addr_t dma_addr;
-};
-
-/* descriptor data_status bits */
-#define SPIDER_NET_RX_IPCHK 29
-#define SPIDER_NET_RX_TCPCHK 28
-#define SPIDER_NET_VLAN_PACKET 21
-#define SPIDER_NET_DATA_STATUS_CKSUM_MASK ( (1 << SPIDER_NET_RX_IPCHK) | \
- (1 << SPIDER_NET_RX_TCPCHK) )
-
-/* descriptor data_error bits */
-#define SPIDER_NET_RX_IPCHKERR 27
-#define SPIDER_NET_RX_RXTCPCHKERR 28
-
-#define SPIDER_NET_DATA_ERR_CKSUM_MASK (1 << SPIDER_NET_RX_IPCHKERR)
-
-/* the cases in which we don't pass the packet to the stack.
- * 701b8000 would be correct, but every packet gets that flag */
-#define SPIDER_NET_DESTROY_RX_FLAGS 0x700b8000
-
-#define SPIDER_NET_DEFAULT_MSG ( NETIF_MSG_DRV | \
- NETIF_MSG_PROBE | \
- NETIF_MSG_LINK | \
- NETIF_MSG_TIMER | \
- NETIF_MSG_IFDOWN | \
- NETIF_MSG_IFUP | \
- NETIF_MSG_RX_ERR | \
- NETIF_MSG_TX_ERR | \
- NETIF_MSG_TX_QUEUED | \
- NETIF_MSG_INTR | \
- NETIF_MSG_TX_DONE | \
- NETIF_MSG_RX_STATUS | \
- NETIF_MSG_PKTDATA | \
- NETIF_MSG_HW | \
- NETIF_MSG_WOL )
-
-struct spider_net_extra_stats {
- unsigned long rx_desc_error;
- unsigned long tx_timeouts;
- unsigned long alloc_rx_skb_error;
- unsigned long rx_iommu_map_error;
- unsigned long tx_iommu_map_error;
- unsigned long rx_desc_unk_state;
-};
-
-struct spider_net_card {
- struct net_device *netdev;
- struct pci_dev *pdev;
- struct mii_phy phy;
-
- struct napi_struct napi;
-
- int medium;
-
- void __iomem *regs;
-
- struct spider_net_descr_chain tx_chain;
- struct spider_net_descr_chain rx_chain;
- struct spider_net_descr *low_watermark;
-
- int aneg_count;
- struct timer_list aneg_timer;
- struct timer_list tx_timer;
- struct work_struct tx_timeout_task;
- atomic_t tx_timeout_task_counter;
- wait_queue_head_t waitq;
- int num_rx_ints;
- int ignore_rx_ramfull;
-
- /* for ethtool */
- int msg_enable;
- struct spider_net_extra_stats spider_stats;
-
- /* Must be last item in struct */
- struct spider_net_descr darray[];
-};
-
-#endif
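
For reference, the top nibble of dmac_cmd_status in the deleted header above encodes descriptor ownership (SPIDER_NET_DESCR_IND_PROC_MASK). A minimal sketch of how a cleanup loop would classify a descriptor, assuming only the definitions from that header:

static bool spider_net_descr_done(const struct spider_net_hw_descr *hwdescr)
{
	u32 state = hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK;

	/* the card still owns the descriptor: nothing to reap yet */
	if (state == SPIDER_NET_DESCR_CARDOWNED ||
	    state == SPIDER_NET_DESCR_NOT_IN_USE)
		return false;

	/* COMPLETE, FRAME_END, FORCE_END or an error state: reap it */
	return true;
}
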
diff --git a/drivers/net/ethernet/toshiba/spider_net_ethtool.c b/drivers/net/ethernet/toshiba/spider_net_ethtool.c
deleted file mode 100644
index fef9fd127b5e..000000000000
--- a/drivers/net/ethernet/toshiba/spider_net_ethtool.c
+++ /dev/null
@@ -1,174 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Network device driver for Cell Processor-Based Blade
- *
- * (C) Copyright IBM Corp. 2005
- *
- * Authors : Utz Bacher <utz.bacher@de.ibm.com>
- * Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
- */
-
-#include <linux/netdevice.h>
-#include <linux/ethtool.h>
-#include <linux/pci.h>
-
-#include "spider_net.h"
-
-
-static struct {
- const char str[ETH_GSTRING_LEN];
-} ethtool_stats_keys[] = {
- { "tx_packets" },
- { "tx_bytes" },
- { "rx_packets" },
- { "rx_bytes" },
- { "tx_errors" },
- { "tx_dropped" },
- { "rx_dropped" },
- { "rx_descriptor_error" },
- { "tx_timeouts" },
- { "alloc_rx_skb_error" },
- { "rx_iommu_map_error" },
- { "tx_iommu_map_error" },
- { "rx_desc_unk_state" },
-};
-
-static int
-spider_net_ethtool_get_link_ksettings(struct net_device *netdev,
- struct ethtool_link_ksettings *cmd)
-{
- struct spider_net_card *card;
- card = netdev_priv(netdev);
-
- ethtool_link_ksettings_zero_link_mode(cmd, supported);
- ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
-
- ethtool_link_ksettings_zero_link_mode(cmd, advertising);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
-
- cmd->base.port = PORT_FIBRE;
- cmd->base.speed = card->phy.speed;
- cmd->base.duplex = DUPLEX_FULL;
-
- return 0;
-}
-
-static void
-spider_net_ethtool_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *drvinfo)
-{
- struct spider_net_card *card;
- card = netdev_priv(netdev);
-
- /* clear and fill out info */
- strscpy(drvinfo->driver, spider_net_driver_name,
- sizeof(drvinfo->driver));
- strscpy(drvinfo->version, VERSION, sizeof(drvinfo->version));
- strscpy(drvinfo->fw_version, "no information",
- sizeof(drvinfo->fw_version));
- strscpy(drvinfo->bus_info, pci_name(card->pdev),
- sizeof(drvinfo->bus_info));
-}
-
-static void
-spider_net_ethtool_get_wol(struct net_device *netdev,
- struct ethtool_wolinfo *wolinfo)
-{
- /* no support for wol */
- wolinfo->supported = 0;
- wolinfo->wolopts = 0;
-}
-
-static u32
-spider_net_ethtool_get_msglevel(struct net_device *netdev)
-{
- struct spider_net_card *card;
- card = netdev_priv(netdev);
- return card->msg_enable;
-}
-
-static void
-spider_net_ethtool_set_msglevel(struct net_device *netdev,
- u32 level)
-{
- struct spider_net_card *card;
- card = netdev_priv(netdev);
- card->msg_enable = level;
-}
-
-static int
-spider_net_ethtool_nway_reset(struct net_device *netdev)
-{
- if (netif_running(netdev)) {
- spider_net_stop(netdev);
- spider_net_open(netdev);
- }
- return 0;
-}
-
-static void
-spider_net_ethtool_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ering,
- struct kernel_ethtool_ringparam *kernel_ering,
- struct netlink_ext_ack *extack)
-{
- struct spider_net_card *card = netdev_priv(netdev);
-
- ering->tx_max_pending = SPIDER_NET_TX_DESCRIPTORS_MAX;
- ering->tx_pending = card->tx_chain.num_desc;
- ering->rx_max_pending = SPIDER_NET_RX_DESCRIPTORS_MAX;
- ering->rx_pending = card->rx_chain.num_desc;
-}
-
-static int spider_net_get_sset_count(struct net_device *netdev, int sset)
-{
- switch (sset) {
- case ETH_SS_STATS:
- return ARRAY_SIZE(ethtool_stats_keys);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static void spider_net_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats, u64 *data)
-{
- struct spider_net_card *card = netdev_priv(netdev);
-
- data[0] = netdev->stats.tx_packets;
- data[1] = netdev->stats.tx_bytes;
- data[2] = netdev->stats.rx_packets;
- data[3] = netdev->stats.rx_bytes;
- data[4] = netdev->stats.tx_errors;
- data[5] = netdev->stats.tx_dropped;
- data[6] = netdev->stats.rx_dropped;
- data[7] = card->spider_stats.rx_desc_error;
- data[8] = card->spider_stats.tx_timeouts;
- data[9] = card->spider_stats.alloc_rx_skb_error;
- data[10] = card->spider_stats.rx_iommu_map_error;
- data[11] = card->spider_stats.tx_iommu_map_error;
- data[12] = card->spider_stats.rx_desc_unk_state;
-}
-
-static void spider_net_get_strings(struct net_device *netdev, u32 stringset,
- u8 *data)
-{
- memcpy(data, ethtool_stats_keys, sizeof(ethtool_stats_keys));
-}
-
-const struct ethtool_ops spider_net_ethtool_ops = {
- .get_drvinfo = spider_net_ethtool_get_drvinfo,
- .get_wol = spider_net_ethtool_get_wol,
- .get_msglevel = spider_net_ethtool_get_msglevel,
- .set_msglevel = spider_net_ethtool_set_msglevel,
- .get_link = ethtool_op_get_link,
- .nway_reset = spider_net_ethtool_nway_reset,
- .get_ringparam = spider_net_ethtool_get_ringparam,
- .get_strings = spider_net_get_strings,
- .get_sset_count = spider_net_get_sset_count,
- .get_ethtool_stats = spider_net_get_ethtool_stats,
- .get_link_ksettings = spider_net_ethtool_get_link_ksettings,
-};
-
diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig
index e46ccebcfd22..47e3e8434b9e 100644
--- a/drivers/net/ethernet/wangxun/Kconfig
+++ b/drivers/net/ethernet/wangxun/Kconfig
@@ -18,6 +18,7 @@ if NET_VENDOR_WANGXUN
config LIBWX
tristate
+ depends on PTP_1588_CLOCK_OPTIONAL
select PAGE_POOL
help
Common library for Wangxun(R) Ethernet drivers.
@@ -25,6 +26,7 @@ config LIBWX
config NGBE
tristate "Wangxun(R) GbE PCI Express adapters support"
depends on PCI
+ depends on PTP_1588_CLOCK_OPTIONAL
select LIBWX
select PHYLINK
help
@@ -42,6 +44,7 @@ config TXGBE
depends on PCI
depends on COMMON_CLK
depends on I2C_DESIGNWARE_PLATFORM
+ depends on PTP_1588_CLOCK_OPTIONAL
select MARVELL_10G_PHY
select REGMAP
select PHYLINK
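
PTP_1588_CLOCK_OPTIONAL lets these drivers link whether CONFIG_PTP_1588_CLOCK is built in, modular, or disabled, because the ptp_clock_* helpers fall back to static inline stubs in the disabled case. A hedged illustration of the pattern this enables (the wrapper function is hypothetical; the stub behaviour is what the dependency guarantees):

#include <linux/ptp_clock_kernel.h>

static void example_register_phc(struct ptp_clock_info *caps, struct device *dev)
{
	/* with PTP_1588_CLOCK=n this returns NULL, so no #ifdef is needed */
	struct ptp_clock *clk = ptp_clock_register(caps, dev);

	if (IS_ERR(clk))
		return;				/* real error from the PTP core */
	if (!clk)
		return;				/* PTP support compiled out */

	dev_info(dev, "PHC index %d\n", ptp_clock_index(clk));
}
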
diff --git a/drivers/net/ethernet/wangxun/libwx/Makefile b/drivers/net/ethernet/wangxun/libwx/Makefile
index 42ccd6e4052e..e9f0f1f2309b 100644
--- a/drivers/net/ethernet/wangxun/libwx/Makefile
+++ b/drivers/net/ethernet/wangxun/libwx/Makefile
@@ -4,4 +4,4 @@
obj-$(CONFIG_LIBWX) += libwx.o
-libwx-objs := wx_hw.o wx_lib.o wx_ethtool.o
+libwx-objs := wx_hw.o wx_lib.o wx_ethtool.o wx_ptp.o
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
index abe5921dde02..43019ec9329c 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
@@ -41,6 +41,9 @@ static const struct wx_stats wx_gstrings_stats[] = {
WX_STAT("rx_csum_offload_good_count", hw_csum_rx_good),
WX_STAT("rx_csum_offload_errors", hw_csum_rx_error),
WX_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
+ WX_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
+ WX_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped),
+ WX_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
};
static const struct wx_stats wx_gstrings_fdir_stats[] = {
@@ -69,7 +72,7 @@ int wx_get_sset_count(struct net_device *netdev, int sset)
switch (sset) {
case ETH_SS_STATS:
- return (wx->mac.type == wx_mac_sp) ?
+ return (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) ?
WX_STATS_LEN + WX_FDIR_STATS_LEN : WX_STATS_LEN;
default:
return -EOPNOTSUPP;
@@ -87,7 +90,7 @@ void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
case ETH_SS_STATS:
for (i = 0; i < WX_GLOBAL_STATS_LEN; i++)
ethtool_puts(&p, wx_gstrings_stats[i].stat_string);
- if (wx->mac.type == wx_mac_sp) {
+ if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) {
for (i = 0; i < WX_FDIR_STATS_LEN; i++)
ethtool_puts(&p, wx_gstrings_fdir_stats[i].stat_string);
}
@@ -121,7 +124,7 @@ void wx_get_ethtool_stats(struct net_device *netdev,
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
- if (wx->mac.type == wx_mac_sp) {
+ if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) {
for (k = 0; k < WX_FDIR_STATS_LEN; k++) {
p = (char *)wx + wx_gstrings_fdir_stats[k].stat_offset;
data[i++] = *(u64 *)p;
@@ -196,7 +199,7 @@ void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
unsigned int stats_len = WX_STATS_LEN;
struct wx *wx = netdev_priv(netdev);
- if (wx->mac.type == wx_mac_sp)
+ if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))
stats_len += WX_FDIR_STATS_LEN;
strscpy(info->driver, wx->driver_name, sizeof(info->driver));
@@ -216,6 +219,9 @@ int wx_nway_reset(struct net_device *netdev)
{
struct wx *wx = netdev_priv(netdev);
+ if (wx->mac.type == wx_mac_aml)
+ return -EOPNOTSUPP;
+
return phylink_ethtool_nway_reset(wx->phylink);
}
EXPORT_SYMBOL(wx_nway_reset);
@@ -225,6 +231,9 @@ int wx_get_link_ksettings(struct net_device *netdev,
{
struct wx *wx = netdev_priv(netdev);
+ if (wx->mac.type == wx_mac_aml)
+ return -EOPNOTSUPP;
+
return phylink_ethtool_ksettings_get(wx->phylink, cmd);
}
EXPORT_SYMBOL(wx_get_link_ksettings);
@@ -234,6 +243,9 @@ int wx_set_link_ksettings(struct net_device *netdev,
{
struct wx *wx = netdev_priv(netdev);
+ if (wx->mac.type == wx_mac_aml)
+ return -EOPNOTSUPP;
+
return phylink_ethtool_ksettings_set(wx->phylink, cmd);
}
EXPORT_SYMBOL(wx_set_link_ksettings);
@@ -243,6 +255,9 @@ void wx_get_pauseparam(struct net_device *netdev,
{
struct wx *wx = netdev_priv(netdev);
+ if (wx->mac.type == wx_mac_aml)
+ return;
+
phylink_ethtool_get_pauseparam(wx->phylink, pause);
}
EXPORT_SYMBOL(wx_get_pauseparam);
@@ -252,6 +267,9 @@ int wx_set_pauseparam(struct net_device *netdev,
{
struct wx *wx = netdev_priv(netdev);
+ if (wx->mac.type == wx_mac_aml)
+ return -EOPNOTSUPP;
+
return phylink_ethtool_set_pauseparam(wx->phylink, pause);
}
EXPORT_SYMBOL(wx_set_pauseparam);
@@ -322,10 +340,17 @@ int wx_set_coalesce(struct net_device *netdev,
if (ec->tx_max_coalesced_frames_irq)
wx->tx_work_limit = ec->tx_max_coalesced_frames_irq;
- if (wx->mac.type == wx_mac_sp)
+ switch (wx->mac.type) {
+ case wx_mac_sp:
max_eitr = WX_SP_MAX_EITR;
- else
+ break;
+ case wx_mac_aml:
+ max_eitr = WX_AML_MAX_EITR;
+ break;
+ default:
max_eitr = WX_EM_MAX_EITR;
+ break;
+ }
if ((ec->rx_coalesce_usecs > (max_eitr >> 2)) ||
(ec->tx_coalesce_usecs > (max_eitr >> 2)))
@@ -347,10 +372,15 @@ int wx_set_coalesce(struct net_device *netdev,
wx->tx_itr_setting = ec->tx_coalesce_usecs;
if (wx->tx_itr_setting == 1) {
- if (wx->mac.type == wx_mac_sp)
+ switch (wx->mac.type) {
+ case wx_mac_sp:
+ case wx_mac_aml:
tx_itr_param = WX_12K_ITR;
- else
+ break;
+ default:
tx_itr_param = WX_20K_ITR;
+ break;
+ }
} else {
tx_itr_param = wx->tx_itr_setting;
}
@@ -383,10 +413,15 @@ static unsigned int wx_max_channels(struct wx *wx)
max_combined = 1;
} else {
/* support up to max allowed queues with RSS */
- if (wx->mac.type == wx_mac_sp)
+ switch (wx->mac.type) {
+ case wx_mac_sp:
+ case wx_mac_aml:
max_combined = 63;
- else
+ break;
+ default:
max_combined = 8;
+ break;
+ }
}
return max_combined;
@@ -452,3 +487,53 @@ void wx_set_msglevel(struct net_device *netdev, u32 data)
wx->msg_enable = data;
}
EXPORT_SYMBOL(wx_set_msglevel);
+
+int wx_get_ts_info(struct net_device *dev,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct wx *wx = netdev_priv(dev);
+
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (wx->ptp_clock)
+ info->phc_index = ptp_clock_index(wx->ptp_clock);
+ else
+ info->phc_index = -1;
+
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_get_ts_info);
+
+void wx_get_ptp_stats(struct net_device *dev,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct wx *wx = netdev_priv(dev);
+
+ if (wx->ptp_clock) {
+ ts_stats->pkts = wx->tx_hwtstamp_pkts;
+ ts_stats->lost = wx->tx_hwtstamp_timeouts +
+ wx->tx_hwtstamp_skipped +
+ wx->rx_hwtstamp_cleared;
+ ts_stats->err = wx->tx_hwtstamp_errors;
+ }
+}
+EXPORT_SYMBOL(wx_get_ptp_stats);
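
These two helpers are intended to be wired into each driver's ethtool_ops; a sketch of the hookup, with a hypothetical ops struct (the two callback slots are the standard ethtool ones). The wx_ethtool.h change that follows adds the matching prototypes.

static const struct ethtool_ops example_wx_ethtool_ops = {
	/* ... existing wx callbacks ... */
	.get_ts_info	= wx_get_ts_info,	/* advertise HW timestamping */
	.get_ts_stats	= wx_get_ptp_stats,	/* expose timestamp counters */
};
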
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h
index 600c3b597d1a..9e002e699eca 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h
@@ -40,4 +40,8 @@ int wx_set_channels(struct net_device *dev,
struct ethtool_channels *ch);
u32 wx_get_msglevel(struct net_device *netdev);
void wx_set_msglevel(struct net_device *netdev, u32 data);
+int wx_get_ts_info(struct net_device *dev,
+ struct kernel_ethtool_ts_info *info);
+void wx_get_ptp_stats(struct net_device *dev,
+ struct ethtool_ts_stats *ts_stats);
#endif /* _WX_ETHTOOL_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
index deaf670c160e..aed45abafb1b 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -112,10 +112,15 @@ static void wx_intr_disable(struct wx *wx, u64 qmask)
if (mask)
wr32(wx, WX_PX_IMS(0), mask);
- if (wx->mac.type == wx_mac_sp) {
+ switch (wx->mac.type) {
+ case wx_mac_sp:
+ case wx_mac_aml:
mask = (qmask >> 32);
if (mask)
wr32(wx, WX_PX_IMS(1), mask);
+ break;
+ default:
+ break;
}
}
@@ -126,10 +131,16 @@ void wx_intr_enable(struct wx *wx, u64 qmask)
mask = (qmask & U32_MAX);
if (mask)
wr32(wx, WX_PX_IMC(0), mask);
- if (wx->mac.type == wx_mac_sp) {
+
+ switch (wx->mac.type) {
+ case wx_mac_sp:
+ case wx_mac_aml:
mask = (qmask >> 32);
if (mask)
wr32(wx, WX_PX_IMC(1), mask);
+ break;
+ default:
+ break;
}
}
EXPORT_SYMBOL(wx_intr_enable);
@@ -278,22 +289,8 @@ static int wx_acquire_sw_sync(struct wx *wx, u32 mask)
return ret;
}
-/**
- * wx_host_interface_command - Issue command to manageability block
- * @wx: pointer to the HW structure
- * @buffer: contains the command to write and where the return status will
- * be placed
- * @length: length of buffer, must be multiple of 4 bytes
- * @timeout: time in ms to wait for command completion
- * @return_data: read and return data from the buffer (true) or not (false)
- * Needed because FW structures are big endian and decoding of
- * these fields can be 8 bit or 16 bit based on command. Decoding
- * is not easily understood without making a table of commands.
- * So we will leave this up to the caller to read back the data
- * in these cases.
- **/
-int wx_host_interface_command(struct wx *wx, u32 *buffer,
- u32 length, u32 timeout, bool return_data)
+static int wx_host_interface_command_s(struct wx *wx, u32 *buffer,
+ u32 length, u32 timeout, bool return_data)
{
u32 hdr_size = sizeof(struct wx_hic_hdr);
u32 hicr, i, bi, buf[64] = {};
@@ -301,22 +298,10 @@ int wx_host_interface_command(struct wx *wx, u32 *buffer,
u32 dword_len;
u16 buf_len;
- if (length == 0 || length > WX_HI_MAX_BLOCK_BYTE_LENGTH) {
- wx_err(wx, "Buffer length failure buffersize=%d.\n", length);
- return -EINVAL;
- }
-
status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_MB);
if (status != 0)
return status;
- /* Calculate length in DWORDs. We must be DWORD aligned */
- if ((length % (sizeof(u32))) != 0) {
- wx_err(wx, "Buffer length failure, not aligned to dword");
- status = -EINVAL;
- goto rel_out;
- }
-
dword_len = length >> 2;
/* The device driver writes the relevant command block
@@ -391,8 +376,160 @@ rel_out:
wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_MB);
return status;
}
+
+static bool wx_poll_fw_reply(struct wx *wx, u32 *buffer, u8 send_cmd)
+{
+ u32 dword_len = sizeof(struct wx_hic_hdr) >> 2;
+ struct wx_hic_hdr *recv_hdr;
+ u32 i;
+
+ /* read hdr */
+ for (i = 0; i < dword_len; i++) {
+ buffer[i] = rd32a(wx, WX_FW2SW_MBOX, i);
+ le32_to_cpus(&buffer[i]);
+ }
+
+ /* check hdr */
+ recv_hdr = (struct wx_hic_hdr *)buffer;
+ if (recv_hdr->cmd == send_cmd &&
+ recv_hdr->index == wx->swfw_index)
+ return true;
+
+ return false;
+}
+
+static int wx_host_interface_command_r(struct wx *wx, u32 *buffer,
+ u32 length, u32 timeout, bool return_data)
+{
+ struct wx_hic_hdr *hdr = (struct wx_hic_hdr *)buffer;
+ u32 hdr_size = sizeof(struct wx_hic_hdr);
+ bool busy, reply;
+ u32 dword_len;
+ u16 buf_len;
+ int err = 0;
+ u8 send_cmd;
+ u32 i;
+
+ /* wait to get lock */
+ might_sleep();
+ err = read_poll_timeout(test_and_set_bit, busy, !busy, 1000, timeout * 1000,
+ false, WX_STATE_SWFW_BUSY, wx->state);
+ if (err)
+ return err;
+
+ /* index to unique seq id for each mbox message */
+ hdr->index = wx->swfw_index;
+ send_cmd = hdr->cmd;
+
+ dword_len = length >> 2;
+ /* write data to SW-FW mbox array */
+ for (i = 0; i < dword_len; i++) {
+ wr32a(wx, WX_SW2FW_MBOX, i, (__force u32)cpu_to_le32(buffer[i]));
+ /* write flush */
+ rd32a(wx, WX_SW2FW_MBOX, i);
+ }
+
+ /* generate interrupt to notify FW */
+ wr32m(wx, WX_SW2FW_MBOX_CMD, WX_SW2FW_MBOX_CMD_VLD, 0);
+ wr32m(wx, WX_SW2FW_MBOX_CMD, WX_SW2FW_MBOX_CMD_VLD, WX_SW2FW_MBOX_CMD_VLD);
+
+ /* polling reply from FW */
+ err = read_poll_timeout(wx_poll_fw_reply, reply, reply, 1000, 50000,
+ true, wx, buffer, send_cmd);
+ if (err) {
+ wx_err(wx, "Polling for FW reply timed out, cmd: 0x%x, index: %d\n",
+ send_cmd, wx->swfw_index);
+ goto rel_out;
+ }
+
+ /* expect no reply from FW then return */
+ if (!return_data)
+ goto rel_out;
+
+ /* If there is anything in the data position, pull it in */
+ buf_len = hdr->buf_len;
+ if (buf_len == 0)
+ goto rel_out;
+
+ if (length < buf_len + hdr_size) {
+ wx_err(wx, "Buffer not large enough for reply message.\n");
+ err = -EFAULT;
+ goto rel_out;
+ }
+
+ /* Calculate length in DWORDs, add 3 for odd lengths */
+ dword_len = (buf_len + 3) >> 2;
+ for (i = hdr_size >> 2; i <= dword_len; i++) {
+ buffer[i] = rd32a(wx, WX_FW2SW_MBOX, i);
+ le32_to_cpus(&buffer[i]);
+ }
+
+rel_out:
+ /* index++; the index takes the place of wx_hic_hdr.checksum */
+ if (wx->swfw_index == WX_HIC_HDR_INDEX_MAX)
+ wx->swfw_index = 0;
+ else
+ wx->swfw_index++;
+
+ clear_bit(WX_STATE_SWFW_BUSY, wx->state);
+ return err;
+}
+
+/**
+ * wx_host_interface_command - Issue command to manageability block
+ * @wx: pointer to the HW structure
+ * @buffer: contains the command to write and where the return status will
+ * be placed
+ * @length: length of buffer, must be multiple of 4 bytes
+ * @timeout: time in ms to wait for command completion
+ * @return_data: read and return data from the buffer (true) or not (false)
+ * Needed because FW structures are big endian and decoding of
+ * these fields can be 8 bit or 16 bit based on command. Decoding
+ * is not easily understood without making a table of commands.
+ * So we will leave this up to the caller to read back the data
+ * in these cases.
+ **/
+int wx_host_interface_command(struct wx *wx, u32 *buffer,
+ u32 length, u32 timeout, bool return_data)
+{
+ if (length == 0 || length > WX_HI_MAX_BLOCK_BYTE_LENGTH) {
+ wx_err(wx, "Buffer length failure buffersize=%d.\n", length);
+ return -EINVAL;
+ }
+
+ /* Calculate length in DWORDs. We must be DWORD aligned */
+ if ((length % (sizeof(u32))) != 0) {
+ wx_err(wx, "Buffer length failure, not aligned to dword");
+ return -EINVAL;
+ }
+
+ if (test_bit(WX_FLAG_SWFW_RING, wx->flags))
+ return wx_host_interface_command_r(wx, buffer, length,
+ timeout, return_data);
+
+ return wx_host_interface_command_s(wx, buffer, length, timeout, return_data);
+}
EXPORT_SYMBOL(wx_host_interface_command);
+int wx_set_pps(struct wx *wx, bool enable, u64 nsec, u64 cycles)
+{
+ struct wx_hic_set_pps pps_cmd;
+
+ pps_cmd.hdr.cmd = FW_PPS_SET_CMD;
+ pps_cmd.hdr.buf_len = FW_PPS_SET_LEN;
+ pps_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+ pps_cmd.lan_id = wx->bus.func;
+ pps_cmd.enable = (u8)enable;
+ pps_cmd.nsec = nsec;
+ pps_cmd.cycles = cycles;
+ pps_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+
+ return wx_host_interface_command(wx, (u32 *)&pps_cmd,
+ sizeof(pps_cmd),
+ WX_HI_COMMAND_TIMEOUT,
+ false);
+}
+
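
wx_set_pps() above is a fire-and-forget caller (return_data = false). Callers that expect a reply pass return_data = true and read the decoded fields back out of the same buffer, as in this hedged sketch with a hypothetical command ID:

struct example_hic_query {
	struct wx_hic_hdr hdr;
	__le32 payload;			/* filled in by the FW reply */
};

static int example_fw_query(struct wx *wx)
{
	struct example_hic_query cmd = {};
	int err;

	cmd.hdr.cmd = 0x30;		/* hypothetical command ID */
	cmd.hdr.buf_len = sizeof(cmd) - sizeof(cmd.hdr);
	cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;

	/* length must be a multiple of 4 and fit in the mailbox */
	err = wx_host_interface_command(wx, (u32 *)&cmd, sizeof(cmd),
					WX_HI_COMMAND_TIMEOUT, true);
	if (err)
		return err;

	return le32_to_cpu(cmd.payload);
}
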
/**
* wx_read_ee_hostif_data - Read EEPROM word using a host interface cmd
* assuming that the semaphore is already obtained.
@@ -423,7 +560,10 @@ static int wx_read_ee_hostif_data(struct wx *wx, u16 offset, u16 *data)
if (status != 0)
return status;
- *data = (u16)rd32a(wx, WX_MNG_MBOX, FW_NVM_DATA_OFFSET);
+ if (!test_bit(WX_FLAG_SWFW_RING, wx->flags))
+ *data = (u16)rd32a(wx, WX_MNG_MBOX, FW_NVM_DATA_OFFSET);
+ else
+ *data = (u16)rd32a(wx, WX_FW2SW_MBOX, FW_NVM_DATA_OFFSET);
return status;
}
@@ -467,6 +607,7 @@ int wx_read_ee_hostif_buffer(struct wx *wx,
u16 words_to_read;
u32 value = 0;
int status;
+ u32 mbox;
u32 i;
/* Take semaphore for the entire operation. */
@@ -499,8 +640,12 @@ int wx_read_ee_hostif_buffer(struct wx *wx,
goto out;
}
+ if (!test_bit(WX_FLAG_SWFW_RING, wx->flags))
+ mbox = WX_MNG_MBOX;
+ else
+ mbox = WX_FW2SW_MBOX;
for (i = 0; i < words_to_read; i++) {
- u32 reg = WX_MNG_MBOX + (FW_NVM_DATA_OFFSET << 2) + 2 * i;
+ u32 reg = mbox + (FW_NVM_DATA_OFFSET << 2) + 2 * i;
value = rd32(wx, reg);
data[current_word] = (u16)(value & 0xffff);
@@ -550,12 +695,17 @@ void wx_init_eeprom_params(struct wx *wx)
}
}
- if (wx->mac.type == wx_mac_sp) {
+ switch (wx->mac.type) {
+ case wx_mac_sp:
+ case wx_mac_aml:
if (wx_read_ee_hostif(wx, WX_SW_REGION_PTR, &data)) {
wx_err(wx, "NVM Read Error\n");
return;
}
data = data >> 1;
+ break;
+ default:
+ break;
}
eeprom->sw_region_offset = data;
@@ -616,8 +766,15 @@ static int wx_set_rar(struct wx *wx, u32 index, u8 *addr, u64 pools,
/* setup VMDq pool mapping */
wr32(wx, WX_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF);
- if (wx->mac.type == wx_mac_sp)
+
+ switch (wx->mac.type) {
+ case wx_mac_sp:
+ case wx_mac_aml:
wr32(wx, WX_PSR_MAC_SWC_VM_H, pools >> 32);
+ break;
+ default:
+ break;
+ }
/* HW expects these in little endian so we reverse the byte
* order from network order (big endian) to little endian
@@ -755,9 +912,14 @@ void wx_init_rx_addrs(struct wx *wx)
wx_set_rar(wx, 0, wx->mac.addr, 0, WX_PSR_MAC_SWC_AD_H_AV);
- if (wx->mac.type == wx_mac_sp) {
+ switch (wx->mac.type) {
+ case wx_mac_sp:
+ case wx_mac_aml:
/* clear VMDq pool/queue selection for RAR 0 */
wx_clear_vmdq(wx, 0, WX_CLEAR_VMDQ_ALL);
+ break;
+ default:
+ break;
}
}
@@ -1699,7 +1861,7 @@ void wx_configure_rx(struct wx *wx)
/* enable hw crc stripping */
wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_CRC_STRIP, WX_RSC_CTL_CRC_STRIP);
- if (wx->mac.type == wx_mac_sp) {
+ if (test_bit(WX_FLAG_RSC_CAPABLE, wx->flags)) {
u32 psrctl;
/* RSC Setup */
@@ -2351,7 +2513,7 @@ void wx_update_stats(struct wx *wx)
hwstats->b2ogprc += rd32(wx, WX_RDM_BMC2OS_CNT);
hwstats->rdmdrop += rd32(wx, WX_RDM_DRP_PKT);
- if (wx->mac.type == wx_mac_sp) {
+ if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) {
hwstats->fdirmatch += rd32(wx, WX_RDB_FDIR_MATCH);
hwstats->fdirmiss += rd32(wx, WX_RDB_FDIR_MISS);
}
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
index 11fb33349482..b883342bb576 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
@@ -18,6 +18,7 @@ void wx_control_hw(struct wx *wx, bool drv);
int wx_mng_present(struct wx *wx);
int wx_host_interface_command(struct wx *wx, u32 *buffer,
u32 length, u32 timeout, bool return_data);
+int wx_set_pps(struct wx *wx, bool enable, u64 nsec, u64 cycles);
int wx_read_ee_hostif(struct wx *wx, u16 offset, u16 *data);
int wx_read_ee_hostif_buffer(struct wx *wx,
u16 offset, u16 words, u16 *data);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
index 2b3d6586f44a..00b0b318df27 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -13,6 +13,7 @@
#include "wx_type.h"
#include "wx_lib.h"
+#include "wx_ptp.h"
#include "wx_hw.h"
/* Lookup table mapping the HW PTYPE to the bit field for decoding */
@@ -597,8 +598,17 @@ static void wx_process_skb_fields(struct wx_ring *rx_ring,
union wx_rx_desc *rx_desc,
struct sk_buff *skb)
{
+ struct wx *wx = netdev_priv(rx_ring->netdev);
+
wx_rx_hash(rx_ring, rx_desc, skb);
wx_rx_checksum(rx_ring, rx_desc, skb);
+
+ if (unlikely(test_bit(WX_FLAG_RX_HWTSTAMP_ENABLED, wx->flags)) &&
+ unlikely(wx_test_staterr(rx_desc, WX_RXD_STAT_TS))) {
+ wx_ptp_rx_hwtstamp(rx_ring->q_vector->wx, skb);
+ rx_ring->last_rx_timestamp = jiffies;
+ }
+
wx_rx_vlan(rx_ring, rx_desc, skb);
skb_record_rx_queue(skb, rx_ring->queue_index);
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
@@ -705,6 +715,7 @@ static bool wx_clean_tx_irq(struct wx_q_vector *q_vector,
{
unsigned int budget = q_vector->wx->tx_work_limit;
unsigned int total_bytes = 0, total_packets = 0;
+ struct wx *wx = netdev_priv(tx_ring->netdev);
unsigned int i = tx_ring->next_to_clean;
struct wx_tx_buffer *tx_buffer;
union wx_tx_desc *tx_desc;
@@ -737,6 +748,11 @@ static bool wx_clean_tx_irq(struct wx_q_vector *q_vector,
total_bytes += tx_buffer->bytecount;
total_packets += tx_buffer->gso_segs;
+ /* schedule check for Tx timestamp */
+ if (unlikely(test_bit(WX_STATE_PTP_TX_IN_PROGRESS, wx->state)) &&
+ skb_shinfo(tx_buffer->skb)->tx_flags & SKBTX_IN_PROGRESS)
+ ptp_schedule_worker(wx->ptp_clock, 0);
+
/* free the skb */
napi_consume_skb(tx_buffer->skb, napi_budget);
@@ -932,9 +948,9 @@ static void wx_tx_olinfo_status(union wx_tx_desc *tx_desc,
tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}
-static void wx_tx_map(struct wx_ring *tx_ring,
- struct wx_tx_buffer *first,
- const u8 hdr_len)
+static int wx_tx_map(struct wx_ring *tx_ring,
+ struct wx_tx_buffer *first,
+ const u8 hdr_len)
{
struct sk_buff *skb = first->skb;
struct wx_tx_buffer *tx_buffer;
@@ -1013,6 +1029,8 @@ static void wx_tx_map(struct wx_ring *tx_ring,
netdev_tx_sent_queue(wx_txring_txq(tx_ring), first->bytecount);
+ /* set the timestamp */
+ first->time_stamp = jiffies;
skb_tx_timestamp(skb);
/* Force memory writes to complete before letting h/w know there
@@ -1038,7 +1056,7 @@ static void wx_tx_map(struct wx_ring *tx_ring,
if (netif_xmit_stopped(wx_txring_txq(tx_ring)) || !netdev_xmit_more())
writel(i, tx_ring->tail);
- return;
+ return 0;
dma_error:
dev_err(tx_ring->dev, "TX DMA map failed\n");
@@ -1062,6 +1080,8 @@ dma_error:
first->skb = NULL;
tx_ring->next_to_use = i;
+
+ return -ENOMEM;
}
static void wx_tx_ctxtdesc(struct wx_ring *tx_ring, u32 vlan_macip_lens,
@@ -1082,26 +1102,6 @@ static void wx_tx_ctxtdesc(struct wx_ring *tx_ring, u32 vlan_macip_lens,
context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}
-static void wx_get_ipv6_proto(struct sk_buff *skb, int offset, u8 *nexthdr)
-{
- struct ipv6hdr *hdr = (struct ipv6hdr *)(skb->data + offset);
-
- *nexthdr = hdr->nexthdr;
- offset += sizeof(struct ipv6hdr);
- while (ipv6_ext_hdr(*nexthdr)) {
- struct ipv6_opt_hdr _hdr, *hp;
-
- if (*nexthdr == NEXTHDR_NONE)
- return;
- hp = skb_header_pointer(skb, offset, sizeof(_hdr), &_hdr);
- if (!hp)
- return;
- if (*nexthdr == NEXTHDR_FRAGMENT)
- break;
- *nexthdr = hp->nexthdr;
- }
-}
-
union network_header {
struct iphdr *ipv4;
struct ipv6hdr *ipv6;
@@ -1112,6 +1112,8 @@ static u8 wx_encode_tx_desc_ptype(const struct wx_tx_buffer *first)
{
u8 tun_prot = 0, l4_prot = 0, ptype = 0;
struct sk_buff *skb = first->skb;
+ unsigned char *exthdr, *l4_hdr;
+ __be16 frag_off;
if (skb->encapsulation) {
union network_header hdr;
@@ -1122,14 +1124,18 @@ static u8 wx_encode_tx_desc_ptype(const struct wx_tx_buffer *first)
ptype = WX_PTYPE_TUN_IPV4;
break;
case htons(ETH_P_IPV6):
- wx_get_ipv6_proto(skb, skb_network_offset(skb), &tun_prot);
+ l4_hdr = skb_transport_header(skb);
+ exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr);
+ tun_prot = ipv6_hdr(skb)->nexthdr;
+ if (l4_hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data, &tun_prot, &frag_off);
ptype = WX_PTYPE_TUN_IPV6;
break;
default:
return ptype;
}
- if (tun_prot == IPPROTO_IPIP) {
+ if (tun_prot == IPPROTO_IPIP || tun_prot == IPPROTO_IPV6) {
hdr.raw = (void *)inner_ip_hdr(skb);
ptype |= WX_PTYPE_PKT_IPIP;
} else if (tun_prot == IPPROTO_UDP) {
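
The open-coded wx_get_ipv6_proto() walker is replaced by ipv6_skip_exthdr() from the IPv6 core, which skips the extension-header chain from a given offset, updates the nexthdr value in place, and returns the offset of the upper-layer header (or a negative value on a malformed chain). A minimal usage sketch:

#include <net/ipv6.h>

static u8 example_ipv6_l4_proto(struct sk_buff *skb)
{
	u8 nexthdr = ipv6_hdr(skb)->nexthdr;
	int offset = skb_network_offset(skb) + sizeof(struct ipv6hdr);
	__be16 frag_off;

	/* skip any extension headers between the IPv6 header and L4 */
	if (ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off) < 0)
		return 0;

	return nexthdr;			/* e.g. IPPROTO_TCP or IPPROTO_UDP */
}
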
@@ -1166,7 +1172,11 @@ static u8 wx_encode_tx_desc_ptype(const struct wx_tx_buffer *first)
l4_prot = hdr.ipv4->protocol;
break;
case 6:
- wx_get_ipv6_proto(skb, skb_inner_network_offset(skb), &l4_prot);
+ l4_hdr = skb_inner_transport_header(skb);
+ exthdr = skb_inner_network_header(skb) + sizeof(struct ipv6hdr);
+ l4_prot = inner_ipv6_hdr(skb)->nexthdr;
+ if (l4_hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_prot, &frag_off);
ptype |= WX_PTYPE_PKT_IPV6;
break;
default:
@@ -1179,7 +1189,11 @@ static u8 wx_encode_tx_desc_ptype(const struct wx_tx_buffer *first)
ptype = WX_PTYPE_PKT_IP;
break;
case htons(ETH_P_IPV6):
- wx_get_ipv6_proto(skb, skb_network_offset(skb), &l4_prot);
+ l4_hdr = skb_transport_header(skb);
+ exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr);
+ l4_prot = ipv6_hdr(skb)->nexthdr;
+ if (l4_hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_prot, &frag_off);
ptype = WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6;
break;
default:
@@ -1269,13 +1283,20 @@ static int wx_tso(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
if (enc) {
+ unsigned char *exthdr, *l4_hdr;
+ __be16 frag_off;
+
switch (first->protocol) {
case htons(ETH_P_IP):
tun_prot = ip_hdr(skb)->protocol;
first->tx_flags |= WX_TX_FLAGS_OUTER_IPV4;
break;
case htons(ETH_P_IPV6):
+ l4_hdr = skb_transport_header(skb);
+ exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr);
tun_prot = ipv6_hdr(skb)->nexthdr;
+ if (l4_hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data, &tun_prot, &frag_off);
break;
default:
break;
@@ -1298,6 +1319,7 @@ static int wx_tso(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
WX_TXD_TUNNEL_LEN_SHIFT);
break;
case IPPROTO_IPIP:
+ case IPPROTO_IPV6:
tunhdr_eiplen_tunlen = (((char *)inner_ip_hdr(skb) -
(char *)ip_hdr(skb)) >> 2) <<
WX_TXD_OUTER_IPLEN_SHIFT;
@@ -1335,12 +1357,15 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
u8 tun_prot = 0;
if (skb->ip_summed != CHECKSUM_PARTIAL) {
+csum_failed:
if (!(first->tx_flags & WX_TX_FLAGS_HW_VLAN) &&
!(first->tx_flags & WX_TX_FLAGS_CC))
return;
vlan_macip_lens = skb_network_offset(skb) <<
WX_TXD_MACLEN_SHIFT;
} else {
+ unsigned char *exthdr, *l4_hdr;
+ __be16 frag_off;
u8 l4_prot = 0;
union {
struct iphdr *ipv4;
@@ -1362,7 +1387,12 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
tun_prot = ip_hdr(skb)->protocol;
break;
case htons(ETH_P_IPV6):
+ l4_hdr = skb_transport_header(skb);
+ exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr);
tun_prot = ipv6_hdr(skb)->nexthdr;
+ if (l4_hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data,
+ &tun_prot, &frag_off);
break;
default:
return;
@@ -1386,6 +1416,7 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
WX_TXD_TUNNEL_LEN_SHIFT);
break;
case IPPROTO_IPIP:
+ case IPPROTO_IPV6:
tunhdr_eiplen_tunlen = (((char *)inner_ip_hdr(skb) -
(char *)ip_hdr(skb)) >> 2) <<
WX_TXD_OUTER_IPLEN_SHIFT;
@@ -1408,7 +1439,10 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
break;
case 6:
vlan_macip_lens |= (transport_hdr.raw - network_hdr.raw) >> 1;
+ exthdr = network_hdr.raw + sizeof(struct ipv6hdr);
l4_prot = network_hdr.ipv6->nexthdr;
+ if (transport_hdr.raw != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_prot, &frag_off);
break;
default:
break;
@@ -1428,7 +1462,8 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
WX_TXD_L4LEN_SHIFT;
break;
default:
- break;
+ skb_checksum_help(skb);
+ goto csum_failed;
}
/* update TX checksum flag */
@@ -1486,6 +1521,20 @@ static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb,
tx_flags |= WX_TX_FLAGS_HW_VLAN;
}
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ wx->ptp_clock) {
+ if (wx->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
+ !test_and_set_bit_lock(WX_STATE_PTP_TX_IN_PROGRESS,
+ wx->state)) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ tx_flags |= WX_TX_FLAGS_TSTAMP;
+ wx->ptp_tx_skb = skb_get(skb);
+ wx->ptp_tx_start = jiffies;
+ } else {
+ wx->tx_hwtstamp_skipped++;
+ }
+ }
+
/* record initial flags and protocol */
first->tx_flags = tx_flags;
first->protocol = vlan_get_protocol(skb);
@@ -1501,12 +1550,20 @@ static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb,
if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags) && tx_ring->atr_sample_rate)
wx->atr(tx_ring, first, ptype);
- wx_tx_map(tx_ring, first, hdr_len);
+ if (wx_tx_map(tx_ring, first, hdr_len))
+ goto cleanup_tx_tstamp;
return NETDEV_TX_OK;
out_drop:
dev_kfree_skb_any(first->skb);
first->skb = NULL;
+cleanup_tx_tstamp:
+ if (unlikely(tx_flags & WX_TX_FLAGS_TSTAMP)) {
+ dev_kfree_skb_any(wx->ptp_tx_skb);
+ wx->ptp_tx_skb = NULL;
+ wx->tx_hwtstamp_errors++;
+ clear_bit_unlock(WX_STATE_PTP_TX_IN_PROGRESS, wx->state);
+ }
return NETDEV_TX_OK;
}
@@ -1781,10 +1838,16 @@ static int wx_alloc_q_vector(struct wx *wx,
/* initialize pointer to rings */
ring = q_vector->ring;
- if (wx->mac.type == wx_mac_sp)
+ switch (wx->mac.type) {
+ case wx_mac_sp:
+ case wx_mac_aml:
default_itr = WX_12K_ITR;
- else
+ break;
+ default:
default_itr = WX_7K_ITR;
+ break;
+ }
+
/* initialize ITR */
if (txr_count && !rxr_count)
/* tx only vector */
@@ -2140,10 +2203,17 @@ void wx_write_eitr(struct wx_q_vector *q_vector)
int v_idx = q_vector->v_idx;
u32 itr_reg;
- if (wx->mac.type == wx_mac_sp)
+ switch (wx->mac.type) {
+ case wx_mac_sp:
itr_reg = q_vector->itr & WX_SP_MAX_EITR;
- else
+ break;
+ case wx_mac_aml:
+ itr_reg = (q_vector->itr >> 3) & WX_AML_MAX_EITR;
+ break;
+ default:
itr_reg = q_vector->itr & WX_EM_MAX_EITR;
+ break;
+ }
itr_reg |= WX_PX_ITR_CNT_WDIS;
@@ -2719,7 +2789,7 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features)
netdev->features = features;
- if (wx->mac.type == wx_mac_sp && changed & NETIF_F_HW_VLAN_CTAG_RX)
+ if (changed & NETIF_F_HW_VLAN_CTAG_RX && wx->do_reset)
wx->do_reset(netdev);
else if (changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER))
wx_set_rx_mode(netdev);
@@ -2751,7 +2821,7 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features)
break;
}
- if (need_reset)
+ if (need_reset && wx->do_reset)
wx->do_reset(netdev);
return 0;
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ptp.c b/drivers/net/ethernet/wangxun/libwx/wx_ptp.c
new file mode 100644
index 000000000000..07c015ba338f
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_ptp.c
@@ -0,0 +1,883 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+/* Copyright (c) 1999 - 2025 Intel Corporation. */
+
+#include <linux/ptp_classify.h>
+#include <linux/clocksource.h>
+#include <linux/pci.h>
+
+#include "wx_type.h"
+#include "wx_ptp.h"
+#include "wx_hw.h"
+
+#define WX_INCVAL_10GB 0xCCCCCC
+#define WX_INCVAL_1GB 0x800000
+#define WX_INCVAL_100 0xA00000
+#define WX_INCVAL_10 0xC7F380
+#define WX_INCVAL_EM 0x2000000
+
+#define WX_INCVAL_SHIFT_10GB 20
+#define WX_INCVAL_SHIFT_1GB 18
+#define WX_INCVAL_SHIFT_100 15
+#define WX_INCVAL_SHIFT_10 12
+#define WX_INCVAL_SHIFT_EM 22
+
+#define WX_OVERFLOW_PERIOD (HZ * 30)
+#define WX_PTP_TX_TIMEOUT (HZ)
+
+#define WX_1588_PPS_WIDTH_EM 120
+
+#define WX_NS_PER_SEC 1000000000ULL
+
+static u64 wx_ptp_timecounter_cyc2time(struct wx *wx, u64 timestamp)
+{
+ unsigned int seq;
+ u64 ns;
+
+ do {
+ seq = read_seqbegin(&wx->hw_tc_lock);
+ ns = timecounter_cyc2time(&wx->hw_tc, timestamp);
+ } while (read_seqretry(&wx->hw_tc_lock, seq));
+
+ return ns;
+}
+
+static u64 wx_ptp_readtime(struct wx *wx, struct ptp_system_timestamp *sts)
+{
+ u32 timeh1, timeh2, timel;
+
+ timeh1 = rd32ptp(wx, WX_TSC_1588_SYSTIMH);
+ ptp_read_system_prets(sts);
+ timel = rd32ptp(wx, WX_TSC_1588_SYSTIML);
+ ptp_read_system_postts(sts);
+ timeh2 = rd32ptp(wx, WX_TSC_1588_SYSTIMH);
+
+ if (timeh1 != timeh2) {
+ ptp_read_system_prets(sts);
+ timel = rd32ptp(wx, WX_TSC_1588_SYSTIML);
+ ptp_read_system_postts(sts);
+ }
+ return (u64)timel | (u64)timeh2 << 32;
+}
+
+static int wx_ptp_adjfine(struct ptp_clock_info *ptp, long ppb)
+{
+ struct wx *wx = container_of(ptp, struct wx, ptp_caps);
+ u64 incval, mask;
+
+ smp_mb(); /* Force any pending update before accessing. */
+ incval = READ_ONCE(wx->base_incval);
+ incval = adjust_by_scaled_ppm(incval, ppb);
+
+ mask = (wx->mac.type == wx_mac_em) ? 0x7FFFFFF : 0xFFFFFF;
+ incval &= mask;
+ if (wx->mac.type != wx_mac_em)
+ incval |= 2 << 24;
+
+ wr32ptp(wx, WX_TSC_1588_INC, incval);
+
+ return 0;
+}
+
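
Despite the parameter name, adjfine receives a scaled-ppm value (16.16 fixed point, so 1 ppm == 65536), and adjust_by_scaled_ppm() computes roughly base + base * scaled_ppm / (10^6 * 2^16). A worked example with the 10G increment defined above, values rounded:

/* base incval for 10G: 0xCCCCCC = 13421772
 * request +1 ppm     : scaled_ppm = 65536
 * delta = 13421772 * 65536 / (1000000 * 65536) ~= 13
 * new incval = 13421785, then masked and merged as in the code above
 */
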
+static int wx_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct wx *wx = container_of(ptp, struct wx, ptp_caps);
+ unsigned long flags;
+
+ write_seqlock_irqsave(&wx->hw_tc_lock, flags);
+ timecounter_adjtime(&wx->hw_tc, delta);
+ write_sequnlock_irqrestore(&wx->hw_tc_lock, flags);
+
+ if (wx->ptp_setup_sdp)
+ wx->ptp_setup_sdp(wx);
+
+ return 0;
+}
+
+static int wx_ptp_gettimex64(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct wx *wx = container_of(ptp, struct wx, ptp_caps);
+ u64 ns, stamp;
+
+ stamp = wx_ptp_readtime(wx, sts);
+ ns = wx_ptp_timecounter_cyc2time(wx, stamp);
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+static int wx_ptp_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct wx *wx = container_of(ptp, struct wx, ptp_caps);
+ unsigned long flags;
+ u64 ns;
+
+ ns = timespec64_to_ns(ts);
+ /* reset the timecounter */
+ write_seqlock_irqsave(&wx->hw_tc_lock, flags);
+ timecounter_init(&wx->hw_tc, &wx->hw_cc, ns);
+ write_sequnlock_irqrestore(&wx->hw_tc_lock, flags);
+
+ if (wx->ptp_setup_sdp)
+ wx->ptp_setup_sdp(wx);
+
+ return 0;
+}
+
+/**
+ * wx_ptp_clear_tx_timestamp - utility function to clear Tx timestamp state
+ * @wx: the private board structure
+ *
+ * This function should be called whenever the state related to a Tx timestamp
+ * needs to be cleared. This helps ensure that all related bits are reset for
+ * the next Tx timestamp event.
+ */
+static void wx_ptp_clear_tx_timestamp(struct wx *wx)
+{
+ rd32ptp(wx, WX_TSC_1588_STMPH);
+ if (wx->ptp_tx_skb) {
+ dev_kfree_skb_any(wx->ptp_tx_skb);
+ wx->ptp_tx_skb = NULL;
+ }
+ clear_bit_unlock(WX_STATE_PTP_TX_IN_PROGRESS, wx->state);
+}
+
+/**
+ * wx_ptp_convert_to_hwtstamp - convert register value to hw timestamp
+ * @wx: private board structure
+ * @hwtstamp: stack timestamp structure
+ * @timestamp: unsigned 64bit system time value
+ *
+ * We need to convert the adapter's RX/TXSTMP registers into a hwtstamp value
+ * which can be used by the stack's ptp functions.
+ *
+ * The lock is used to protect consistency of the cyclecounter and the SYSTIME
+ * registers. However, it does not need to protect against the Rx or Tx
+ * timestamp registers, as there can't be a new timestamp until the old one is
+ * unlatched by reading.
+ *
+ * In addition to the timestamp in hardware, some controllers need a software
+ * overflow cyclecounter, and this function takes this into account as well.
+ **/
+static void wx_ptp_convert_to_hwtstamp(struct wx *wx,
+ struct skb_shared_hwtstamps *hwtstamp,
+ u64 timestamp)
+{
+ u64 ns;
+
+ ns = wx_ptp_timecounter_cyc2time(wx, timestamp);
+ hwtstamp->hwtstamp = ns_to_ktime(ns);
+}
+
+/**
+ * wx_ptp_tx_hwtstamp - utility function which checks for TX time stamp
+ * @wx: the private board struct
+ *
+ * if the timestamp is valid, we convert it into the timecounter ns
+ * value, then store that result into the shhwtstamps structure which
+ * is passed up the network stack
+ */
+static void wx_ptp_tx_hwtstamp(struct wx *wx)
+{
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct sk_buff *skb = wx->ptp_tx_skb;
+ u64 regval = 0;
+
+ regval |= (u64)rd32ptp(wx, WX_TSC_1588_STMPL);
+ regval |= (u64)rd32ptp(wx, WX_TSC_1588_STMPH) << 32;
+
+ wx_ptp_convert_to_hwtstamp(wx, &shhwtstamps, regval);
+
+ wx->ptp_tx_skb = NULL;
+ clear_bit_unlock(WX_STATE_PTP_TX_IN_PROGRESS, wx->state);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ dev_kfree_skb_any(skb);
+ wx->tx_hwtstamp_pkts++;
+}
+
+static int wx_ptp_tx_hwtstamp_work(struct wx *wx)
+{
+ u32 tsynctxctl;
+
+ /* we have to have a valid skb to poll for a timestamp */
+ if (!wx->ptp_tx_skb) {
+ wx_ptp_clear_tx_timestamp(wx);
+ return 0;
+ }
+
+ /* stop polling once we have a valid timestamp */
+ tsynctxctl = rd32ptp(wx, WX_TSC_1588_CTL);
+ if (tsynctxctl & WX_TSC_1588_CTL_VALID) {
+ wx_ptp_tx_hwtstamp(wx);
+ return 0;
+ }
+
+ return -1;
+}
+
+/**
+ * wx_ptp_overflow_check - watchdog task to detect SYSTIME overflow
+ * @wx: pointer to wx struct
+ *
+ * this watchdog task periodically reads the timecounter
+ * in order to prevent missing when the system time registers wrap
+ * around. This needs to be run approximately twice a minute for the fastest
+ * overflowing hardware. We run it for all hardware since it shouldn't have a
+ * large impact.
+ */
+static void wx_ptp_overflow_check(struct wx *wx)
+{
+ bool timeout = time_is_before_jiffies(wx->last_overflow_check +
+ WX_OVERFLOW_PERIOD);
+ unsigned long flags;
+
+ if (timeout) {
+ /* Update the timecounter */
+ write_seqlock_irqsave(&wx->hw_tc_lock, flags);
+ timecounter_read(&wx->hw_tc);
+ write_sequnlock_irqrestore(&wx->hw_tc_lock, flags);
+
+ wx->last_overflow_check = jiffies;
+ }
+}
+
+/**
+ * wx_ptp_rx_hang - detect error case when Rx timestamp registers latched
+ * @wx: pointer to wx struct
+ *
+ * this watchdog task is scheduled to detect error case where hardware has
+ * dropped an Rx packet that was timestamped when the ring is full. The
+ * particular error is rare but leaves the device in a state unable to
+ * timestamp any future packets.
+ */
+static void wx_ptp_rx_hang(struct wx *wx)
+{
+ struct wx_ring *rx_ring;
+ unsigned long rx_event;
+ u32 tsyncrxctl;
+ int n;
+
+ tsyncrxctl = rd32(wx, WX_PSR_1588_CTL);
+
+ /* if we don't have a valid timestamp in the registers, just update the
+ * timeout counter and exit
+ */
+ if (!(tsyncrxctl & WX_PSR_1588_CTL_VALID)) {
+ wx->last_rx_ptp_check = jiffies;
+ return;
+ }
+
+ /* determine the most recent watchdog or rx_timestamp event */
+ rx_event = wx->last_rx_ptp_check;
+ for (n = 0; n < wx->num_rx_queues; n++) {
+ rx_ring = wx->rx_ring[n];
+ if (time_after(rx_ring->last_rx_timestamp, rx_event))
+ rx_event = rx_ring->last_rx_timestamp;
+ }
+
+ /* only need to read the high RXSTMP register to clear the lock */
+ if (time_is_before_jiffies(rx_event + 5 * HZ)) {
+ rd32(wx, WX_PSR_1588_STMPH);
+ wx->last_rx_ptp_check = jiffies;
+
+ wx->rx_hwtstamp_cleared++;
+ dev_warn(&wx->pdev->dev, "clearing RX Timestamp hang");
+ }
+}
+
+/**
+ * wx_ptp_tx_hang - detect error case where Tx timestamp never finishes
+ * @wx: private network wx structure
+ */
+static void wx_ptp_tx_hang(struct wx *wx)
+{
+ bool timeout = time_is_before_jiffies(wx->ptp_tx_start +
+ WX_PTP_TX_TIMEOUT);
+
+ if (!wx->ptp_tx_skb)
+ return;
+
+ if (!test_bit(WX_STATE_PTP_TX_IN_PROGRESS, wx->state))
+ return;
+
+ /* If we haven't received a timestamp within the timeout, it is
+ * reasonable to assume that it will never occur, so we can unlock the
+ * timestamp bit when this occurs.
+ */
+ if (timeout) {
+ wx_ptp_clear_tx_timestamp(wx);
+ wx->tx_hwtstamp_timeouts++;
+ dev_warn(&wx->pdev->dev, "clearing Tx timestamp hang\n");
+ }
+}
+
+static long wx_ptp_do_aux_work(struct ptp_clock_info *ptp)
+{
+ struct wx *wx = container_of(ptp, struct wx, ptp_caps);
+ int ts_done;
+
+ ts_done = wx_ptp_tx_hwtstamp_work(wx);
+
+ wx_ptp_overflow_check(wx);
+ if (unlikely(test_bit(WX_FLAG_RX_HWTSTAMP_IN_REGISTER,
+ wx->flags)))
+ wx_ptp_rx_hang(wx);
+ wx_ptp_tx_hang(wx);
+
+ return ts_done ? 1 : HZ;
+}
+
+static u64 wx_ptp_trigger_calc(struct wx *wx)
+{
+ struct cyclecounter *cc = &wx->hw_cc;
+ unsigned long flags;
+ u64 ns = 0;
+ u32 rem;
+
+ /* Read the current clock time, and save the cycle counter value */
+ write_seqlock_irqsave(&wx->hw_tc_lock, flags);
+ ns = timecounter_read(&wx->hw_tc);
+ wx->pps_edge_start = wx->hw_tc.cycle_last;
+ write_sequnlock_irqrestore(&wx->hw_tc_lock, flags);
+ wx->pps_edge_end = wx->pps_edge_start;
+
+ /* Figure out how far past the next second we are */
+ div_u64_rem(ns, WX_NS_PER_SEC, &rem);
+
+ /* Figure out how many nanoseconds to add to round the clock edge up
+ * to the next full second
+ */
+ rem = (WX_NS_PER_SEC - rem);
+
+ /* Adjust the clock edge to align with the next full second. */
+ wx->pps_edge_start += div_u64(((u64)rem << cc->shift), cc->mult);
+ wx->pps_edge_end += div_u64(((u64)(rem + wx->pps_width) <<
+ cc->shift), cc->mult);
+
+ return (ns + rem);
+}
+
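
The timecounter maps cycles to nanoseconds as ns = (cycles * mult) >> shift; wx_ptp_trigger_calc() applies the inverse to turn the nanosecond distance to the next full second into SYSTIME cycle offsets for the target registers:

/* forward:  ns     = (cycles * cc->mult) >> cc->shift
 * inverse:  cycles = (ns << cc->shift) / cc->mult
 * hence: pps_edge_start += ((u64)rem << cc->shift) / cc->mult
 */
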
+static int wx_ptp_setup_sdp(struct wx *wx)
+{
+ struct cyclecounter *cc = &wx->hw_cc;
+ u32 tsauxc;
+ u64 nsec;
+
+ if (wx->pps_width >= WX_NS_PER_SEC) {
+ wx_err(wx, "PTP pps width cannot be longer than 1s!\n");
+ return -EINVAL;
+ }
+
+ /* disable the pin first */
+ wr32ptp(wx, WX_TSC_1588_AUX_CTL, 0);
+ WX_WRITE_FLUSH(wx);
+
+ if (!test_bit(WX_FLAG_PTP_PPS_ENABLED, wx->flags)) {
+ if (wx->pps_enabled) {
+ wx->pps_enabled = false;
+ wx_set_pps(wx, false, 0, 0);
+ }
+ return 0;
+ }
+
+ wx->pps_enabled = true;
+ nsec = wx_ptp_trigger_calc(wx);
+ wx_set_pps(wx, wx->pps_enabled, nsec, wx->pps_edge_start);
+
+ tsauxc = WX_TSC_1588_AUX_CTL_PLSG | WX_TSC_1588_AUX_CTL_EN_TT0 |
+ WX_TSC_1588_AUX_CTL_EN_TT1 | WX_TSC_1588_AUX_CTL_EN_TS0;
+ wr32ptp(wx, WX_TSC_1588_TRGT_L(0), (u32)wx->pps_edge_start);
+ wr32ptp(wx, WX_TSC_1588_TRGT_H(0), (u32)(wx->pps_edge_start >> 32));
+ wr32ptp(wx, WX_TSC_1588_TRGT_L(1), (u32)wx->pps_edge_end);
+ wr32ptp(wx, WX_TSC_1588_TRGT_H(1), (u32)(wx->pps_edge_end >> 32));
+ wr32ptp(wx, WX_TSC_1588_SDP(0),
+ WX_TSC_1588_SDP_FUN_SEL_TT0 | WX_TSC_1588_SDP_OUT_LEVEL_H);
+ wr32ptp(wx, WX_TSC_1588_SDP(1), WX_TSC_1588_SDP_FUN_SEL_TS0);
+ wr32ptp(wx, WX_TSC_1588_AUX_CTL, tsauxc);
+ wr32ptp(wx, WX_TSC_1588_INT_EN, WX_TSC_1588_INT_EN_TT1);
+ WX_WRITE_FLUSH(wx);
+
+ /* Adjust the clock edge to align with the next full second. */
+ wx->sec_to_cc = div_u64(((u64)WX_NS_PER_SEC << cc->shift), cc->mult);
+
+ return 0;
+}
+
+static int wx_ptp_feature_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct wx *wx = container_of(ptp, struct wx, ptp_caps);
+
+ /*
+ * When PPS is enabled, unmask the interrupt for the ClockOut
+ * feature, so that the interrupt handler can send the PPS
+ * event when the clock SDP triggers. Clear mask when PPS is
+ * disabled
+ */
+ if (rq->type != PTP_CLK_REQ_PEROUT || !wx->ptp_setup_sdp)
+ return -EOPNOTSUPP;
+
+ /* Reject requests with unsupported flags */
+ if (rq->perout.flags & ~(PTP_PEROUT_DUTY_CYCLE |
+ PTP_PEROUT_PHASE))
+ return -EOPNOTSUPP;
+
+ if (rq->perout.phase.sec || rq->perout.phase.nsec) {
+ wx_err(wx, "Absolute start time not supported.\n");
+ return -EINVAL;
+ }
+
+ if (rq->perout.period.sec != 1 || rq->perout.period.nsec) {
+ wx_err(wx, "Only 1pps is supported.\n");
+ return -EINVAL;
+ }
+
+ if (rq->perout.flags & PTP_PEROUT_DUTY_CYCLE) {
+ struct timespec64 ts_on;
+
+ ts_on.tv_sec = rq->perout.on.sec;
+ ts_on.tv_nsec = rq->perout.on.nsec;
+ wx->pps_width = timespec64_to_ns(&ts_on);
+ } else {
+ wx->pps_width = 120000000;
+ }
+
+ if (on)
+ set_bit(WX_FLAG_PTP_PPS_ENABLED, wx->flags);
+ else
+ clear_bit(WX_FLAG_PTP_PPS_ENABLED, wx->flags);
+
+ return wx->ptp_setup_sdp(wx);
+}
+
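
From userspace this path is reached through the PTP character device. A hedged sketch using the uapi perout request (device node and pulse width are examples; the driver above only accepts a 1 s period, optionally with a duty cycle):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

static int example_enable_pps(void)
{
	struct ptp_perout_request req = {};
	int fd = open("/dev/ptp0", O_RDWR);

	if (fd < 0)
		return -1;

	req.period.sec = 1;			/* only 1pps is accepted */
	req.flags = PTP_PEROUT_DUTY_CYCLE;
	req.on.nsec = 120000000;		/* 120 ms pulse width */

	return ioctl(fd, PTP_PEROUT_REQUEST2, &req);
}
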
+void wx_ptp_check_pps_event(struct wx *wx)
+{
+ u32 tsauxc, int_status;
+
+ /* this check is necessary in case the interrupt was enabled via some
+ * alternative means (e.g. debugfs). Better to check here than
+ * everywhere that calls this function.
+ */
+ if (!wx->ptp_clock)
+ return;
+
+ int_status = rd32ptp(wx, WX_TSC_1588_INT_ST);
+ if (int_status & WX_TSC_1588_INT_ST_TT1) {
+ /* disable the pin first */
+ wr32ptp(wx, WX_TSC_1588_AUX_CTL, 0);
+ WX_WRITE_FLUSH(wx);
+
+ wx_ptp_trigger_calc(wx);
+
+ tsauxc = WX_TSC_1588_AUX_CTL_PLSG | WX_TSC_1588_AUX_CTL_EN_TT0 |
+ WX_TSC_1588_AUX_CTL_EN_TT1 | WX_TSC_1588_AUX_CTL_EN_TS0;
+ wr32ptp(wx, WX_TSC_1588_TRGT_L(0), (u32)wx->pps_edge_start);
+ wr32ptp(wx, WX_TSC_1588_TRGT_H(0), (u32)(wx->pps_edge_start >> 32));
+ wr32ptp(wx, WX_TSC_1588_TRGT_L(1), (u32)wx->pps_edge_end);
+ wr32ptp(wx, WX_TSC_1588_TRGT_H(1), (u32)(wx->pps_edge_end >> 32));
+ wr32ptp(wx, WX_TSC_1588_AUX_CTL, tsauxc);
+ WX_WRITE_FLUSH(wx);
+ }
+}
+EXPORT_SYMBOL(wx_ptp_check_pps_event);
+
+static long wx_ptp_create_clock(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+ long err;
+
+ /* do nothing if we already have a clock device */
+ if (!IS_ERR_OR_NULL(wx->ptp_clock))
+ return 0;
+
+ snprintf(wx->ptp_caps.name, sizeof(wx->ptp_caps.name),
+ "%s", netdev->name);
+ wx->ptp_caps.owner = THIS_MODULE;
+ wx->ptp_caps.n_alarm = 0;
+ wx->ptp_caps.n_ext_ts = 0;
+ wx->ptp_caps.pps = 0;
+ wx->ptp_caps.adjfine = wx_ptp_adjfine;
+ wx->ptp_caps.adjtime = wx_ptp_adjtime;
+ wx->ptp_caps.gettimex64 = wx_ptp_gettimex64;
+ wx->ptp_caps.settime64 = wx_ptp_settime64;
+ wx->ptp_caps.do_aux_work = wx_ptp_do_aux_work;
+ if (wx->mac.type == wx_mac_em) {
+ wx->ptp_caps.max_adj = 500000000;
+ wx->ptp_caps.n_per_out = 1;
+ wx->ptp_setup_sdp = wx_ptp_setup_sdp;
+ wx->ptp_caps.enable = wx_ptp_feature_enable;
+ } else {
+ wx->ptp_caps.max_adj = 250000000;
+ wx->ptp_caps.n_per_out = 0;
+ wx->ptp_setup_sdp = NULL;
+ }
+
+ wx->ptp_clock = ptp_clock_register(&wx->ptp_caps, &wx->pdev->dev);
+ if (IS_ERR(wx->ptp_clock)) {
+ err = PTR_ERR(wx->ptp_clock);
+ wx->ptp_clock = NULL;
+ wx_err(wx, "ptp clock register failed\n");
+ return err;
+ } else if (wx->ptp_clock) {
+ dev_info(&wx->pdev->dev, "registered PHC device on %s\n",
+ netdev->name);
+ }
+
+ /* Set the default timestamp mode to disabled here. We do this in
+ * create_clock instead of initialization, because we don't want to
+ * override the previous settings during a suspend/resume cycle.
+ */
+ wx->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ wx->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+
+ return 0;
+}
+
+static int wx_ptp_set_timestamp_mode(struct wx *wx,
+ struct kernel_hwtstamp_config *config)
+{
+ u32 tsync_tx_ctl = WX_TSC_1588_CTL_ENABLED;
+ u32 tsync_rx_ctl = WX_PSR_1588_CTL_ENABLED;
+ DECLARE_BITMAP(flags, WX_PF_FLAGS_NBITS);
+ u32 tsync_rx_mtrl = PTP_EV_PORT << 16;
+ bool is_l2 = false;
+ u32 regval;
+
+ memcpy(flags, wx->flags, sizeof(wx->flags));
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ tsync_tx_ctl = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ tsync_rx_ctl = 0;
+ tsync_rx_mtrl = 0;
+ clear_bit(WX_FLAG_RX_HWTSTAMP_ENABLED, flags);
+ clear_bit(WX_FLAG_RX_HWTSTAMP_IN_REGISTER, flags);
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ tsync_rx_ctl |= WX_PSR_1588_CTL_TYPE_L4_V1;
+ tsync_rx_mtrl |= WX_PSR_1588_MSG_V1_SYNC;
+ set_bit(WX_FLAG_RX_HWTSTAMP_ENABLED, flags);
+ set_bit(WX_FLAG_RX_HWTSTAMP_IN_REGISTER, flags);
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ tsync_rx_ctl |= WX_PSR_1588_CTL_TYPE_L4_V1;
+ tsync_rx_mtrl |= WX_PSR_1588_MSG_V1_DELAY_REQ;
+ set_bit(WX_FLAG_RX_HWTSTAMP_ENABLED, flags);
+ set_bit(WX_FLAG_RX_HWTSTAMP_IN_REGISTER, flags);
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ tsync_rx_ctl |= WX_PSR_1588_CTL_TYPE_EVENT_V2;
+ is_l2 = true;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ set_bit(WX_FLAG_RX_HWTSTAMP_ENABLED, flags);
+ set_bit(WX_FLAG_RX_HWTSTAMP_IN_REGISTER, flags);
+ break;
+ default:
+ /* The PSR_1588_MSG register must be programmed in order to time
+ * stamp V1 packets, so it is not possible to time stamp both V1
+ * Sync and Delay_Req messages unless hardware supports
+ * timestamping all packets => return error
+ */
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
+ return -ERANGE;
+ }
+
+ /* define ethertype filter for timestamping L2 packets */
+ if (is_l2)
+ wr32(wx, WX_PSR_ETYPE_SWC(WX_PSR_ETYPE_SWC_FILTER_1588),
+ (WX_PSR_ETYPE_SWC_FILTER_EN | /* enable filter */
+ WX_PSR_ETYPE_SWC_1588 | /* enable timestamping */
+ ETH_P_1588)); /* 1588 eth protocol type */
+ else
+ wr32(wx, WX_PSR_ETYPE_SWC(WX_PSR_ETYPE_SWC_FILTER_1588), 0);
+
+ /* enable/disable TX */
+ regval = rd32ptp(wx, WX_TSC_1588_CTL);
+ regval &= ~WX_TSC_1588_CTL_ENABLED;
+ regval |= tsync_tx_ctl;
+ wr32ptp(wx, WX_TSC_1588_CTL, regval);
+
+ /* enable/disable RX */
+ regval = rd32(wx, WX_PSR_1588_CTL);
+ regval &= ~(WX_PSR_1588_CTL_ENABLED | WX_PSR_1588_CTL_TYPE_MASK);
+ regval |= tsync_rx_ctl;
+ wr32(wx, WX_PSR_1588_CTL, regval);
+
+ /* define which PTP packets are time stamped */
+ wr32(wx, WX_PSR_1588_MSG, tsync_rx_mtrl);
+
+ WX_WRITE_FLUSH(wx);
+
+ /* configure adapter flags only when HW is actually configured */
+ memcpy(wx->flags, flags, sizeof(wx->flags));
+
+ /* clear TX/RX timestamp state, just to be sure */
+ wx_ptp_clear_tx_timestamp(wx);
+ rd32(wx, WX_PSR_1588_STMPH);
+
+ return 0;
+}
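
This mode negotiation is what userspace reaches through the standard SIOCSHWTSTAMP path. A minimal sketch of the caller side (interface name hypothetical; note the driver reports any V2 request back as HWTSTAMP_FILTER_PTP_V2_EVENT, as in the switch above):

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

/* Enable TX timestamping plus v2 event RX filtering on one interface;
 * sock is any AF_INET datagram socket.
 */
static int enable_hwtstamp(int sock, const char *ifname)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	return ioctl(sock, SIOCSHWTSTAMP, &ifr);	/* cfg.rx_filter is updated on return */
}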
+
+static u64 wx_ptp_read(const struct cyclecounter *hw_cc)
+{
+ struct wx *wx = container_of(hw_cc, struct wx, hw_cc);
+
+ return wx_ptp_readtime(wx, NULL);
+}
+
+static void wx_ptp_link_speed_adjust(struct wx *wx, u32 *shift, u32 *incval)
+{
+ if (wx->mac.type == wx_mac_em) {
+ *shift = WX_INCVAL_SHIFT_EM;
+ *incval = WX_INCVAL_EM;
+ return;
+ }
+
+ switch (wx->speed) {
+ case SPEED_10:
+ *shift = WX_INCVAL_SHIFT_10;
+ *incval = WX_INCVAL_10;
+ break;
+ case SPEED_100:
+ *shift = WX_INCVAL_SHIFT_100;
+ *incval = WX_INCVAL_100;
+ break;
+ case SPEED_1000:
+ *shift = WX_INCVAL_SHIFT_1GB;
+ *incval = WX_INCVAL_1GB;
+ break;
+ case SPEED_10000:
+ default:
+ *shift = WX_INCVAL_SHIFT_10GB;
+ *incval = WX_INCVAL_10GB;
+ break;
+ }
+}
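
The per-speed incval keeps SYSTIME advancing in real time units whatever the internal clock rate is, so wx_ptp_adjfine (not shown in this hunk) only has to scale base_incval. A sketch of the usual adjfine shape, assumed rather than taken from this driver:

/* Hypothetical adjfine using the generic scaled_ppm helper; the mask
 * applied to the increment field varies by MAC type (see
 * wx_ptp_reset_cyclecounter() below).
 */
static int example_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct wx *wx = container_of(ptp, struct wx, ptp_caps);
	u64 incval = adjust_by_scaled_ppm(READ_ONCE(wx->base_incval),
					  scaled_ppm);

	wr32ptp(wx, WX_TSC_1588_INC, (u32)incval);
	return 0;
}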
+
+/**
+ * wx_ptp_reset_cyclecounter - create the cycle counter from hw
+ * @wx: pointer to the wx structure
+ *
+ * This function should be called to set the proper values for the TSC_1588_INC
+ * register and tell the cyclecounter structure what the tick rate of SYSTIME
+ * is. It does not directly modify SYSTIME registers or the timecounter
+ * structure. It should be called whenever a new TSC_1588_INC value is
+ * necessary, such as during initialization or when the link speed changes.
+ */
+void wx_ptp_reset_cyclecounter(struct wx *wx)
+{
+ u32 incval = 0, mask = 0;
+ struct cyclecounter cc;
+ unsigned long flags;
+
+ /* For some of the boards below this mask is technically incorrect.
+ * The timestamp mask overflows at approximately 61 bits. However the
+ * particular hardware does not overflow on an even bitmask value.
+ * Instead, it overflows due to the conversion of the upper 32 bits
+ * into billions of cycles. Timecounters are not really intended for
+ * this purpose, so they do not function properly if the overflow
+ * point isn't 2^N-1. However, the actual SYSTIME values in question
+ * take ~138 years to overflow. In practice this means they won't
+ * actually overflow. A proper fix would require modification of the
+ * timecounter delta calculations.
+ */
+ cc.mask = CLOCKSOURCE_MASK(64);
+ cc.mult = 1;
+ cc.shift = 0;
+
+ cc.read = wx_ptp_read;
+ wx_ptp_link_speed_adjust(wx, &cc.shift, &incval);
+
+ /* update the base incval used to calculate frequency adjustment */
+ WRITE_ONCE(wx->base_incval, incval);
+
+ mask = (wx->mac.type == wx_mac_em) ? 0x7FFFFFF : 0xFFFFFF;
+ incval &= mask;
+ if (wx->mac.type != wx_mac_em)
+ incval |= 2 << 24;
+ wr32ptp(wx, WX_TSC_1588_INC, incval);
+
+ smp_mb(); /* Force the above update. */
+
+ /* need lock to prevent incorrect read while modifying cyclecounter */
+ write_seqlock_irqsave(&wx->hw_tc_lock, flags);
+ memcpy(&wx->hw_cc, &cc, sizeof(wx->hw_cc));
+ write_sequnlock_irqrestore(&wx->hw_tc_lock, flags);
+}
+EXPORT_SYMBOL(wx_ptp_reset_cyclecounter);
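
Readers pair with this write-side seqlock in the usual lockless fashion; a minimal sketch of the assumed reader pattern (the actual readers live elsewhere in wx_ptp.c):

static u64 wx_cycles_to_ns(struct wx *wx, u64 cycles)
{
	unsigned int seq;
	u64 ns;

	/* Retry if wx_ptp_reset_cyclecounter() updated hw_cc mid-read */
	do {
		seq = read_seqbegin(&wx->hw_tc_lock);
		ns = timecounter_cyc2time(&wx->hw_tc, cycles);
	} while (read_seqretry(&wx->hw_tc_lock, seq));

	return ns;
}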
+
+void wx_ptp_reset(struct wx *wx)
+{
+ unsigned long flags;
+
+ /* reset the hardware timestamping mode */
+ wx_ptp_set_timestamp_mode(wx, &wx->tstamp_config);
+ wx_ptp_reset_cyclecounter(wx);
+
+ wr32ptp(wx, WX_TSC_1588_SYSTIML, 0);
+ wr32ptp(wx, WX_TSC_1588_SYSTIMH, 0);
+ WX_WRITE_FLUSH(wx);
+
+ write_seqlock_irqsave(&wx->hw_tc_lock, flags);
+ timecounter_init(&wx->hw_tc, &wx->hw_cc,
+ ktime_to_ns(ktime_get_real()));
+ write_sequnlock_irqrestore(&wx->hw_tc_lock, flags);
+
+ wx->last_overflow_check = jiffies;
+ ptp_schedule_worker(wx->ptp_clock, HZ);
+
+ /* Now that the shift has been calculated and the systime
+ * registers reset, (re-)enable the Clock out feature
+ */
+ if (wx->ptp_setup_sdp)
+ wx->ptp_setup_sdp(wx);
+}
+EXPORT_SYMBOL(wx_ptp_reset);
+
+void wx_ptp_init(struct wx *wx)
+{
+ /* Initialize the seqlock_t first, since the user might call the clock
+ * functions any time after we've initialized the ptp clock device.
+ */
+ seqlock_init(&wx->hw_tc_lock);
+
+ /* obtain a ptp clock device, or re-use an existing device */
+ if (wx_ptp_create_clock(wx))
+ return;
+
+ wx->tx_hwtstamp_pkts = 0;
+ wx->tx_hwtstamp_timeouts = 0;
+ wx->tx_hwtstamp_skipped = 0;
+ wx->tx_hwtstamp_errors = 0;
+ wx->rx_hwtstamp_cleared = 0;
+ /* reset the ptp related hardware bits */
+ wx_ptp_reset(wx);
+
+ /* enter the WX_STATE_PTP_RUNNING state */
+ set_bit(WX_STATE_PTP_RUNNING, wx->state);
+}
+EXPORT_SYMBOL(wx_ptp_init);
+
+/**
+ * wx_ptp_suspend - stop ptp work items
+ * @wx: pointer to wx struct
+ *
+ * This function suspends ptp activity, and prevents more work from being
+ * generated, but does not destroy the clock device.
+ */
+void wx_ptp_suspend(struct wx *wx)
+{
+ /* leave the WX_STATE_PTP_RUNNING STATE */
+ if (!test_and_clear_bit(WX_STATE_PTP_RUNNING, wx->state))
+ return;
+
+ clear_bit(WX_FLAG_PTP_PPS_ENABLED, wx->flags);
+ if (wx->ptp_setup_sdp)
+ wx->ptp_setup_sdp(wx);
+
+ wx_ptp_clear_tx_timestamp(wx);
+}
+EXPORT_SYMBOL(wx_ptp_suspend);
+
+/**
+ * wx_ptp_stop - destroy the ptp_clock device
+ * @wx: pointer to wx struct
+ *
+ * Completely destroy the ptp_clock device, and disable all PTP related
+ * features. Intended to be run when the device is being closed.
+ */
+void wx_ptp_stop(struct wx *wx)
+{
+ /* first, suspend ptp activity */
+ wx_ptp_suspend(wx);
+
+ /* now destroy the ptp clock device */
+ if (wx->ptp_clock) {
+ ptp_clock_unregister(wx->ptp_clock);
+ wx->ptp_clock = NULL;
+ dev_info(&wx->pdev->dev, "removed PHC on %s\n", wx->netdev->name);
+ }
+}
+EXPORT_SYMBOL(wx_ptp_stop);
+
+/**
+ * wx_ptp_rx_hwtstamp - utility function which checks for RX time stamp
+ * @wx: pointer to wx struct
+ * @skb: particular skb to send timestamp with
+ *
+ * If the timestamp is valid, convert it into the timecounter ns
+ * value and store that result in the shhwtstamps structure that is
+ * passed up the network stack.
+ */
+void wx_ptp_rx_hwtstamp(struct wx *wx, struct sk_buff *skb)
+{
+ u64 regval = 0;
+ u32 tsyncrxctl;
+
+ /* Check the valid bit in the tsyncrxctl register first, so that the
+ * timestamp registers are only read when a timestamp has actually
+ * been latched, avoiding an I/O hit on every packet.
+ */
+ tsyncrxctl = rd32(wx, WX_PSR_1588_CTL);
+ if (!(tsyncrxctl & WX_PSR_1588_CTL_VALID))
+ return;
+
+ regval |= (u64)rd32(wx, WX_PSR_1588_STMPL);
+ regval |= (u64)rd32(wx, WX_PSR_1588_STMPH) << 32;
+
+ wx_ptp_convert_to_hwtstamp(wx, skb_hwtstamps(skb), regval);
+}
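
The valid bit indicates the hardware has latched a single RX timestamp; reading WX_PSR_1588_STMPH releases the latch for the next packet, which is also why wx_ptp_set_timestamp_mode() above does a bare STMPH read to clear any stale timestamp when the mode changes.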
+
+int wx_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg)
+{
+ struct wx *wx = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ *cfg = wx->tstamp_config;
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_hwtstamp_get);
+
+int wx_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
+{
+ struct wx *wx = netdev_priv(dev);
+ int err;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ err = wx_ptp_set_timestamp_mode(wx, cfg);
+ if (err)
+ return err;
+
+ /* save these settings for future reference */
+ memcpy(&wx->tstamp_config, cfg, sizeof(wx->tstamp_config));
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_hwtstamp_set);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ptp.h b/drivers/net/ethernet/wangxun/libwx/wx_ptp.h
new file mode 100644
index 000000000000..50db90a6e3ee
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_ptp.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _WX_PTP_H_
+#define _WX_PTP_H_
+
+void wx_ptp_check_pps_event(struct wx *wx);
+void wx_ptp_reset_cyclecounter(struct wx *wx);
+void wx_ptp_reset(struct wx *wx);
+void wx_ptp_init(struct wx *wx);
+void wx_ptp_suspend(struct wx *wx);
+void wx_ptp_stop(struct wx *wx);
+void wx_ptp_rx_hwtstamp(struct wx *wx, struct sk_buff *skb);
+int wx_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg);
+int wx_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack);
+
+#endif /* _WX_PTP_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index b54bffda027b..5b230ecbbabb 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -4,6 +4,8 @@
#ifndef _WX_TYPE_H_
#define _WX_TYPE_H_
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
#include <linux/bitfield.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
@@ -180,6 +182,23 @@
#define WX_PSR_VLAN_CTL 0x15088
#define WX_PSR_VLAN_CTL_CFIEN BIT(29) /* bit 29 */
#define WX_PSR_VLAN_CTL_VFE BIT(30) /* bit 30 */
+/* EType Queue Filter */
+#define WX_PSR_ETYPE_SWC(_i) (0x15128 + ((_i) * 4))
+#define WX_PSR_ETYPE_SWC_FILTER_1588 3
+#define WX_PSR_ETYPE_SWC_FILTER_EN BIT(31)
+#define WX_PSR_ETYPE_SWC_1588 BIT(30)
+/* 1588 */
+#define WX_PSR_1588_MSG 0x15120
+#define WX_PSR_1588_MSG_V1_SYNC FIELD_PREP(GENMASK(7, 0), 0)
+#define WX_PSR_1588_MSG_V1_DELAY_REQ FIELD_PREP(GENMASK(7, 0), 1)
+#define WX_PSR_1588_STMPL 0x151E8
+#define WX_PSR_1588_STMPH 0x151A4
+#define WX_PSR_1588_CTL 0x15188
+#define WX_PSR_1588_CTL_ENABLED BIT(4)
+#define WX_PSR_1588_CTL_TYPE_MASK GENMASK(3, 1)
+#define WX_PSR_1588_CTL_TYPE_L4_V1 FIELD_PREP(GENMASK(3, 1), 1)
+#define WX_PSR_1588_CTL_TYPE_EVENT_V2 FIELD_PREP(GENMASK(3, 1), 5)
+#define WX_PSR_1588_CTL_VALID BIT(0)
/* mcast/ucast overflow tbl */
#define WX_PSR_MC_TBL(_i) (0x15200 + ((_i) * 4))
#define WX_PSR_UC_TBL(_i) (0x15400 + ((_i) * 4))
@@ -253,6 +272,32 @@
#define WX_TSC_ST_SECTX_RDY BIT(0)
#define WX_TSC_BUF_AE 0x1D00C
#define WX_TSC_BUF_AE_THR GENMASK(9, 0)
+/* 1588 */
+#define WX_TSC_1588_CTL 0x11F00
+#define WX_TSC_1588_CTL_ENABLED BIT(4)
+#define WX_TSC_1588_CTL_VALID BIT(0)
+#define WX_TSC_1588_STMPL 0x11F04
+#define WX_TSC_1588_STMPH 0x11F08
+#define WX_TSC_1588_SYSTIML 0x11F0C
+#define WX_TSC_1588_SYSTIMH 0x11F10
+#define WX_TSC_1588_INC 0x11F14
+#define WX_TSC_1588_INT_ST 0x11F20
+#define WX_TSC_1588_INT_ST_TT1 BIT(5)
+#define WX_TSC_1588_INT_EN 0x11F24
+#define WX_TSC_1588_INT_EN_TT1 BIT(5)
+#define WX_TSC_1588_AUX_CTL 0x11F28
+#define WX_TSC_1588_AUX_CTL_EN_TS0 BIT(8)
+#define WX_TSC_1588_AUX_CTL_EN_TT1 BIT(2)
+#define WX_TSC_1588_AUX_CTL_PLSG BIT(1)
+#define WX_TSC_1588_AUX_CTL_EN_TT0 BIT(0)
+#define WX_TSC_1588_TRGT_L(i) (0x11F2C + ((i) * 8)) /* [0,1] */
+#define WX_TSC_1588_TRGT_H(i) (0x11F30 + ((i) * 8)) /* [0,1] */
+#define WX_TSC_1588_SDP(i) (0x11F5C + ((i) * 4)) /* [0,3] */
+#define WX_TSC_1588_SDP_OUT_LEVEL_H FIELD_PREP(BIT(4), 0)
+#define WX_TSC_1588_SDP_OUT_LEVEL_L FIELD_PREP(BIT(4), 1)
+#define WX_TSC_1588_SDP_FUN_SEL_MASK GENMASK(2, 0)
+#define WX_TSC_1588_SDP_FUN_SEL_TT0 FIELD_PREP(WX_TSC_1588_SDP_FUN_SEL_MASK, 1)
+#define WX_TSC_1588_SDP_FUN_SEL_TS0 FIELD_PREP(WX_TSC_1588_SDP_FUN_SEL_MASK, 5)
/************************************** MNG ********************************/
#define WX_MNG_SWFW_SYNC 0x1E008
@@ -264,6 +309,10 @@
#define WX_MNG_MBOX_CTL_FWRDY BIT(2)
#define WX_MNG_BMC2OS_CNT 0x1E090
#define WX_MNG_OS2BMC_CNT 0x1E094
+#define WX_SW2FW_MBOX_CMD 0x1E0A0
+#define WX_SW2FW_MBOX_CMD_VLD BIT(31)
+#define WX_SW2FW_MBOX 0x1E200
+#define WX_FW2SW_MBOX 0x1E300
/************************************* ETH MAC *****************************/
#define WX_MAC_TX_CFG 0x11000
@@ -327,6 +376,7 @@ enum WX_MSCA_CMD_value {
#define WX_12K_ITR 336
#define WX_20K_ITR 200
#define WX_SP_MAX_EITR 0x00000FF8U
+#define WX_AML_MAX_EITR 0x00000FFFU
#define WX_EM_MAX_EITR 0x00007FFCU
/* transmit DMA Registers */
@@ -370,6 +420,7 @@ enum WX_MSCA_CMD_value {
/****************** Manageability Host Interface defines ********************/
#define WX_HI_MAX_BLOCK_BYTE_LENGTH 256 /* Num of bytes in range */
#define WX_HI_COMMAND_TIMEOUT 1000 /* Process HI command limit */
+#define WX_HIC_HDR_INDEX_MAX 255
#define FW_READ_SHADOW_RAM_CMD 0x31
#define FW_READ_SHADOW_RAM_LEN 0x6
@@ -382,6 +433,8 @@ enum WX_MSCA_CMD_value {
#define FW_CEM_CMD_RESERVED 0X0
#define FW_CEM_MAX_RETRIES 3
#define FW_CEM_RESP_STATUS_SUCCESS 0x1
+#define FW_PPS_SET_CMD 0xF6
+#define FW_PPS_SET_LEN 0x14
#define WX_SW_REGION_PTR 0x1C
@@ -460,6 +513,7 @@ enum WX_MSCA_CMD_value {
#define WX_RXD_STAT_L4CS BIT(7) /* L4 xsum calculated */
#define WX_RXD_STAT_IPCS BIT(8) /* IP xsum calculated */
#define WX_RXD_STAT_OUTERIPCS BIT(10) /* Cloud IP xsum calculated*/
+#define WX_RXD_STAT_TS BIT(14) /* IEEE1588 Time Stamp */
#define WX_RXD_ERR_OUTERIPER BIT(26) /* CRC IP Header error */
#define WX_RXD_ERR_RXE BIT(29) /* Any MAC Error */
@@ -663,21 +717,30 @@ struct wx_hic_hdr {
u8 cmd_resv;
u8 ret_status;
} cmd_or_resp;
- u8 checksum;
+ union {
+ u8 checksum;
+ u8 index;
+ };
};
struct wx_hic_hdr2_req {
u8 cmd;
u8 buf_lenh;
u8 buf_lenl;
- u8 checksum;
+ union {
+ u8 checksum;
+ u8 index;
+ };
};
struct wx_hic_hdr2_rsp {
u8 cmd;
u8 buf_lenl;
u8 buf_lenh_status; /* 7-5: high bits of buf_len, 4-0: status */
- u8 checksum;
+ union {
+ u8 checksum;
+ u8 index;
+ };
};
union wx_hic_hdr2 {
@@ -701,6 +764,15 @@ struct wx_hic_reset {
u16 reset_type;
};
+struct wx_hic_set_pps {
+ struct wx_hic_hdr hdr;
+ u8 lan_id;
+ u8 enable;
+ u16 pad2;
+ u64 nsec;
+ u64 cycles;
+};
+
/* Bus parameters */
struct wx_bus_info {
u8 func;
@@ -716,7 +788,8 @@ struct wx_thermal_sensor_data {
enum wx_mac_type {
wx_mac_unknown = 0,
wx_mac_sp,
- wx_mac_em
+ wx_mac_em,
+ wx_mac_aml,
};
enum sp_media_type {
@@ -863,6 +936,7 @@ struct wx_tx_context_desc {
*/
struct wx_tx_buffer {
union wx_tx_desc *next_to_watch;
+ unsigned long time_stamp;
struct sk_buff *skb;
unsigned int bytecount;
unsigned short gso_segs;
@@ -924,6 +998,7 @@ struct wx_ring {
unsigned int size; /* length in bytes */
u16 count; /* amount of descriptors */
+ unsigned long last_rx_timestamp;
u8 queue_index; /* needed for multiqueue queue management */
u8 reg_idx; /* holds the special value that gets
@@ -1026,13 +1101,21 @@ struct wx_hw_stats {
enum wx_state {
WX_STATE_RESETTING,
- WX_STATE_NBITS, /* must be last */
+ WX_STATE_SWFW_BUSY,
+ WX_STATE_PTP_RUNNING,
+ WX_STATE_PTP_TX_IN_PROGRESS,
+ WX_STATE_NBITS /* must be last */
};
enum wx_pf_flags {
+ WX_FLAG_SWFW_RING,
WX_FLAG_FDIR_CAPABLE,
WX_FLAG_FDIR_HASH,
WX_FLAG_FDIR_PERFECT,
+ WX_FLAG_RSC_CAPABLE,
+ WX_FLAG_RX_HWTSTAMP_ENABLED,
+ WX_FLAG_RX_HWTSTAMP_IN_REGISTER,
+ WX_FLAG_PTP_PPS_ENABLED,
WX_PF_FLAGS_NBITS /* must be last */
};
@@ -1066,6 +1149,7 @@ struct wx {
char eeprom_id[32];
char *driver_name;
enum wx_reset_type reset_type;
+ u8 swfw_index;
/* PHY stuff */
unsigned int link;
@@ -1133,6 +1217,29 @@ struct wx {
void (*atr)(struct wx_ring *ring, struct wx_tx_buffer *first, u8 ptype);
void (*configure_fdir)(struct wx *wx);
void (*do_reset)(struct net_device *netdev);
+ int (*ptp_setup_sdp)(struct wx *wx);
+
+ bool pps_enabled;
+ u64 pps_width;
+ u64 pps_edge_start;
+ u64 pps_edge_end;
+ u64 sec_to_cc;
+ u32 base_incval;
+ u32 tx_hwtstamp_pkts;
+ u32 tx_hwtstamp_timeouts;
+ u32 tx_hwtstamp_skipped;
+ u32 tx_hwtstamp_errors;
+ u32 rx_hwtstamp_cleared;
+ unsigned long last_overflow_check;
+ unsigned long last_rx_ptp_check;
+ unsigned long ptp_tx_start;
+ seqlock_t hw_tc_lock; /* seqlock for ptp */
+ struct cyclecounter hw_cc;
+ struct timecounter hw_tc;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_caps;
+ struct kernel_hwtstamp_config tstamp_config;
+ struct sk_buff *ptp_tx_skb;
};
#define WX_INTR_ALL (~0ULL)
@@ -1177,6 +1284,24 @@ rd64(struct wx *wx, u32 reg)
return (lsb | msb << 32);
}
+static inline u32
+rd32ptp(struct wx *wx, u32 reg)
+{
+ if (wx->mac.type == wx_mac_em)
+ return rd32(wx, reg);
+
+ return rd32(wx, reg + 0xB500);
+}
+
+static inline void
+wr32ptp(struct wx *wx, u32 reg, u32 value)
+{
+ if (wx->mac.type == wx_mac_em)
+ return wr32(wx, reg, value);
+
+ return wr32(wx, reg + 0xB500, value);
+}
+
/* On some domestic CPU platforms, I/O is sometimes not synchronized
 * with flushing memory; use readl() here to flush PCI reads and writes.
 */
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
index e868f7ef4920..7e2d9ec38a30 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
@@ -138,6 +138,8 @@ static const struct ethtool_ops ngbe_ethtool_ops = {
.set_channels = ngbe_set_channels,
.get_msglevel = wx_get_msglevel,
.set_msglevel = wx_set_msglevel,
+ .get_ts_info = wx_get_ts_info,
+ .get_ts_stats = wx_get_ptp_stats,
};
void ngbe_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
index 53aeae2f884b..a6159214ec0a 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
@@ -14,6 +14,7 @@
#include "../libwx/wx_type.h"
#include "../libwx/wx_hw.h"
#include "../libwx/wx_lib.h"
+#include "../libwx/wx_ptp.h"
#include "ngbe_type.h"
#include "ngbe_mdio.h"
#include "ngbe_hw.h"
@@ -167,7 +168,7 @@ static irqreturn_t ngbe_intr(int __always_unused irq, void *data)
struct wx_q_vector *q_vector;
struct wx *wx = data;
struct pci_dev *pdev;
- u32 eicr;
+ u32 eicr, eicr_misc;
q_vector = wx->q_vector[0];
pdev = wx->pdev;
@@ -185,6 +186,10 @@ static irqreturn_t ngbe_intr(int __always_unused irq, void *data)
if (!(pdev->msi_enabled))
wr32(wx, WX_PX_INTA, 1);
+ eicr_misc = wx_misc_isb(wx, WX_ISB_MISC);
+ if (unlikely(eicr_misc & NGBE_PX_MISC_IC_TIMESYNC))
+ wx_ptp_check_pps_event(wx);
+
wx->isb_mem[WX_ISB_MISC] = 0;
/* would disable interrupts here but it is auto disabled */
napi_schedule_irqoff(&q_vector->napi);
@@ -198,6 +203,12 @@ static irqreturn_t ngbe_intr(int __always_unused irq, void *data)
static irqreturn_t ngbe_msix_other(int __always_unused irq, void *data)
{
struct wx *wx = data;
+ u32 eicr;
+
+ eicr = wx_misc_isb(wx, WX_ISB_MISC);
+
+ if (unlikely(eicr & NGBE_PX_MISC_IC_TIMESYNC))
+ wx_ptp_check_pps_event(wx);
/* re-enable the original interrupt state, no lsc, no queues */
if (netif_running(wx->netdev))
@@ -317,6 +328,8 @@ void ngbe_down(struct wx *wx)
{
phylink_stop(wx->phylink);
ngbe_disable_device(wx);
+ if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
+ wx_ptp_reset(wx);
wx_clean_all_tx_rings(wx);
wx_clean_all_rx_rings(wx);
}
@@ -379,6 +392,8 @@ static int ngbe_open(struct net_device *netdev)
if (err)
goto err_dis_phy;
+ wx_ptp_init(wx);
+
ngbe_up(wx);
return 0;
@@ -407,6 +422,7 @@ static int ngbe_close(struct net_device *netdev)
{
struct wx *wx = netdev_priv(netdev);
+ wx_ptp_stop(wx);
ngbe_down(wx);
wx_free_irq(wx);
wx_free_isb_resources(wx);
@@ -507,6 +523,8 @@ static const struct net_device_ops ngbe_netdev_ops = {
.ndo_get_stats64 = wx_get_stats64,
.ndo_vlan_rx_add_vid = wx_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = wx_vlan_rx_kill_vid,
+ .ndo_hwtstamp_set = wx_hwtstamp_set,
+ .ndo_hwtstamp_get = wx_hwtstamp_get,
};
/**
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
index a5e9b779c44d..ea1d7e9a91f3 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
@@ -7,6 +7,7 @@
#include <linux/phy.h>
#include "../libwx/wx_type.h"
+#include "../libwx/wx_ptp.h"
#include "../libwx/wx_hw.h"
#include "ngbe_type.h"
#include "ngbe_mdio.h"
@@ -64,6 +65,11 @@ static void ngbe_mac_config(struct phylink_config *config, unsigned int mode,
static void ngbe_mac_link_down(struct phylink_config *config,
unsigned int mode, phy_interface_t interface)
{
+ struct wx *wx = phylink_to_wx(config);
+
+ wx->speed = SPEED_UNKNOWN;
+ if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
+ wx_ptp_reset_cyclecounter(wx);
}
static void ngbe_mac_link_up(struct phylink_config *config,
@@ -103,6 +109,11 @@ static void ngbe_mac_link_up(struct phylink_config *config,
wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR);
reg = rd32(wx, WX_MAC_WDG_TIMEOUT);
wr32(wx, WX_MAC_WDG_TIMEOUT, reg);
+
+ wx->speed = speed;
+ wx->last_rx_ptp_check = jiffies;
+ if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
+ wx_ptp_reset_cyclecounter(wx);
}
static const struct phylink_mac_ops ngbe_mac_ops = {
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
index f48ed7fc1805..992adbb98c7d 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
@@ -70,15 +70,20 @@
/* Extended Interrupt Enable Set */
#define NGBE_PX_MISC_IEN_DEV_RST BIT(10)
+#define NGBE_PX_MISC_IEN_TIMESYNC BIT(11)
#define NGBE_PX_MISC_IEN_ETH_LK BIT(18)
#define NGBE_PX_MISC_IEN_INT_ERR BIT(20)
#define NGBE_PX_MISC_IEN_GPIO BIT(26)
#define NGBE_PX_MISC_IEN_MASK ( \
NGBE_PX_MISC_IEN_DEV_RST | \
+ NGBE_PX_MISC_IEN_TIMESYNC | \
NGBE_PX_MISC_IEN_ETH_LK | \
NGBE_PX_MISC_IEN_INT_ERR | \
NGBE_PX_MISC_IEN_GPIO)
+/* Extended Interrupt Cause Read */
+#define NGBE_PX_MISC_IC_TIMESYNC BIT(11) /* time sync */
+
#define NGBE_INTR_ALL 0x1FF
#define NGBE_INTR_MISC BIT(0)
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
index d98314b26c19..78999d484f18 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
@@ -529,6 +529,8 @@ static const struct ethtool_ops txgbe_ethtool_ops = {
.set_rxnfc = txgbe_set_rxnfc,
.get_msglevel = wx_get_msglevel,
.set_msglevel = wx_set_msglevel,
+ .get_ts_info = wx_get_ts_info,
+ .get_ts_stats = wx_get_ptp_stats,
};
void txgbe_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
index cd1372da92a9..4b9921b7bb11 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
@@ -197,6 +197,12 @@ int txgbe_reset_hw(struct wx *wx)
txgbe_reset_misc(wx);
+ if (wx->mac.type != wx_mac_sp) {
+ wr32(wx, TXGBE_PX_PF_BME, 0x1);
+ wr32m(wx, TXGBE_RDM_RSC_CTL, TXGBE_RDM_RSC_CTL_FREE_CTL,
+ TXGBE_RDM_RSC_CTL_FREE_CTL);
+ }
+
wx_clear_hw_cntrs(wx);
/* Store the permanent mac address */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
index 0ee73a265545..8658a51ee810 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
@@ -166,6 +166,9 @@ static void txgbe_del_irq_domain(struct txgbe *txgbe)
void txgbe_free_misc_irq(struct txgbe *txgbe)
{
+ if (txgbe->wx->mac.type == wx_mac_aml)
+ return;
+
free_irq(txgbe->link_irq, txgbe);
free_irq(txgbe->misc.irq, txgbe);
txgbe_del_irq_domain(txgbe);
@@ -177,6 +180,9 @@ int txgbe_setup_misc_irq(struct txgbe *txgbe)
struct wx *wx = txgbe->wx;
int hwirq, err;
+ if (wx->mac.type == wx_mac_aml)
+ goto skip_sp_irq;
+
txgbe->misc.nirqs = 1;
txgbe->misc.domain = irq_domain_add_simple(NULL, txgbe->misc.nirqs, 0,
&txgbe_misc_irq_domain_ops, txgbe);
@@ -206,6 +212,7 @@ int txgbe_setup_misc_irq(struct txgbe *txgbe)
if (err)
goto free_msic_irq;
+skip_sp_irq:
wx->misc_irq_domain = true;
return 0;
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index f77450268036..a2e245e3b016 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -13,6 +13,7 @@
#include "../libwx/wx_type.h"
#include "../libwx/wx_lib.h"
+#include "../libwx/wx_ptp.h"
#include "../libwx/wx_hw.h"
#include "txgbe_type.h"
#include "txgbe_hw.h"
@@ -34,6 +35,12 @@ char txgbe_driver_name[] = "txgbe";
static const struct pci_device_id txgbe_pci_tbl[] = {
{ PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_SP1000), 0},
{ PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_WX1820), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5010), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5110), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5025), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5125), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5040), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5140), 0},
/* required last entry */
{ .device = 0 }
};
@@ -89,7 +96,18 @@ static void txgbe_up_complete(struct wx *wx)
smp_mb__before_atomic();
wx_napi_enable_all(wx);
- phylink_start(wx->phylink);
+ if (wx->mac.type == wx_mac_aml) {
+ u32 reg;
+
+ reg = rd32(wx, TXGBE_AML_MAC_TX_CFG);
+ reg &= ~TXGBE_AML_MAC_TX_CFG_SPEED_MASK;
+ reg |= TXGBE_AML_MAC_TX_CFG_SPEED_25G;
+ wr32(wx, WX_MAC_TX_CFG, reg);
+ txgbe_enable_sec_tx_path(wx);
+ netif_carrier_on(wx->netdev);
+ } else {
+ phylink_start(wx->phylink);
+ }
/* clear any pending interrupts, may auto mask */
rd32(wx, WX_PX_IC(0));
@@ -116,6 +134,9 @@ static void txgbe_reset(struct wx *wx)
memcpy(old_addr, &wx->mac_table[0].addr, netdev->addr_len);
wx_flush_sw_mac_table(wx);
wx_mac_set_default_filter(wx, old_addr);
+
+ if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
+ wx_ptp_reset(wx);
}
static void txgbe_disable_device(struct wx *wx)
@@ -167,7 +188,10 @@ void txgbe_down(struct wx *wx)
{
txgbe_disable_device(wx);
txgbe_reset(wx);
- phylink_stop(wx->phylink);
+ if (wx->mac.type == wx_mac_aml)
+ netif_carrier_off(wx->netdev);
+ else
+ phylink_stop(wx->phylink);
wx_clean_all_tx_rings(wx);
wx_clean_all_rx_rings(wx);
@@ -176,6 +200,7 @@ void txgbe_down(struct wx *wx)
void txgbe_up(struct wx *wx)
{
wx_configure(wx);
+ wx_ptp_init(wx);
txgbe_up_complete(wx);
}
@@ -192,6 +217,14 @@ static void txgbe_init_type_code(struct wx *wx)
case TXGBE_DEV_ID_WX1820:
wx->mac.type = wx_mac_sp;
break;
+ case TXGBE_DEV_ID_AML5010:
+ case TXGBE_DEV_ID_AML5110:
+ case TXGBE_DEV_ID_AML5025:
+ case TXGBE_DEV_ID_AML5125:
+ case TXGBE_DEV_ID_AML5040:
+ case TXGBE_DEV_ID_AML5140:
+ wx->mac.type = wx_mac_aml;
+ break;
default:
wx->mac.type = wx_mac_unknown;
break;
@@ -265,6 +298,8 @@ static int txgbe_sw_init(struct wx *wx)
wx->atr = txgbe_atr;
wx->configure_fdir = txgbe_configure_fdir;
+ set_bit(WX_FLAG_RSC_CAPABLE, wx->flags);
+
/* enable itr by default in dynamic mode */
wx->rx_itr_setting = 1;
wx->tx_itr_setting = 1;
@@ -279,6 +314,17 @@ static int txgbe_sw_init(struct wx *wx)
wx->do_reset = txgbe_do_reset;
+ switch (wx->mac.type) {
+ case wx_mac_sp:
+ break;
+ case wx_mac_aml:
+ set_bit(WX_FLAG_SWFW_RING, wx->flags);
+ wx->swfw_index = 0;
+ break;
+ default:
+ break;
+ }
+
return 0;
}
@@ -321,6 +367,8 @@ static int txgbe_open(struct net_device *netdev)
if (err)
goto err_free_irq;
+ wx_ptp_init(wx);
+
txgbe_up_complete(wx);
return 0;
@@ -344,6 +392,7 @@ err_reset:
*/
static void txgbe_close_suspend(struct wx *wx)
{
+ wx_ptp_suspend(wx);
txgbe_disable_device(wx);
wx_free_resources(wx);
}
@@ -363,6 +412,7 @@ static int txgbe_close(struct net_device *netdev)
{
struct wx *wx = netdev_priv(netdev);
+ wx_ptp_stop(wx);
txgbe_down(wx);
wx_free_irq(wx);
wx_free_resources(wx);
@@ -479,6 +529,8 @@ static const struct net_device_ops txgbe_netdev_ops = {
.ndo_get_stats64 = wx_get_stats64,
.ndo_vlan_rx_add_vid = wx_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = wx_vlan_rx_kill_vid,
+ .ndo_hwtstamp_set = wx_hwtstamp_set,
+ .ndo_hwtstamp_get = wx_hwtstamp_get,
};
/**
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
index 1ae68f94dd49..85f022ceef4f 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
@@ -15,6 +15,7 @@
#include "../libwx/wx_type.h"
#include "../libwx/wx_lib.h"
+#include "../libwx/wx_ptp.h"
#include "../libwx/wx_hw.h"
#include "txgbe_type.h"
#include "txgbe_phy.h"
@@ -179,6 +180,10 @@ static void txgbe_mac_link_down(struct phylink_config *config,
struct wx *wx = phylink_to_wx(config);
wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0);
+
+ wx->speed = SPEED_UNKNOWN;
+ if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
+ wx_ptp_reset_cyclecounter(wx);
}
static void txgbe_mac_link_up(struct phylink_config *config,
@@ -215,6 +220,11 @@ static void txgbe_mac_link_up(struct phylink_config *config,
wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR);
wdg = rd32(wx, WX_MAC_WDG_TIMEOUT);
wr32(wx, WX_MAC_WDG_TIMEOUT, wdg);
+
+ wx->speed = speed;
+ wx->last_rx_ptp_check = jiffies;
+ if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
+ wx_ptp_reset_cyclecounter(wx);
}
static int txgbe_mac_prepare(struct phylink_config *config, unsigned int mode,
@@ -557,6 +567,9 @@ int txgbe_init_phy(struct txgbe *txgbe)
struct wx *wx = txgbe->wx;
int ret;
+ if (wx->mac.type == wx_mac_aml)
+ return 0;
+
if (txgbe->wx->media_type == sp_media_copper)
return txgbe_ext_phy_init(txgbe);
@@ -621,6 +634,9 @@ err_unregister_swnode:
void txgbe_remove_phy(struct txgbe *txgbe)
{
+ if (txgbe->wx->mac.type == wx_mac_aml)
+ return;
+
if (txgbe->wx->media_type == sp_media_copper) {
phylink_disconnect_phy(txgbe->wx->phylink);
phylink_destroy(txgbe->wx->phylink);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index 629a13e96b85..9c1c26234cad 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -10,6 +10,12 @@
/* Device IDs */
#define TXGBE_DEV_ID_SP1000 0x1001
#define TXGBE_DEV_ID_WX1820 0x2001
+#define TXGBE_DEV_ID_AML5010 0x5010
+#define TXGBE_DEV_ID_AML5110 0x5110
+#define TXGBE_DEV_ID_AML5025 0x5025
+#define TXGBE_DEV_ID_AML5125 0x5125
+#define TXGBE_DEV_ID_AML5040 0x5040
+#define TXGBE_DEV_ID_AML5140 0x5140
/* Subsystem IDs */
/* SFP */
@@ -137,6 +143,14 @@
#define TXGBE_RDB_FDIR_FLEX_CFG_MSK BIT(2)
#define TXGBE_RDB_FDIR_FLEX_CFG_OFST(v) FIELD_PREP(GENMASK(7, 3), v)
+/*************************** Amber Lite Registers ****************************/
+#define TXGBE_PX_PF_BME 0x4B8
+#define TXGBE_AML_MAC_TX_CFG 0x11000
+#define TXGBE_AML_MAC_TX_CFG_SPEED_MASK GENMASK(30, 27)
+#define TXGBE_AML_MAC_TX_CFG_SPEED_25G BIT(28)
+#define TXGBE_RDM_RSC_CTL 0x1200C
+#define TXGBE_RDM_RSC_CTL_FREE_CTL BIT(7)
+
/* Checksum and EEPROM pointers */
#define TXGBE_EEPROM_LAST_WORD 0x800
#define TXGBE_EEPROM_CHECKSUM 0x2F
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index 35d96c633a33..7502214cc7d5 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -28,6 +28,7 @@ config XILINX_AXI_EMAC
depends on HAS_IOMEM
depends on XILINX_DMA
select PHYLINK
+ select DIMLIB
help
This driver supports the 10/100/1000 Ethernet from Xilinx for the
AXI bus interface used in Xilinx Virtex FPGAs and SoCs.
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index a3f4f3e42587..5ff742103beb 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -9,6 +9,7 @@
#ifndef XILINX_AXIENET_H
#define XILINX_AXIENET_H
+#include <linux/dim.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
@@ -112,9 +113,6 @@
#define XAXIDMA_DELAY_MASK 0xFF000000 /* Delay timeout counter */
#define XAXIDMA_COALESCE_MASK 0x00FF0000 /* Coalesce counter */
-#define XAXIDMA_DELAY_SHIFT 24
-#define XAXIDMA_COALESCE_SHIFT 16
-
#define XAXIDMA_IRQ_IOC_MASK 0x00001000 /* Completion intr */
#define XAXIDMA_IRQ_DELAY_MASK 0x00002000 /* Delay interrupt */
#define XAXIDMA_IRQ_ERROR_MASK 0x00004000 /* Error interrupt */
@@ -126,8 +124,7 @@
/* Default TX/RX Threshold and delay timer values for SGDMA mode */
#define XAXIDMA_DFT_TX_THRESHOLD 24
#define XAXIDMA_DFT_TX_USEC 50
-#define XAXIDMA_DFT_RX_THRESHOLD 1
-#define XAXIDMA_DFT_RX_USEC 50
+#define XAXIDMA_DFT_RX_USEC 16
#define XAXIDMA_BD_CTRL_TXSOF_MASK 0x08000000 /* First tx packet */
#define XAXIDMA_BD_CTRL_TXEOF_MASK 0x04000000 /* Last tx packet */
@@ -487,7 +484,12 @@ struct skbuf_dma_descriptor {
* @regs: Base address for the axienet_local device address space
* @dma_regs: Base address for the axidma device address space
* @napi_rx: NAPI RX control structure
+ * @rx_dim: DIM state for the receive queue
+ * @rx_dim_enabled: Whether DIM is enabled or not
+ * @rx_irqs: Number of RX interrupts, used as the DIM event counter
+ * @rx_cr_lock: Lock protecting @rx_dma_cr, its register, and @rx_dma_started
* @rx_dma_cr: Nominal content of RX DMA control register
+ * @rx_dma_started: Set when RX DMA is started
* @rx_bd_v: Virtual address of the RX buffer descriptor ring
* @rx_bd_p: Physical address (start address) of the RX buffer descr. ring
* @rx_bd_num: Size of RX buffer descriptor ring
@@ -497,7 +499,9 @@ struct skbuf_dma_descriptor {
* @rx_bytes: RX byte count for statistics
* @rx_stat_sync: Synchronization object for RX stats
* @napi_tx: NAPI TX control structure
+ * @tx_cr_lock: Lock protecting @tx_dma_cr, its register, and @tx_dma_started
* @tx_dma_cr: Nominal content of TX DMA control register
+ * @tx_dma_started: Set when TX DMA is started
* @tx_bd_v: Virtual address of the TX buffer descriptor ring
* @tx_bd_p: Physical address (start address) of the TX buffer descr. ring
* @tx_bd_num: Size of TX buffer descriptor ring
@@ -532,10 +536,6 @@ struct skbuf_dma_descriptor {
* supported, the maximum frame size would be 9k. Else it is
* 1522 bytes (assuming support for basic VLAN)
* @rxmem: Stores rx memory size for jumbo frame handling.
- * @coalesce_count_rx: Store the irq coalesce on RX side.
- * @coalesce_usec_rx: IRQ coalesce delay for RX
- * @coalesce_count_tx: Store the irq coalesce on TX side.
- * @coalesce_usec_tx: IRQ coalesce delay for TX
* @use_dmaengine: flag to check dmaengine framework usage.
* @tx_chan: TX DMA channel.
* @rx_chan: RX DMA channel.
@@ -569,7 +569,12 @@ struct axienet_local {
void __iomem *dma_regs;
struct napi_struct napi_rx;
+ struct dim rx_dim;
+ bool rx_dim_enabled;
+ u16 rx_irqs;
+ spinlock_t rx_cr_lock;
u32 rx_dma_cr;
+ bool rx_dma_started;
struct axidma_bd *rx_bd_v;
dma_addr_t rx_bd_p;
u32 rx_bd_num;
@@ -579,7 +584,9 @@ struct axienet_local {
struct u64_stats_sync rx_stat_sync;
struct napi_struct napi_tx;
+ spinlock_t tx_cr_lock;
u32 tx_dma_cr;
+ bool tx_dma_started;
struct axidma_bd *tx_bd_v;
dma_addr_t tx_bd_p;
u32 tx_bd_num;
@@ -610,10 +617,6 @@ struct axienet_local {
u32 max_frm_size;
u32 rxmem;
- u32 coalesce_count_rx;
- u32 coalesce_usec_rx;
- u32 coalesce_count_tx;
- u32 coalesce_usec_tx;
u8 use_dmaengine;
struct dma_chan *tx_chan;
struct dma_chan *rx_chan;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index f33178f90c42..054abf283ab3 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -223,23 +223,62 @@ static void axienet_dma_bd_release(struct net_device *ndev)
lp->rx_bd_p);
}
+static u64 axienet_dma_rate(struct axienet_local *lp)
+{
+ if (lp->axi_clk)
+ return clk_get_rate(lp->axi_clk);
+ return 125000000; /* arbitrary guess if no clock rate set */
+}
+
/**
- * axienet_usec_to_timer - Calculate IRQ delay timer value
- * @lp: Pointer to the axienet_local structure
- * @coalesce_usec: Microseconds to convert into timer value
+ * axienet_calc_cr() - Calculate control register value
+ * @lp: Device private data
+ * @count: Number of completions before an interrupt
+ * @usec: Microseconds after the last completion before an interrupt
+ *
+ * Calculate a control register value based on the coalescing settings. The
+ * run/stop bit is not set.
*/
-static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
+static u32 axienet_calc_cr(struct axienet_local *lp, u32 count, u32 usec)
{
- u32 result;
- u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */
+ u32 cr;
- if (lp->axi_clk)
- clk_rate = clk_get_rate(lp->axi_clk);
+ cr = FIELD_PREP(XAXIDMA_COALESCE_MASK, count) | XAXIDMA_IRQ_IOC_MASK |
+ XAXIDMA_IRQ_ERROR_MASK;
+ /* Only set interrupt delay timer if not generating an interrupt on
+ * the first packet. Otherwise leave at 0 to disable delay interrupt.
+ */
+ if (count > 1) {
+ u64 clk_rate = axienet_dma_rate(lp);
+ u32 timer;
+
+ /* 1 Timeout Interval = 125 * (clock period of SG clock) */
+ timer = DIV64_U64_ROUND_CLOSEST((u64)usec * clk_rate,
+ XAXIDMA_DELAY_SCALE);
- /* 1 Timeout Interval = 125 * (clock period of SG clock) */
- result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
- XAXIDMA_DELAY_SCALE);
- return min(result, FIELD_MAX(XAXIDMA_DELAY_MASK));
+ timer = min(timer, FIELD_MAX(XAXIDMA_DELAY_MASK));
+ cr |= FIELD_PREP(XAXIDMA_DELAY_MASK, timer) |
+ XAXIDMA_IRQ_DELAY_MASK;
+ }
+
+ return cr;
+}
+
+/**
+ * axienet_coalesce_params() - Extract coalesce parameters from the CR
+ * @lp: Device private data
+ * @cr: The control register to parse
+ * @count: Number of packets before an interrupt
+ * @usec: Idle time (in usec) before an interrupt
+ */
+static void axienet_coalesce_params(struct axienet_local *lp, u32 cr,
+ u32 *count, u32 *usec)
+{
+ u64 clk_rate = axienet_dma_rate(lp);
+ u64 timer = FIELD_GET(XAXIDMA_DELAY_MASK, cr);
+
+ *count = FIELD_GET(XAXIDMA_COALESCE_MASK, cr);
+ *usec = DIV64_U64_ROUND_CLOSEST(timer * XAXIDMA_DELAY_SCALE, clk_rate);
}
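
As a worked check, at the 125 MHz fallback rate one timeout interval is 125 clock periods, i.e. exactly 1 us, so microseconds map 1:1 onto the timer field and the two helpers round-trip (hypothetical values):

/* Sketch: assumes lp->axi_clk is NULL so the 125 MHz fallback applies */
u32 frames, usecs;
u32 cr = axienet_calc_cr(lp, 24, 50);	/* 24 frames, 50 us idle delay */

axienet_coalesce_params(lp, cr, &frames, &usecs);
/* frames == 24, usecs == 50: timer = 50 us * 125 MHz / DELAY_SCALE = 50 */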
/**
@@ -248,30 +287,12 @@ static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
*/
static void axienet_dma_start(struct axienet_local *lp)
{
+ spin_lock_irq(&lp->rx_cr_lock);
+
/* Start updating the Rx channel control register */
- lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) |
- XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
- /* Only set interrupt delay timer if not generating an interrupt on
- * the first RX packet. Otherwise leave at 0 to disable delay interrupt.
- */
- if (lp->coalesce_count_rx > 1)
- lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx)
- << XAXIDMA_DELAY_SHIFT) |
- XAXIDMA_IRQ_DELAY_MASK;
+ lp->rx_dma_cr &= ~XAXIDMA_CR_RUNSTOP_MASK;
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
- /* Start updating the Tx channel control register */
- lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) |
- XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
- /* Only set interrupt delay timer if not generating an interrupt on
- * the first TX packet. Otherwise leave at 0 to disable delay interrupt.
- */
- if (lp->coalesce_count_tx > 1)
- lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
- << XAXIDMA_DELAY_SHIFT) |
- XAXIDMA_IRQ_DELAY_MASK;
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
-
/* Populate the tail pointer and bring the Rx Axi DMA engine out of
* halted state. This will make the Rx side ready for reception.
*/
@@ -280,6 +301,14 @@ static void axienet_dma_start(struct axienet_local *lp)
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
(sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
+ lp->rx_dma_started = true;
+
+ spin_unlock_irq(&lp->rx_cr_lock);
+ spin_lock_irq(&lp->tx_cr_lock);
+
+ /* Start updating the Tx channel control register */
+ lp->tx_dma_cr &= ~XAXIDMA_CR_RUNSTOP_MASK;
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
/* Write to the RS (Run-stop) bit in the Tx channel control register.
* Tx channel is now ready to run. But only after we write to the
@@ -288,6 +317,9 @@ static void axienet_dma_start(struct axienet_local *lp)
axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
+ lp->tx_dma_started = true;
+
+ spin_unlock_irq(&lp->tx_cr_lock);
}
/**
@@ -623,14 +655,22 @@ static void axienet_dma_stop(struct axienet_local *lp)
int count;
u32 cr, sr;
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
+ spin_lock_irq(&lp->rx_cr_lock);
+
+ cr = lp->rx_dma_cr & ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ lp->rx_dma_started = false;
+
+ spin_unlock_irq(&lp->rx_cr_lock);
synchronize_irq(lp->rx_irq);
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
+ spin_lock_irq(&lp->tx_cr_lock);
+
+ cr = lp->tx_dma_cr & ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+ lp->tx_dma_started = false;
+
+ spin_unlock_irq(&lp->tx_cr_lock);
synchronize_irq(lp->tx_irq);
/* Give DMAs a chance to halt gracefully */
@@ -962,6 +1002,7 @@ static int axienet_tx_poll(struct napi_struct *napi, int budget)
&size, budget);
if (packets) {
+ netdev_completed_queue(ndev, packets, size);
u64_stats_update_begin(&lp->tx_stat_sync);
u64_stats_add(&lp->tx_packets, packets);
u64_stats_add(&lp->tx_bytes, size);
@@ -979,7 +1020,9 @@ static int axienet_tx_poll(struct napi_struct *napi, int budget)
* cause an immediate interrupt if any TX packets are
* already pending.
*/
+ spin_lock_irq(&lp->tx_cr_lock);
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
+ spin_unlock_irq(&lp->tx_cr_lock);
}
return packets;
}
@@ -1083,6 +1126,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (++new_tail_ptr >= lp->tx_bd_num)
new_tail_ptr = 0;
WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);
+ netdev_sent_queue(ndev, skb->len);
/* Start the transfer */
axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
@@ -1241,11 +1285,25 @@ static int axienet_rx_poll(struct napi_struct *napi, int budget)
axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
if (packets < budget && napi_complete_done(napi, packets)) {
+ if (READ_ONCE(lp->rx_dim_enabled)) {
+ struct dim_sample sample = {
+ .time = ktime_get(),
+ /* Safe because we are the only writer */
+ .pkt_ctr = u64_stats_read(&lp->rx_packets),
+ .byte_ctr = u64_stats_read(&lp->rx_bytes),
+ .event_ctr = READ_ONCE(lp->rx_irqs),
+ };
+
+ net_dim(&lp->rx_dim, &sample);
+ }
+
/* Re-enable RX completion interrupts. This should
* cause an immediate interrupt if any RX packets are
* already pending.
*/
+ spin_lock_irq(&lp->rx_cr_lock);
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
+ spin_unlock_irq(&lp->rx_cr_lock);
}
return packets;
}
@@ -1283,11 +1341,14 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
/* Disable further TX completion interrupts and schedule
* NAPI to handle the completions.
*/
- u32 cr = lp->tx_dma_cr;
-
- cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
if (napi_schedule_prep(&lp->napi_tx)) {
+ u32 cr;
+
+ spin_lock(&lp->tx_cr_lock);
+ cr = lp->tx_dma_cr;
+ cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+ spin_unlock(&lp->tx_cr_lock);
__napi_schedule(&lp->napi_tx);
}
}
@@ -1328,11 +1389,16 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
/* Disable further RX completion interrupts and schedule
* NAPI receive.
*/
- u32 cr = lp->rx_dma_cr;
-
- cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
+ WRITE_ONCE(lp->rx_irqs, READ_ONCE(lp->rx_irqs) + 1);
if (napi_schedule_prep(&lp->napi_rx)) {
+ u32 cr;
+
+ spin_lock(&lp->rx_cr_lock);
+ cr = lp->rx_dma_cr;
+ cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ spin_unlock(&lp->rx_cr_lock);
+
__napi_schedule(&lp->napi_rx);
}
}
@@ -1625,6 +1691,7 @@ err_free_eth_irq:
if (lp->eth_irq > 0)
free_irq(lp->eth_irq, ndev);
err_phy:
+ cancel_work_sync(&lp->rx_dim.work);
cancel_delayed_work_sync(&lp->stats_work);
phylink_stop(lp->phylink);
phylink_disconnect_phy(lp->phylink);
@@ -1654,6 +1721,7 @@ static int axienet_stop(struct net_device *ndev)
napi_disable(&lp->napi_rx);
}
+ cancel_work_sync(&lp->rx_dim.work);
cancel_delayed_work_sync(&lp->stats_work);
phylink_stop(lp->phylink);
@@ -1685,6 +1753,7 @@ static int axienet_stop(struct net_device *ndev)
dma_release_channel(lp->tx_chan);
}
+ netdev_reset_queue(ndev);
axienet_iow(lp, XAE_IE_OFFSET, 0);
if (lp->eth_irq > 0)
@@ -1999,6 +2068,87 @@ axienet_ethtools_set_pauseparam(struct net_device *ndev,
}
/**
+ * axienet_update_coalesce_rx() - Set RX CR
+ * @lp: Device private data
+ * @cr: Value to write to the RX CR
+ * @mask: Bits to set from @cr
+ */
+static void axienet_update_coalesce_rx(struct axienet_local *lp, u32 cr,
+ u32 mask)
+{
+ spin_lock_irq(&lp->rx_cr_lock);
+ lp->rx_dma_cr &= ~mask;
+ lp->rx_dma_cr |= cr;
+ /* If DMA isn't started, then the settings will be applied the next
+ * time dma_start() is called.
+ */
+ if (lp->rx_dma_started) {
+ u32 reg = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+
+ /* Don't enable IRQs if they are disabled by NAPI */
+ if (reg & XAXIDMA_IRQ_ALL_MASK)
+ cr = lp->rx_dma_cr;
+ else
+ cr = lp->rx_dma_cr & ~XAXIDMA_IRQ_ALL_MASK;
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ }
+ spin_unlock_irq(&lp->rx_cr_lock);
+}
+
+/**
+ * axienet_dim_coalesce_count_rx() - RX coalesce count for DIM
+ * @lp: Device private data
+ */
+static u32 axienet_dim_coalesce_count_rx(struct axienet_local *lp)
+{
+ return min(1 << (lp->rx_dim.profile_ix << 1), 255);
+}
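
With the clamp, profile_ix values 0 through 4 select coalesce counts of 1, 4, 16, 64 and 255 (1 << 8 = 256 is capped to the 8-bit field), so DIM walks a roughly geometric ladder.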
+
+/**
+ * axienet_rx_dim_work() - Adjust RX DIM settings
+ * @work: The work struct
+ */
+static void axienet_rx_dim_work(struct work_struct *work)
+{
+ struct axienet_local *lp =
+ container_of(work, struct axienet_local, rx_dim.work);
+ u32 cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp), 0);
+ u32 mask = XAXIDMA_COALESCE_MASK | XAXIDMA_IRQ_IOC_MASK |
+ XAXIDMA_IRQ_ERROR_MASK;
+
+ axienet_update_coalesce_rx(lp, cr, mask);
+ lp->rx_dim.state = DIM_START_MEASURE;
+}
+
+/**
+ * axienet_update_coalesce_tx() - Set TX CR
+ * @lp: Device private data
+ * @cr: Value to write to the TX CR
+ * @mask: Bits to set from @cr
+ */
+static void axienet_update_coalesce_tx(struct axienet_local *lp, u32 cr,
+ u32 mask)
+{
+ spin_lock_irq(&lp->tx_cr_lock);
+ lp->tx_dma_cr &= ~mask;
+ lp->tx_dma_cr |= cr;
+ /* If DMA isn't started, then the settings will be applied the next
+ * time dma_start() is called.
+ */
+ if (lp->tx_dma_started) {
+ u32 reg = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+
+ /* Don't enable IRQs if they are disabled by NAPI */
+ if (reg & XAXIDMA_IRQ_ALL_MASK)
+ cr = lp->tx_dma_cr;
+ else
+ cr = lp->tx_dma_cr & ~XAXIDMA_IRQ_ALL_MASK;
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+ }
+ spin_unlock_irq(&lp->tx_cr_lock);
+}
+
+/**
* axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
* @ndev: Pointer to net_device structure
* @ecoalesce: Pointer to ethtool_coalesce structure
@@ -2018,11 +2168,23 @@ axienet_ethtools_get_coalesce(struct net_device *ndev,
struct netlink_ext_ack *extack)
{
struct axienet_local *lp = netdev_priv(ndev);
-
- ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx;
- ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx;
- ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx;
- ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx;
+ u32 cr;
+
+ ecoalesce->use_adaptive_rx_coalesce = lp->rx_dim_enabled;
+
+ spin_lock_irq(&lp->rx_cr_lock);
+ cr = lp->rx_dma_cr;
+ spin_unlock_irq(&lp->rx_cr_lock);
+ axienet_coalesce_params(lp, cr,
+ &ecoalesce->rx_max_coalesced_frames,
+ &ecoalesce->rx_coalesce_usecs);
+
+ spin_lock_irq(&lp->tx_cr_lock);
+ cr = lp->tx_dma_cr;
+ spin_unlock_irq(&lp->tx_cr_lock);
+ axienet_coalesce_params(lp, cr,
+ &ecoalesce->tx_max_coalesced_frames,
+ &ecoalesce->tx_coalesce_usecs);
return 0;
}
@@ -2046,12 +2208,9 @@ axienet_ethtools_set_coalesce(struct net_device *ndev,
struct netlink_ext_ack *extack)
{
struct axienet_local *lp = netdev_priv(ndev);
-
- if (netif_running(ndev)) {
- NL_SET_ERR_MSG(extack,
- "Please stop netif before applying configuration");
- return -EBUSY;
- }
+ bool new_dim = ecoalesce->use_adaptive_rx_coalesce;
+ bool old_dim = lp->rx_dim_enabled;
+ u32 cr, mask = ~XAXIDMA_CR_RUNSTOP_MASK;
if (ecoalesce->rx_max_coalesced_frames > 255 ||
ecoalesce->tx_max_coalesced_frames > 255) {
@@ -2065,7 +2224,7 @@ axienet_ethtools_set_coalesce(struct net_device *ndev,
return -EINVAL;
}
- if ((ecoalesce->rx_max_coalesced_frames > 1 &&
+ if (((ecoalesce->rx_max_coalesced_frames > 1 || new_dim) &&
!ecoalesce->rx_coalesce_usecs) ||
(ecoalesce->tx_max_coalesced_frames > 1 &&
!ecoalesce->tx_coalesce_usecs)) {
@@ -2074,11 +2233,31 @@ axienet_ethtools_set_coalesce(struct net_device *ndev,
return -EINVAL;
}
- lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
- lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
- lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
- lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;
+ if (new_dim && !old_dim) {
+ cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp),
+ ecoalesce->rx_coalesce_usecs);
+ } else if (!new_dim) {
+ if (old_dim) {
+ WRITE_ONCE(lp->rx_dim_enabled, false);
+ napi_synchronize(&lp->napi_rx);
+ flush_work(&lp->rx_dim.work);
+ }
+
+ cr = axienet_calc_cr(lp, ecoalesce->rx_max_coalesced_frames,
+ ecoalesce->rx_coalesce_usecs);
+ } else {
+ /* Dummy value for count just to calculate timer */
+ cr = axienet_calc_cr(lp, 2, ecoalesce->rx_coalesce_usecs);
+ mask = XAXIDMA_DELAY_MASK | XAXIDMA_IRQ_DELAY_MASK;
+ }
+
+ axienet_update_coalesce_rx(lp, cr, mask);
+ if (new_dim && !old_dim)
+ WRITE_ONCE(lp->rx_dim_enabled, true);
+ cr = axienet_calc_cr(lp, ecoalesce->tx_max_coalesced_frames,
+ ecoalesce->tx_coalesce_usecs);
+ axienet_update_coalesce_tx(lp, cr, ~XAXIDMA_CR_RUNSTOP_MASK);
return 0;
}
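
With the -EBUSY guard gone, these parameters can now be retuned while the interface is up, e.g. ethtool -C ethX adaptive-rx on rx-usecs 16 (interface name hypothetical); note the validation above still requires a nonzero rx-usecs whenever adaptive RX is enabled.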
@@ -2316,7 +2495,8 @@ axienet_ethtool_get_rmon_stats(struct net_device *dev,
static const struct ethtool_ops axienet_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
- ETHTOOL_COALESCE_USECS,
+ ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_drvinfo = axienet_ethtools_get_drvinfo,
.get_regs_len = axienet_ethtools_get_regs_len,
.get_regs = axienet_ethtools_get_regs,
@@ -2499,6 +2679,7 @@ static void axienet_dma_err_handler(struct work_struct *work)
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
axienet_dma_stop(lp);
+ netdev_reset_queue(ndev);
for (i = 0; i < lp->tx_bd_num; i++) {
cur_p = &lp->tx_bd_v[i];
@@ -2858,10 +3039,15 @@ static int axienet_probe(struct platform_device *pdev)
axienet_set_mac_address(ndev, NULL);
}
- lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
- lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
- lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
- lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
+ spin_lock_init(&lp->rx_cr_lock);
+ spin_lock_init(&lp->tx_cr_lock);
+ INIT_WORK(&lp->rx_dim.work, axienet_rx_dim_work);
+ lp->rx_dim_enabled = true;
+ lp->rx_dim.profile_ix = 1;
+ lp->rx_dma_cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp),
+ XAXIDMA_DFT_RX_USEC);
+ lp->tx_dma_cr = axienet_calc_cr(lp, XAXIDMA_DFT_TX_THRESHOLD,
+ XAXIDMA_DFT_TX_USEC);
ret = axienet_mdio_setup(lp);
if (ret)
@@ -2891,7 +3077,6 @@ static int axienet_probe(struct platform_device *pdev)
}
of_node_put(np);
lp->pcs.ops = &axienet_pcs_ops;
- lp->pcs.neg_mode = true;
lp->pcs.poll = true;
}