Diffstat (limited to 'drivers')
-rw-r--r--  drivers/android/binder_alloc.c | 2
-rw-r--r--  drivers/atm/atmtcp.c | 10
-rw-r--r--  drivers/atm/eni.c | 4
-rw-r--r--  drivers/atm/idt77252.c | 9
-rw-r--r--  drivers/base/property.c | 2
-rw-r--r--  drivers/bluetooth/bcm203x.c | 2
-rw-r--r--  drivers/bluetooth/bluecard_cs.c | 2
-rw-r--r--  drivers/bluetooth/btintel.c | 59
-rw-r--r--  drivers/bluetooth/btintel.h | 21
-rw-r--r--  drivers/bluetooth/btmrvl_main.c | 11
-rw-r--r--  drivers/bluetooth/btmrvl_sdio.c | 21
-rw-r--r--  drivers/bluetooth/btmtksdio.c | 16
-rw-r--r--  drivers/bluetooth/btqca.c | 27
-rw-r--r--  drivers/bluetooth/btqca.h | 2
-rw-r--r--  drivers/bluetooth/btusb.c | 303
-rw-r--r--  drivers/bluetooth/hci_h5.c | 2
-rw-r--r--  drivers/bluetooth/hci_ll.c | 2
-rw-r--r--  drivers/bluetooth/hci_qca.c | 134
-rw-r--r--  drivers/bluetooth/hci_serdev.c | 3
-rw-r--r--  drivers/char/mem.c | 10
-rw-r--r--  drivers/char/random.c | 1
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_cm.c | 2
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_main.c | 12
-rw-r--r--  drivers/firmware/qemu_fw_cfg.c | 7
-rw-r--r--  drivers/fpga/dfl-afu-main.c | 3
-rw-r--r--  drivers/fpga/dfl-pci.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 36
-rw-r--r--  drivers/gpu/drm/bochs/bochs_kms.c | 1
-rw-r--r--  drivers/gpu/drm/bridge/adv7511/adv7511_drv.c | 1
-rw-r--r--  drivers/gpu/drm/bridge/nwl-dsi.c | 5
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 6
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 10
-rw-r--r--  drivers/gpu/drm/drm_mipi_dbi.c | 2
-rw-r--r--  drivers/gpu/drm/drm_of.c | 4
-rw-r--r--  drivers/gpu/drm/mcde/mcde_display.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/disp.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 27
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c | 30
-rw-r--r--  drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c | 6
-rw-r--r--  drivers/gpu/drm/panel/panel-simple.c | 16
-rw-r--r--  drivers/i2c/i2c-core-slave.c | 7
-rw-r--r--  drivers/infiniband/core/cq.c | 14
-rw-r--r--  drivers/infiniband/core/ucma.c | 4
-rw-r--r--  drivers/infiniband/hw/bnxt_re/hw_counters.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/odp.c | 5
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c | 29
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.c | 2
-rw-r--r--  drivers/infiniband/sw/rdmavt/qp.c | 33
-rw-r--r--  drivers/infiniband/sw/rdmavt/rc.c | 4
-rw-r--r--  drivers/interconnect/core.c | 12
-rw-r--r--  drivers/interconnect/qcom/msm8916.c | 14
-rw-r--r--  drivers/misc/habanalabs/command_submission.c | 14
-rw-r--r--  drivers/misc/habanalabs/debugfs.c | 23
-rw-r--r--  drivers/misc/habanalabs/device.c | 2
-rw-r--r--  drivers/misc/habanalabs/firmware_if.c | 10
-rw-r--r--  drivers/misc/habanalabs/gaudi/gaudi.c | 123
-rw-r--r--  drivers/misc/habanalabs/goya/goya.c | 20
-rw-r--r--  drivers/misc/habanalabs/habanalabs.h | 19
-rw-r--r--  drivers/misc/habanalabs/habanalabs_drv.c | 2
-rw-r--r--  drivers/misc/habanalabs/hwmon.c | 19
-rw-r--r--  drivers/misc/habanalabs/sysfs.c | 11
-rw-r--r--  drivers/net/bareudp.c | 29
-rw-r--r--  drivers/net/dsa/dsa_loop.c | 61
-rw-r--r--  drivers/net/dsa/qca8k.c | 192
-rw-r--r--  drivers/net/dsa/qca8k.h | 29
-rw-r--r--  drivers/net/dsa/rtl8366.c | 35
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_ptp.c | 79
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_ptp.h | 5
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 492
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 96
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 138
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 468
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_device.c | 11
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 7
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 17
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c | 101
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.h | 5
-rw-r--r--  drivers/net/ethernet/cortina/gemini.c | 5
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_qos.c | 8
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman.c | 3
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_dtsec.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_mac.h | 2
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_memac.c | 3
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_port.c | 9
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_tgec.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 18
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 35
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 38
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_dev.h | 4
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_devlink.c | 286
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_devlink.h | 8
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_ethtool.c | 20
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c | 27
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h | 4
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c | 2
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h | 2
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c | 160
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h | 148
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c | 39
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h | 6
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_if.c | 23
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_if.h | 10
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c | 2
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c | 11
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h | 4
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_main.c | 87
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_port.c | 62
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_port.h | 25
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_sriov.c | 6
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 2
-rw-r--r--  drivers/net/ethernet/intel/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/intel/e100.c | 32
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 4
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c | 45
-rw-r--r--  drivers/net/ethernet/intel/ice/Makefile | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 10
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 85
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c | 84
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.h | 7
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb.c | 33
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.h | 11
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_devlink.c | 56
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_pipe.c | 100
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flow.c | 13
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fw_update.c | 773
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fw_update.h | 12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h | 314
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 17
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 350
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_nvm.c | 186
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_nvm.h | 16
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.c | 70
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c | 39
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx_lib.c | 7
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h | 16
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 36
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c | 12
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 13
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 37
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_hw.h | 20
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_mac.c | 12
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 8
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_regs.h | 14
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 61
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 44
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 1
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c | 2
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mcg.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 60
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c | 30
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 30
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_common.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 207
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 41
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 109
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 54
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 37
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c | 78
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c | 64
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 145
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c | 42
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vport.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.h | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_env.c | 53
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c | 75
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 59
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c | 269
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h | 18
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c | 10
-rw-r--r--  drivers/net/ethernet/ni/nixge.c | 8
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_ethtool.c | 96
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.c | 46
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.h | 5
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 188
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_txrx.h | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_chain.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.c | 5
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sriov.c | 13
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_main.c | 8
-rw-r--r--  drivers/net/ethernet/sfc/Kconfig | 5
-rw-r--r--  drivers/net/ethernet/sfc/Makefile | 4
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 7
-rw-r--r--  drivers/net/ethernet/sfc/ef100.c | 543
-rw-r--r--  drivers/net/ethernet/sfc/ef100.h | 12
-rw-r--r--  drivers/net/ethernet/sfc/ef100_ethtool.c | 24
-rw-r--r--  drivers/net/ethernet/sfc/ef100_ethtool.h | 12
-rw-r--r--  drivers/net/ethernet/sfc/ef100_netdev.c | 289
-rw-r--r--  drivers/net/ethernet/sfc/ef100_netdev.h | 17
-rw-r--r--  drivers/net/ethernet/sfc/ef100_nic.c | 1276
-rw-r--r--  drivers/net/ethernet/sfc/ef100_nic.h | 80
-rw-r--r--  drivers/net/ethernet/sfc/ef100_regs.h | 693
-rw-r--r--  drivers/net/ethernet/sfc/ef100_rx.c | 167
-rw-r--r--  drivers/net/ethernet/sfc/ef100_rx.h | 21
-rw-r--r--  drivers/net/ethernet/sfc/ef100_tx.c | 408
-rw-r--r--  drivers/net/ethernet/sfc/ef100_tx.h | 26
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 8
-rw-r--r--  drivers/net/ethernet/sfc/efx.h | 16
-rw-r--r--  drivers/net/ethernet/sfc/efx_common.c | 11
-rw-r--r--  drivers/net/ethernet/sfc/ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/sfc/ethtool_common.c | 10
-rw-r--r--  drivers/net/ethernet/sfc/ethtool_common.h | 3
-rw-r--r--  drivers/net/ethernet/sfc/io.h | 16
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.h | 4
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 39
-rw-r--r--  drivers/net/ethernet/sfc/nic_common.h | 6
-rw-r--r--  drivers/net/ethernet/sfc/ptp.c | 20
-rw-r--r--  drivers/net/ethernet/sfc/siena.c | 3
-rw-r--r--  drivers/net/ethernet/sfc/tx.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/tx_common.c | 1
-rw-r--r--  drivers/net/ethernet/sfc/tx_common.h | 2
-rw-r--r--  drivers/net/ethernet/sgi/ioc3-eth.c | 4
-rw-r--r--  drivers/net/ethernet/silan/sc92031.c | 26
-rw-r--r--  drivers/net/ethernet/sis/sis900.c | 23
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 19
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 18
-rw-r--r--  drivers/net/ethernet/ti/tlan.c | 31
-rw-r--r--  drivers/net/ethernet/toshiba/spider_net.c | 6
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac_main.c | 6
-rw-r--r--  drivers/net/netdevsim/dev.c | 6
-rw-r--r--  drivers/net/phy/Kconfig | 1
-rw-r--r--  drivers/net/phy/mdio_bus.c | 4
-rw-r--r--  drivers/net/phy/mdio_device.c | 2
-rw-r--r--  drivers/net/tun.c | 2
-rw-r--r--  drivers/net/usb/hso.c | 5
-rw-r--r--  drivers/net/usb/lan78xx.c | 113
-rw-r--r--  drivers/net/vxlan.c | 16
-rw-r--r--  drivers/net/wan/farsync.c | 10
-rw-r--r--  drivers/net/wireless/ath/ath10k/mac.c | 9
-rw-r--r--  drivers/net/wireless/ath/ath11k/mac.c | 2
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 2
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/sdio.h | 4
-rw-r--r--  drivers/nvme/host/core.c | 15
-rw-r--r--  drivers/nvme/host/nvme.h | 7
-rw-r--r--  drivers/nvme/host/pci.c | 4
-rw-r--r--  drivers/nvme/host/tcp.c | 3
-rw-r--r--  drivers/of/of_mdio.c | 2
-rw-r--r--  drivers/pci/quirks.c | 13
-rw-r--r--  drivers/pinctrl/qcom/Kconfig | 2
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-msm.c | 74
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-msm.h | 4
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-sc7180.c | 1
-rw-r--r--  drivers/ptp/idt8a340_reg.h | 48
-rw-r--r--  drivers/ptp/ptp_clockmatrix.c | 1145
-rw-r--r--  drivers/ptp/ptp_clockmatrix.h | 61
-rw-r--r--  drivers/s390/net/ism_drv.c | 2
-rw-r--r--  drivers/s390/net/qeth_core.h | 2
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 76
-rw-r--r--  drivers/s390/net/qeth_l2_main.c | 5
-rw-r--r--  drivers/s390/net/qeth_l3_main.c | 1
-rw-r--r--  drivers/scsi/scsi_lib.c | 16
-rw-r--r--  drivers/staging/comedi/drivers/addi_apci_1032.c | 20
-rw-r--r--  drivers/staging/comedi/drivers/addi_apci_1500.c | 24
-rw-r--r--  drivers/staging/comedi/drivers/addi_apci_1564.c | 20
-rw-r--r--  drivers/staging/comedi/drivers/ni_6527.c | 2
-rw-r--r--  drivers/staging/wlan-ng/prism2usb.c | 16
-rw-r--r--  drivers/tty/serial/8250/8250_core.c | 2
-rw-r--r--  drivers/tty/serial/8250/8250_exar.c | 12
-rw-r--r--  drivers/tty/serial/8250/8250_mtk.c | 18
-rw-r--r--  drivers/tty/serial/serial-tegra.c | 16
-rw-r--r--  drivers/tty/serial/xilinx_uartps.c | 8
-rw-r--r--  drivers/tty/vt/vt.c | 29
-rw-r--r--  drivers/usb/host/xhci-mtk-sch.c | 4
-rw-r--r--  drivers/usb/host/xhci-pci.c | 3
-rw-r--r--  drivers/usb/host/xhci-tegra.c | 2
-rw-r--r--  drivers/vhost/scsi.c | 2
-rw-r--r--  drivers/video/fbdev/core/bitblit.c | 4
-rw-r--r--  drivers/video/fbdev/core/fbcon_ccw.c | 4
-rw-r--r--  drivers/video/fbdev/core/fbcon_cw.c | 4
-rw-r--r--  drivers/video/fbdev/core/fbcon_ud.c | 4
-rw-r--r--  drivers/virtio/Kconfig | 2
-rw-r--r--  drivers/virtio/virtio_balloon.c | 11
330 files changed, 11738 insertions, 2627 deletions
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 42c672f1584e..cbe6aa77d50d 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -947,7 +947,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
trace_binder_unmap_user_end(alloc, index);
}
mmap_read_unlock(mm);
- mmput(mm);
+ mmput_async(mm);
trace_binder_unmap_kernel_start(alloc, index);
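
Note on the hunk above: binder_alloc_free_page() runs as an LRU shrinker callback, i.e. in reclaim context, and mmput() may drop the final mm reference and run __mmput(), which can sleep. mmput_async() defers that final teardown to a workqueue. A minimal sketch of the pattern (the helper name is invented; mmput_async() is the real API from <linux/sched/mm.h>):

#include <linux/sched/mm.h>

static void reclaim_done_with_mm(struct mm_struct *mm)
{
	/* mmput() could drop the last reference and run __mmput(),
	 * which may sleep -- unsafe from a shrinker. mmput_async()
	 * hands the potential final teardown to a workqueue instead.
	 */
	mmput_async(mm);
}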
diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
index d9fd70280482..7f814da3c2d0 100644
--- a/drivers/atm/atmtcp.c
+++ b/drivers/atm/atmtcp.c
@@ -433,9 +433,15 @@ static int atmtcp_remove_persistent(int itf)
return -EMEDIUMTYPE;
}
dev_data = PRIV(dev);
- if (!dev_data->persist) return 0;
+ if (!dev_data->persist) {
+ atm_dev_put(dev);
+ return 0;
+ }
dev_data->persist = 0;
- if (PRIV(dev)->vcc) return 0;
+ if (PRIV(dev)->vcc) {
+ atm_dev_put(dev);
+ return 0;
+ }
kfree(dev_data);
atm_dev_put(dev);
atm_dev_deregister(dev);
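
The atmtcp fix plugs a reference leak: atm_dev_lookup() (earlier in the function, not shown in the hunk) takes a device reference that the two early returns never dropped. An equivalent single-exit shape, as an illustrative sketch only (the struct mirrors the driver's private data behind its PRIV() macro; the type checks and error codes of the real function are simplified away):

#include <linux/atmdev.h>
#include <linux/slab.h>

struct atmtcp_dev_data_sketch {		/* mirrors what PRIV(dev) yields */
	struct atm_vcc *vcc;
	int persist;
};

static int remove_persistent_sketch(int itf)
{
	struct atm_dev *dev = atm_dev_lookup(itf);	/* takes a reference */
	struct atmtcp_dev_data_sketch *dev_data;
	bool deregister = false;

	if (!dev)
		return -ENODEV;

	dev_data = dev->dev_data;
	if (dev_data->persist) {
		dev_data->persist = 0;
		if (!dev_data->vcc) {
			kfree(dev_data);
			deregister = true;
		}
	}
	atm_dev_put(dev);		/* every path balances the lookup */
	if (deregister)
		atm_dev_deregister(dev);
	return 0;
}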
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index b3d8e00e7671..39be444534d0 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -1034,6 +1034,7 @@ static enum enq_res do_tx(struct sk_buff *skb)
u32 dma_rd,dma_wr;
u32 size; /* in words */
int aal5,dma_size,i,j;
+ unsigned char skb_data3;
DPRINTK(">do_tx\n");
NULLCHECK(skb);
@@ -1108,6 +1109,7 @@ DPRINTK("iovcnt = %d\n",skb_shinfo(skb)->nr_frags);
vcc->dev->number);
return enq_jam;
}
+ skb_data3 = skb->data[3];
paddr = dma_map_single(&eni_dev->pci_dev->dev,skb->data,skb->len,
DMA_TO_DEVICE);
ENI_PRV_PADDR(skb) = paddr;
@@ -1150,7 +1152,7 @@ DPRINTK("doing direct send\n"); /* @@@ well, this doesn't work anyway */
(size/(ATM_CELL_PAYLOAD/4)),tx->send+tx->tx_pos*4);
/*printk("dsc = 0x%08lx\n",(unsigned long) readl(tx->send+tx->tx_pos*4));*/
writel((vcc->vci << MID_SEG_VCI_SHIFT) |
- (aal5 ? 0 : (skb->data[3] & 0xf)) |
+ (aal5 ? 0 : (skb_data3 & 0xf)) |
(ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP ? MID_SEG_CLP : 0),
tx->send+((tx->tx_pos+1) & (tx->words-1))*4);
DPRINTK("size: %d, len:%d\n",size,skb->len);
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index df51680e8931..65a3886f68c9 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -835,6 +835,7 @@ queue_skb(struct idt77252_dev *card, struct vc_map *vc,
unsigned long flags;
int error;
int aal;
+ u32 word4;
if (skb->len == 0) {
printk("%s: invalid skb->len (%d)\n", card->name, skb->len);
@@ -846,6 +847,8 @@ queue_skb(struct idt77252_dev *card, struct vc_map *vc,
tbd = &IDT77252_PRV_TBD(skb);
vcc = ATM_SKB(skb)->vcc;
+ word4 = (skb->data[0] << 24) | (skb->data[1] << 16) |
+ (skb->data[2] << 8) | (skb->data[3] << 0);
IDT77252_PRV_PADDR(skb) = dma_map_single(&card->pcidev->dev, skb->data,
skb->len, DMA_TO_DEVICE);
@@ -859,8 +862,7 @@ queue_skb(struct idt77252_dev *card, struct vc_map *vc,
tbd->word_1 = SAR_TBD_OAM | ATM_CELL_PAYLOAD | SAR_TBD_EPDU;
tbd->word_2 = IDT77252_PRV_PADDR(skb) + 4;
tbd->word_3 = 0x00000000;
- tbd->word_4 = (skb->data[0] << 24) | (skb->data[1] << 16) |
- (skb->data[2] << 8) | (skb->data[3] << 0);
+ tbd->word_4 = word4;
if (test_bit(VCF_RSV, &vc->flags))
vc = card->vcs[0];
@@ -890,8 +892,7 @@ queue_skb(struct idt77252_dev *card, struct vc_map *vc,
tbd->word_2 = IDT77252_PRV_PADDR(skb) + 4;
tbd->word_3 = 0x00000000;
- tbd->word_4 = (skb->data[0] << 24) | (skb->data[1] << 16) |
- (skb->data[2] << 8) | (skb->data[3] << 0);
+ tbd->word_4 = word4;
break;
case ATM_AAL5:
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 1e6d75e65938..d58aa98fe964 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -721,7 +721,7 @@ struct fwnode_handle *device_get_next_child_node(struct device *dev,
return next;
/* When no more children in primary, continue with secondary */
- if (!IS_ERR_OR_NULL(fwnode->secondary))
+ if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary))
next = fwnode_get_next_child_node(fwnode->secondary, child);
return next;
diff --git a/drivers/bluetooth/bcm203x.c b/drivers/bluetooth/bcm203x.c
index 3b176257b993..e667933c3d70 100644
--- a/drivers/bluetooth/bcm203x.c
+++ b/drivers/bluetooth/bcm203x.c
@@ -106,7 +106,7 @@ static void bcm203x_complete(struct urb *urb)
}
data->state = BCM203X_LOAD_FIRMWARE;
- /* fall through */
+ fallthrough;
case BCM203X_LOAD_FIRMWARE:
if (data->fw_sent == data->fw_size) {
usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, BCM203X_IN_EP),
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index cc6e56223656..36eabf61717f 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -295,7 +295,6 @@ static void bluecard_write_wakeup(struct bluecard_info *info)
baud_reg = REG_CONTROL_BAUD_RATE_115200;
break;
case PKT_BAUD_RATE_57600:
- /* Fall through... */
default:
baud_reg = REG_CONTROL_BAUD_RATE_57600;
break;
@@ -585,7 +584,6 @@ static int bluecard_hci_set_baud_rate(struct hci_dev *hdev, int baud)
hci_skb_pkt_type(skb) = PKT_BAUD_RATE_115200;
break;
case 57600:
- /* Fall through... */
default:
cmd[4] = 0x03;
hci_skb_pkt_type(skb) = PKT_BAUD_RATE_57600;
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index 6a0e2c5a8beb..5fa5be3c5598 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -754,6 +754,65 @@ void btintel_reset_to_bootloader(struct hci_dev *hdev)
}
EXPORT_SYMBOL_GPL(btintel_reset_to_bootloader);
+int btintel_read_debug_features(struct hci_dev *hdev,
+ struct intel_debug_features *features)
+{
+ struct sk_buff *skb;
+ u8 page_no = 1;
+
+ /* Intel controller supports two pages, each page is of 128-bit
+ * feature bit mask. And each bit defines specific feature support
+ */
+ skb = __hci_cmd_sync(hdev, 0xfca6, sizeof(page_no), &page_no,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Reading supported features failed (%ld)",
+ PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+
+ if (skb->len != (sizeof(features->page1) + 3)) {
+ bt_dev_err(hdev, "Supported features event size mismatch");
+ kfree_skb(skb);
+ return -EILSEQ;
+ }
+
+ memcpy(features->page1, skb->data + 3, sizeof(features->page1));
+
+ /* Read the supported features page2 if required in future.
+ */
+ kfree_skb(skb);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btintel_read_debug_features);
+
+int btintel_set_debug_features(struct hci_dev *hdev,
+ const struct intel_debug_features *features)
+{
+ u8 mask[11] = { 0x0a, 0x92, 0x02, 0x07, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00 };
+ struct sk_buff *skb;
+
+ if (!features)
+ return -EINVAL;
+
+ if (!(features->page1[0] & 0x3f)) {
+ bt_dev_info(hdev, "Telemetry exception format not supported");
+ return 0;
+ }
+
+ skb = __hci_cmd_sync(hdev, 0xfc8b, 11, mask, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Setting Intel telemetry ddc write event mask failed (%ld)",
+ PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+
+ kfree_skb(skb);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btintel_set_debug_features);
+
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth support for Intel devices ver " VERSION);
MODULE_VERSION(VERSION);
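
The two new exports are meant to be called back-to-back after firmware download, which is exactly what the btusb.c diff further below does. A minimal usage sketch (the wrapper function name is invented; the two btintel calls are the API added above):

static void enable_intel_telemetry_sketch(struct hci_dev *hdev)
{
	struct intel_debug_features features;

	if (btintel_read_debug_features(hdev, &features))
		return;	/* debug features are optional; failure is non-fatal */

	/* Sends the 0xfc8b DDC event mask; internally a quiet no-op when
	 * page1 reports no telemetry exception format support. */
	btintel_set_debug_features(hdev, &features);
}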
diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
index a69ea8a87b9b..08e20606fb58 100644
--- a/drivers/bluetooth/btintel.h
+++ b/drivers/bluetooth/btintel.h
@@ -62,6 +62,10 @@ struct intel_reset {
__le32 boot_param;
} __packed;
+struct intel_debug_features {
+ __u8 page1[16];
+} __packed;
+
#if IS_ENABLED(CONFIG_BT_INTEL)
int btintel_check_bdaddr(struct hci_dev *hdev);
@@ -88,6 +92,10 @@ int btintel_read_boot_params(struct hci_dev *hdev,
int btintel_download_firmware(struct hci_dev *dev, const struct firmware *fw,
u32 *boot_param);
void btintel_reset_to_bootloader(struct hci_dev *hdev);
+int btintel_read_debug_features(struct hci_dev *hdev,
+ struct intel_debug_features *features);
+int btintel_set_debug_features(struct hci_dev *hdev,
+ const struct intel_debug_features *features);
#else
static inline int btintel_check_bdaddr(struct hci_dev *hdev)
@@ -186,4 +194,17 @@ static inline int btintel_download_firmware(struct hci_dev *dev,
static inline void btintel_reset_to_bootloader(struct hci_dev *hdev)
{
}
+
+static inline int btintel_read_debug_features(struct hci_dev *hdev,
+ struct intel_debug_features *features)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int btintel_set_debug_features(struct hci_dev *hdev,
+ const struct intel_debug_features *features)
+{
+ return -EOPNOTSUPP;
+}
+
#endif
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 708ad21683eb..8b9d78ce6bb2 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -587,6 +587,14 @@ static int btmrvl_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
return 0;
}
+static bool btmrvl_prevent_wake(struct hci_dev *hdev)
+{
+ struct btmrvl_private *priv = hci_get_drvdata(hdev);
+ struct btmrvl_sdio_card *card = priv->btmrvl_dev.card;
+
+ return !device_may_wakeup(&card->func->dev);
+}
+
/*
* This function handles the event generated by firmware, rx data
* received from firmware, and tx data sent from kernel.
@@ -669,6 +677,7 @@ static int btmrvl_service_main_thread(void *data)
int btmrvl_register_hdev(struct btmrvl_private *priv)
{
struct hci_dev *hdev = NULL;
+ struct btmrvl_sdio_card *card = priv->btmrvl_dev.card;
int ret;
hdev = hci_alloc_dev();
@@ -687,6 +696,8 @@ int btmrvl_register_hdev(struct btmrvl_private *priv)
hdev->send = btmrvl_send_frame;
hdev->setup = btmrvl_setup;
hdev->set_bdaddr = btmrvl_set_bdaddr;
+ hdev->prevent_wake = btmrvl_prevent_wake;
+ SET_HCIDEV_DEV(hdev, &card->func->dev);
hdev->dev_type = priv->btmrvl_dev.dev_type;
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index a296f8526433..d15fd5be0216 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -111,6 +111,9 @@ static int btmrvl_sdio_probe_of(struct device *dev,
"Failed to request irq_bt %d (%d)\n",
cfg->irq_bt, ret);
}
+
+ /* Configure wakeup (enabled by default) */
+ device_init_wakeup(dev, true);
disable_irq(cfg->irq_bt);
}
}
@@ -328,7 +331,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8897 = {
static const struct btmrvl_sdio_device btmrvl_sdio_sd8977 = {
.helper = NULL,
- .firmware = "mrvl/sd8977_uapsta.bin",
+ .firmware = "mrvl/sdsd8977_combo_v2.bin",
.reg = &btmrvl_reg_8977,
.support_pscan_win_report = true,
.sd_blksz_fw_dl = 256,
@@ -346,7 +349,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8987 = {
static const struct btmrvl_sdio_device btmrvl_sdio_sd8997 = {
.helper = NULL,
- .firmware = "mrvl/sd8997_uapsta.bin",
+ .firmware = "mrvl/sdsd8997_combo_v4.bin",
.reg = &btmrvl_reg_8997,
.support_pscan_win_report = true,
.sd_blksz_fw_dl = 256,
@@ -1654,6 +1657,7 @@ static void btmrvl_sdio_remove(struct sdio_func *func)
MODULE_SHUTDOWN_REQ);
btmrvl_sdio_disable_host_int(card);
}
+
BT_DBG("unregister dev");
card->priv->surprise_removed = true;
btmrvl_sdio_unregister_dev(card);
@@ -1690,7 +1694,8 @@ static int btmrvl_sdio_suspend(struct device *dev)
}
/* Enable platform specific wakeup interrupt */
- if (card->plt_wake_cfg && card->plt_wake_cfg->irq_bt >= 0) {
+ if (card->plt_wake_cfg && card->plt_wake_cfg->irq_bt >= 0 &&
+ device_may_wakeup(dev)) {
card->plt_wake_cfg->wake_by_bt = false;
enable_irq(card->plt_wake_cfg->irq_bt);
enable_irq_wake(card->plt_wake_cfg->irq_bt);
@@ -1707,7 +1712,8 @@ static int btmrvl_sdio_suspend(struct device *dev)
BT_ERR("HS not activated, suspend failed!");
/* Disable platform specific wakeup interrupt */
if (card->plt_wake_cfg &&
- card->plt_wake_cfg->irq_bt >= 0) {
+ card->plt_wake_cfg->irq_bt >= 0 &&
+ device_may_wakeup(dev)) {
disable_irq_wake(card->plt_wake_cfg->irq_bt);
disable_irq(card->plt_wake_cfg->irq_bt);
}
@@ -1767,7 +1773,8 @@ static int btmrvl_sdio_resume(struct device *dev)
hci_resume_dev(hcidev);
/* Disable platform specific wakeup interrupt */
- if (card->plt_wake_cfg && card->plt_wake_cfg->irq_bt >= 0) {
+ if (card->plt_wake_cfg && card->plt_wake_cfg->irq_bt >= 0 &&
+ device_may_wakeup(dev)) {
disable_irq_wake(card->plt_wake_cfg->irq_bt);
disable_irq(card->plt_wake_cfg->irq_bt);
if (card->plt_wake_cfg->wake_by_bt)
@@ -1831,6 +1838,6 @@ MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8887_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8897_uapsta.bin");
-MODULE_FIRMWARE("mrvl/sd8977_uapsta.bin");
+MODULE_FIRMWARE("mrvl/sdsd8977_combo_v2.bin");
MODULE_FIRMWARE("mrvl/sd8987_uapsta.bin");
-MODULE_FIRMWARE("mrvl/sd8997_uapsta.bin");
+MODULE_FIRMWARE("mrvl/sdsd8997_combo_v4.bin");
diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
index bff095be2f97..c7ab7a23bd67 100644
--- a/drivers/bluetooth/btmtksdio.c
+++ b/drivers/bluetooth/btmtksdio.c
@@ -685,7 +685,7 @@ static int mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
const u8 *fw_ptr;
size_t fw_size;
int err, dlen;
- u8 flag;
+ u8 flag, param;
err = request_firmware(&fw, fwname, &hdev->dev);
if (err < 0) {
@@ -693,6 +693,20 @@ static int mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
return err;
}
+ /* Power on data RAM the firmware relies on. */
+ param = 1;
+ wmt_params.op = MTK_WMT_FUNC_CTRL;
+ wmt_params.flag = 3;
+ wmt_params.dlen = sizeof(param);
+ wmt_params.data = &param;
+ wmt_params.status = NULL;
+
+ err = mtk_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to power on data RAM (%d)", err);
+ return err;
+ }
+
fw_ptr = fw->data;
fw_size = fw->size;
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index c5984966f315..ce9dcffdc5bf 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -400,6 +400,27 @@ out:
return ret;
}
+static int qca_disable_soc_logging(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+ u8 cmd[2];
+ int err;
+
+ cmd[0] = QCA_DISABLE_LOGGING_SUB_OP;
+ cmd[1] = 0x00;
+ skb = __hci_cmd_sync_ev(hdev, QCA_DISABLE_LOGGING, sizeof(cmd), cmd,
+ HCI_EV_CMD_COMPLETE, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ bt_dev_err(hdev, "QCA Failed to disable soc logging(%d)", err);
+ return err;
+ }
+
+ kfree_skb(skb);
+
+ return 0;
+}
+
int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
{
struct sk_buff *skb;
@@ -486,6 +507,12 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
return err;
}
+ if (soc_type >= QCA_WCN3991) {
+ err = qca_disable_soc_logging(hdev);
+ if (err < 0)
+ return err;
+ }
+
/* Perform HCI reset */
err = qca_send_reset(hdev);
if (err < 0) {
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
index 6e1e62dd4b95..d81b74c408a5 100644
--- a/drivers/bluetooth/btqca.h
+++ b/drivers/bluetooth/btqca.h
@@ -14,6 +14,7 @@
#define EDL_NVM_ACCESS_SET_REQ_CMD (0x01)
#define MAX_SIZE_PER_TLV_SEGMENT (243)
#define QCA_PRE_SHUTDOWN_CMD (0xFC08)
+#define QCA_DISABLE_LOGGING (0xFC17)
#define EDL_CMD_REQ_RES_EVT (0x00)
#define EDL_PATCH_VER_RES_EVT (0x19)
@@ -22,6 +23,7 @@
#define EDL_CMD_EXE_STATUS_EVT (0x00)
#define EDL_SET_BAUDRATE_RSP_EVT (0x92)
#define EDL_NVM_ACCESS_CODE_EVT (0x0B)
+#define QCA_DISABLE_LOGGING_SUB_OP (0x14)
#define EDL_TAG_ID_HCI (17)
#define EDL_TAG_ID_DEEP_SLEEP (27)
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 5f022e9cf667..8d2608ddfd08 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -359,6 +359,10 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(0x8087, 0xe0, 0x01, 0x01),
.driver_info = BTUSB_IGNORE },
+ /* Realtek 8822CE Bluetooth devices */
+ { USB_DEVICE(0x0bda, 0xb00c), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+
/* Realtek Bluetooth devices */
{ USB_VENDOR_AND_INTERFACE_INFO(0x0bda, 0xe0, 0x01, 0x01),
.driver_info = BTUSB_REALTEK },
@@ -453,6 +457,7 @@ static const struct dmi_system_id btusb_needs_reset_resume_table[] = {
#define BTUSB_HW_RESET_ACTIVE 12
#define BTUSB_TX_WAIT_VND_EVT 13
#define BTUSB_WAKEUP_DISABLE 14
+#define BTUSB_USE_ALT1_FOR_WBS 15
struct btusb_data {
struct hci_dev *hdev;
@@ -511,7 +516,6 @@ struct btusb_data {
unsigned cmd_timeout_cnt;
};
-
static void btusb_intel_cmd_timeout(struct hci_dev *hdev)
{
struct btusb_data *data = hci_get_drvdata(hdev);
@@ -573,6 +577,23 @@ static void btusb_rtl_cmd_timeout(struct hci_dev *hdev)
gpiod_set_value_cansleep(reset_gpio, 0);
}
+static void btusb_qca_cmd_timeout(struct hci_dev *hdev)
+{
+ struct btusb_data *data = hci_get_drvdata(hdev);
+ int err;
+
+ if (++data->cmd_timeout_cnt < 5)
+ return;
+
+ bt_dev_err(hdev, "Multiple cmd timeouts seen. Resetting usb device.");
+ /* This is not an unbalanced PM reference since the device will reset */
+ err = usb_autopm_get_interface(data->intf);
+ if (!err)
+ usb_queue_reset_device(data->intf);
+ else
+ bt_dev_err(hdev, "Failed usb_autopm_get_interface with %d", err);
+}
+
static inline void btusb_free_frags(struct btusb_data *data)
{
unsigned long flags;
@@ -1666,14 +1687,15 @@ static void btusb_work(struct work_struct *work)
new_alts = data->sco_num;
}
} else if (data->air_mode == HCI_NOTIFY_ENABLE_SCO_TRANSP) {
-
- data->usb_alt6_packet_flow = true;
-
/* Check if Alt 6 is supported for Transparent audio */
- if (btusb_find_altsetting(data, 6))
+ if (btusb_find_altsetting(data, 6)) {
+ data->usb_alt6_packet_flow = true;
new_alts = 6;
- else
+ } else if (test_bit(BTUSB_USE_ALT1_FOR_WBS, &data->flags)) {
+ new_alts = 1;
+ } else {
bt_dev_err(hdev, "Device does not support ALT setting 6");
+ }
}
if (btusb_switch_alt_setting(hdev, new_alts) < 0)
@@ -1720,6 +1742,7 @@ static int btusb_setup_csr(struct hci_dev *hdev)
{
struct hci_rp_read_local_version *rp;
struct sk_buff *skb;
+ bool is_fake = false;
BT_DBG("%s", hdev->name);
@@ -1739,18 +1762,69 @@ static int btusb_setup_csr(struct hci_dev *hdev)
rp = (struct hci_rp_read_local_version *)skb->data;
- /* Detect controllers which aren't real CSR ones. */
+ /* Detect a wide host of Chinese controllers that aren't CSR.
+ *
+ * Known fake bcdDevices: 0x0100, 0x0134, 0x1915, 0x2520, 0x7558, 0x8891
+ *
+ * The main thing they have in common is that these are really popular low-cost
+ * options that support newer Bluetooth versions but rely on heavy VID/PID
+ * squatting of this poor old Bluetooth 1.1 device. Even sold as such.
+ *
+ * We detect actual CSR devices by checking that the HCI manufacturer code
+ * is Cambridge Silicon Radio (10) and ensuring that LMP sub-version and
+ * HCI rev values always match. As they both store the firmware number.
+ */
if (le16_to_cpu(rp->manufacturer) != 10 ||
- le16_to_cpu(rp->lmp_subver) == 0x0c5c) {
+ le16_to_cpu(rp->hci_rev) != le16_to_cpu(rp->lmp_subver))
+ is_fake = true;
+
+ /* Known legit CSR firmware build numbers and their supported BT versions:
+ * - 1.1 (0x1) -> 0x0073, 0x020d, 0x033c, 0x034e
+ * - 1.2 (0x2) -> 0x04d9, 0x0529
+ * - 2.0 (0x3) -> 0x07a6, 0x07ad, 0x0c5c
+ * - 2.1 (0x4) -> 0x149c, 0x1735, 0x1899 (0x1899 is a BlueCore4-External)
+ * - 4.0 (0x6) -> 0x1d86, 0x2031, 0x22bb
+ *
+ * e.g. Real CSR dongles with LMP subversion 0x73 are old enough that
+ * support BT 1.1 only; so it's a dead giveaway when some
+ * third-party BT 4.0 dongle reuses it.
+ */
+ else if (le16_to_cpu(rp->lmp_subver) <= 0x034e &&
+ le16_to_cpu(rp->hci_ver) > BLUETOOTH_VER_1_1)
+ is_fake = true;
+
+ else if (le16_to_cpu(rp->lmp_subver) <= 0x0529 &&
+ le16_to_cpu(rp->hci_ver) > BLUETOOTH_VER_1_2)
+ is_fake = true;
+
+ else if (le16_to_cpu(rp->lmp_subver) <= 0x0c5c &&
+ le16_to_cpu(rp->hci_ver) > BLUETOOTH_VER_2_0)
+ is_fake = true;
+
+ else if (le16_to_cpu(rp->lmp_subver) <= 0x1899 &&
+ le16_to_cpu(rp->hci_ver) > BLUETOOTH_VER_2_1)
+ is_fake = true;
+
+ else if (le16_to_cpu(rp->lmp_subver) <= 0x22bb &&
+ le16_to_cpu(rp->hci_ver) > BLUETOOTH_VER_4_0)
+ is_fake = true;
+
+ if (is_fake) {
+ bt_dev_warn(hdev, "CSR: Unbranded CSR clone detected; adding workarounds...");
+
+ /* Generally these clones have big discrepancies between
+ * advertised features and what's actually supported.
+ * Probably will need to be expanded in the future;
+ * without these the controller will lock up.
+ */
+ set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks);
+ set_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks);
+
/* Clear the reset quirk since this is not an actual
* early Bluetooth 1.1 device from CSR.
*/
clear_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
-
- /* These fake CSR controllers have all a broken
- * stored link key handling and so just disable it.
- */
- set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks);
+ clear_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
}
kfree_skb(skb);
@@ -2262,45 +2336,25 @@ static bool btusb_setup_intel_new_get_fw_name(struct intel_version *ver,
return true;
}
-static int btusb_setup_intel_new(struct hci_dev *hdev)
+static int btusb_intel_download_firmware(struct hci_dev *hdev,
+ struct intel_version *ver,
+ struct intel_boot_params *params)
{
- struct btusb_data *data = hci_get_drvdata(hdev);
- struct intel_version ver;
- struct intel_boot_params params;
const struct firmware *fw;
u32 boot_param;
char fwname[64];
- ktime_t calltime, delta, rettime;
- unsigned long long duration;
int err;
+ struct btusb_data *data = hci_get_drvdata(hdev);
- BT_DBG("%s", hdev->name);
-
- /* Set the default boot parameter to 0x0 and it is updated to
- * SKU specific boot parameter after reading Intel_Write_Boot_Params
- * command while downloading the firmware.
- */
- boot_param = 0x00000000;
-
- calltime = ktime_get();
-
- /* Read the Intel version information to determine if the device
- * is in bootloader mode or if it already has operational firmware
- * loaded.
- */
- err = btintel_read_version(hdev, &ver);
- if (err) {
- bt_dev_err(hdev, "Intel Read version failed (%d)", err);
- btintel_reset_to_bootloader(hdev);
- return err;
- }
+ if (!ver || !params)
+ return -EINVAL;
/* The hardware platform number has a fixed value of 0x37 and
* for now only accept this single value.
*/
- if (ver.hw_platform != 0x37) {
+ if (ver->hw_platform != 0x37) {
bt_dev_err(hdev, "Unsupported Intel hardware platform (%u)",
- ver.hw_platform);
+ ver->hw_platform);
return -EINVAL;
}
@@ -2310,7 +2364,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
* This check has been put in place to ensure correct forward
* compatibility options when newer hardware variants come along.
*/
- switch (ver.hw_variant) {
+ switch (ver->hw_variant) {
case 0x0b: /* SfP */
case 0x0c: /* WsP */
case 0x11: /* JfP */
@@ -2320,11 +2374,11 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
break;
default:
bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)",
- ver.hw_variant);
+ ver->hw_variant);
return -EINVAL;
}
- btintel_version_info(hdev, &ver);
+ btintel_version_info(hdev, ver);
/* The firmware variant determines if the device is in bootloader
* mode or is running operational firmware. The value 0x06 identifies
@@ -2339,25 +2393,25 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
* It is not possible to use the Secure Boot Parameters in this
* case since that command is only available in bootloader mode.
*/
- if (ver.fw_variant == 0x23) {
+ if (ver->fw_variant == 0x23) {
clear_bit(BTUSB_BOOTLOADER, &data->flags);
btintel_check_bdaddr(hdev);
- goto finish;
+ return 0;
}
/* If the device is not in bootloader mode, then the only possible
* choice is to return an error and abort the device initialization.
*/
- if (ver.fw_variant != 0x06) {
+ if (ver->fw_variant != 0x06) {
bt_dev_err(hdev, "Unsupported Intel firmware variant (%u)",
- ver.fw_variant);
+ ver->fw_variant);
return -ENODEV;
}
/* Read the secure boot parameters to identify the operating
* details of the bootloader.
*/
- err = btintel_read_boot_params(hdev, &params);
+ err = btintel_read_boot_params(hdev, params);
if (err)
return err;
@@ -2365,16 +2419,16 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
* with a command complete event. If the boot parameters indicate
* that this bootloader does not send them, then abort the setup.
*/
- if (params.limited_cce != 0x00) {
+ if (params->limited_cce != 0x00) {
bt_dev_err(hdev, "Unsupported Intel firmware loading method (%u)",
- params.limited_cce);
+ params->limited_cce);
return -EINVAL;
}
/* If the OTP has no valid Bluetooth device address, then there will
* also be no valid address for the operational firmware.
*/
- if (!bacmp(&params.otp_bdaddr, BDADDR_ANY)) {
+ if (!bacmp(&params->otp_bdaddr, BDADDR_ANY)) {
bt_dev_info(hdev, "No device address configured");
set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
}
@@ -2400,7 +2454,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
* ibt-<hw_variant>-<hw_revision>-<fw_revision>.sfi.
*
*/
- err = btusb_setup_intel_new_get_fw_name(&ver, &params, fwname,
+ err = btusb_setup_intel_new_get_fw_name(ver, params, fwname,
sizeof(fwname), "sfi");
if (!err) {
bt_dev_err(hdev, "Unsupported Intel firmware naming");
@@ -2415,16 +2469,6 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
bt_dev_info(hdev, "Found device firmware: %s", fwname);
- /* Save the DDC file name for later use to apply once the firmware
- * downloading is done.
- */
- err = btusb_setup_intel_new_get_fw_name(&ver, &params, fwname,
- sizeof(fwname), "ddc");
- if (!err) {
- bt_dev_err(hdev, "Unsupported Intel firmware naming");
- return -EINVAL;
- }
-
if (fw->size < 644) {
bt_dev_err(hdev, "Invalid size of firmware file (%zu)",
fw->size);
@@ -2479,18 +2523,58 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
goto done;
}
+done:
+ release_firmware(fw);
+ return err;
+}
+
+static int btusb_setup_intel_new(struct hci_dev *hdev)
+{
+ struct btusb_data *data = hci_get_drvdata(hdev);
+ struct intel_version ver;
+ struct intel_boot_params params;
+ u32 boot_param;
+ char ddcname[64];
+ ktime_t calltime, delta, rettime;
+ unsigned long long duration;
+ int err;
+ struct intel_debug_features features;
+
+ BT_DBG("%s", hdev->name);
+
+ /* Set the default boot parameter to 0x0 and it is updated to
+ * SKU specific boot parameter after reading Intel_Write_Boot_Params
+ * command while downloading the firmware.
+ */
+ boot_param = 0x00000000;
+
+ calltime = ktime_get();
+
+ /* Read the Intel version information to determine if the device
+ * is in bootloader mode or if it already has operational firmware
+ * loaded.
+ */
+ err = btintel_read_version(hdev, &ver);
+ if (err) {
+ bt_dev_err(hdev, "Intel Read version failed (%d)", err);
+ btintel_reset_to_bootloader(hdev);
+ return err;
+ }
+
+ err = btusb_intel_download_firmware(hdev, &ver, &params);
+ if (err)
+ return err;
+
+ /* controller is already having an operational firmware */
+ if (ver.fw_variant == 0x23)
+ goto finish;
+
rettime = ktime_get();
delta = ktime_sub(rettime, calltime);
duration = (unsigned long long) ktime_to_ns(delta) >> 10;
bt_dev_info(hdev, "Firmware loaded in %llu usecs", duration);
-done:
- release_firmware(fw);
-
- if (err < 0)
- return err;
-
calltime = ktime_get();
set_bit(BTUSB_BOOTING, &data->flags);
@@ -2534,13 +2618,28 @@ done:
clear_bit(BTUSB_BOOTLOADER, &data->flags);
- /* Once the device is running in operational mode, it needs to apply
- * the device configuration (DDC) parameters.
- *
- * The device can work without DDC parameters, so even if it fails
- * to load the file, no need to fail the setup.
+ err = btusb_setup_intel_new_get_fw_name(&ver, &params, ddcname,
+ sizeof(ddcname), "ddc");
+
+ if (!err) {
+ bt_dev_err(hdev, "Unsupported Intel firmware naming");
+ } else {
+ /* Once the device is running in operational mode, it needs to
+ * apply the device configuration (DDC) parameters.
+ *
+ * The device can work without DDC parameters, so even if it
+ * fails to load the file, no need to fail the setup.
+ */
+ btintel_load_ddc_config(hdev, ddcname);
+ }
+
+ /* Read the Intel supported features and if new exception formats
+ * supported, need to load the additional DDC config to enable.
*/
- btintel_load_ddc_config(hdev, fwname);
+ btintel_read_debug_features(hdev, &features);
+
+ /* Set DDC mask for available debug features */
+ btintel_set_debug_features(hdev, &features);
/* Read the Intel version information after loading the FW */
err = btintel_read_version(hdev, &ver);
@@ -2925,7 +3024,7 @@ static int btusb_mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
const u8 *fw_ptr;
size_t fw_size;
int err, dlen;
- u8 flag;
+ u8 flag, param;
err = request_firmware(&fw, fwname, &hdev->dev);
if (err < 0) {
@@ -2933,6 +3032,20 @@ static int btusb_mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
return err;
}
+ /* Power on data RAM the firmware relies on. */
+ param = 1;
+ wmt_params.op = BTMTK_WMT_FUNC_CTRL;
+ wmt_params.flag = 3;
+ wmt_params.dlen = sizeof(param);
+ wmt_params.data = &param;
+ wmt_params.status = NULL;
+
+ err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to power on data RAM (%d)", err);
+ return err;
+ }
+
fw_ptr = fw->data;
fw_size = fw->size;
@@ -3704,6 +3817,9 @@ static bool btusb_prevent_wake(struct hci_dev *hdev)
{
struct btusb_data *data = hci_get_drvdata(hdev);
+ if (test_bit(BTUSB_WAKEUP_DISABLE, &data->flags))
+ return true;
+
return !device_may_wakeup(&data->udev->dev);
}
@@ -3941,10 +4057,20 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_QCA_ROME) {
data->setup_on_usb = btusb_setup_qca;
hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
+ hdev->cmd_timeout = btusb_qca_cmd_timeout;
set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
btusb_check_needs_reset_resume(intf);
}
+ if (id->driver_info & BTUSB_AMP) {
+ /* AMP controllers do not support SCO packets */
+ data->isoc = NULL;
+ } else {
+ /* Interface orders are hardcoded in the specification */
+ data->isoc = usb_ifnum_to_if(data->udev, ifnum_base + 1);
+ data->isoc_ifnum = ifnum_base + 1;
+ }
+
if (IS_ENABLED(CONFIG_BT_HCIBTUSB_RTL) &&
(id->driver_info & BTUSB_REALTEK)) {
hdev->setup = btrtl_setup_realtek;
@@ -3956,19 +4082,10 @@ static int btusb_probe(struct usb_interface *intf,
* (DEVICE_REMOTE_WAKEUP)
*/
set_bit(BTUSB_WAKEUP_DISABLE, &data->flags);
-
- err = usb_autopm_get_interface(intf);
- if (err < 0)
- goto out_free_dev;
- }
-
- if (id->driver_info & BTUSB_AMP) {
- /* AMP controllers do not support SCO packets */
- data->isoc = NULL;
- } else {
- /* Interface orders are hardcoded in the specification */
- data->isoc = usb_ifnum_to_if(data->udev, ifnum_base + 1);
- data->isoc_ifnum = ifnum_base + 1;
+ if (btusb_find_altsetting(data, 1))
+ set_bit(BTUSB_USE_ALT1_FOR_WBS, &data->flags);
+ else
+ bt_dev_err(hdev, "Device does not support ALT setting 1");
}
if (!reset)
@@ -4001,11 +4118,13 @@ static int btusb_probe(struct usb_interface *intf,
if (bcdDevice < 0x117)
set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
+ /* This must be set first in case we disable it for fakes */
+ set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
+
/* Fake CSR devices with broken commands */
- if (bcdDevice <= 0x100 || bcdDevice == 0x134)
+ if (le16_to_cpu(udev->descriptor.idVendor) == 0x0a12 &&
+ le16_to_cpu(udev->descriptor.idProduct) == 0x0001)
hdev->setup = btusb_setup_csr;
-
- set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
}
if (id->driver_info & BTUSB_SNIFFER) {
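
The clone-detection rules added to btusb_setup_csr() above condense to a single predicate over four fields of the Read Local Version reply. A standalone restatement for clarity (the function name is invented; the constants and thresholds are the ones used in the hunk, where the reply fields are read with le16_to_cpu()):

#include <net/bluetooth/bluetooth.h>

static bool csr_looks_fake(u16 manufacturer, u16 hci_rev, u16 hci_ver,
			   u16 lmp_subver)
{
	/* A real CSR part reports manufacturer 10 and stores the same
	 * firmware build number in both hci_rev and lmp_subver. */
	if (manufacturer != 10 || hci_rev != lmp_subver)
		return true;

	/* A build number older than the claimed HCI version is a
	 * giveaway for VID/PID squatters. */
	if (lmp_subver <= 0x034e && hci_ver > BLUETOOTH_VER_1_1)
		return true;
	if (lmp_subver <= 0x0529 && hci_ver > BLUETOOTH_VER_1_2)
		return true;
	if (lmp_subver <= 0x0c5c && hci_ver > BLUETOOTH_VER_2_0)
		return true;
	if (lmp_subver <= 0x1899 && hci_ver > BLUETOOTH_VER_2_1)
		return true;
	return lmp_subver <= 0x22bb && hci_ver > BLUETOOTH_VER_4_0;
}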
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index e60b2e0773db..e41854e0d79a 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -793,7 +793,7 @@ static int h5_serdev_probe(struct serdev_device *serdev)
if (!h5)
return -ENOMEM;
- set_bit(HCI_UART_RESET_ON_INIT, &h5->serdev_hu.flags);
+ set_bit(HCI_UART_RESET_ON_INIT, &h5->serdev_hu.hdev_flags);
h5->hu = &h5->serdev_hu;
h5->serdev_hu.serdev = serdev;
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index d9a4c6c691e0..8bfe024d1fcd 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -219,7 +219,7 @@ static void ll_device_want_to_wakeup(struct hci_uart *hu)
* perfectly safe to always send one.
*/
BT_DBG("dual wake-up-indication");
- /* fall through */
+ fallthrough;
case HCILL_ASLEEP:
/* acknowledge device wake up */
if (send_hcill_cmd(HCILL_WAKE_UP_ACK, hu) < 0) {
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 81c3c38baba1..20e1dedbc58c 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -46,7 +46,7 @@
#define HCI_MAX_IBS_SIZE 10
#define IBS_WAKE_RETRANS_TIMEOUT_MS 100
-#define IBS_BTSOC_TX_IDLE_TIMEOUT_MS 40
+#define IBS_BTSOC_TX_IDLE_TIMEOUT_MS 200
#define IBS_HOST_TX_IDLE_TIMEOUT_MS 2000
#define CMD_TRANS_TIMEOUT_MS 100
#define MEMDUMP_TIMEOUT_MS 8000
@@ -72,7 +72,8 @@ enum qca_flags {
QCA_DROP_VENDOR_EVENT,
QCA_SUSPENDING,
QCA_MEMDUMP_COLLECTION,
- QCA_HW_ERROR_EVENT
+ QCA_HW_ERROR_EVENT,
+ QCA_SSR_TRIGGERED
};
enum qca_capabilities {
@@ -289,25 +290,21 @@ static void serial_clock_vote(unsigned long vote, struct hci_uart *hu)
case HCI_IBS_TX_VOTE_CLOCK_ON:
qca->tx_vote = true;
qca->tx_votes_on++;
- new_vote = true;
break;
case HCI_IBS_RX_VOTE_CLOCK_ON:
qca->rx_vote = true;
qca->rx_votes_on++;
- new_vote = true;
break;
case HCI_IBS_TX_VOTE_CLOCK_OFF:
qca->tx_vote = false;
qca->tx_votes_off++;
- new_vote = qca->rx_vote | qca->tx_vote;
break;
case HCI_IBS_RX_VOTE_CLOCK_OFF:
qca->rx_vote = false;
qca->rx_votes_off++;
- new_vote = qca->rx_vote | qca->tx_vote;
break;
default:
@@ -315,6 +312,8 @@ static void serial_clock_vote(unsigned long vote, struct hci_uart *hu)
return;
}
+ new_vote = qca->rx_vote | qca->tx_vote;
+
if (new_vote != old_vote) {
if (new_vote)
__serial_clock_on(hu->tty);
@@ -474,8 +473,6 @@ static void hci_ibs_tx_idle_timeout(struct timer_list *t)
case HCI_IBS_TX_ASLEEP:
case HCI_IBS_TX_WAKING:
- /* Fall through */
-
default:
BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
break;
@@ -518,8 +515,6 @@ static void hci_ibs_wake_retrans_timeout(struct timer_list *t)
case HCI_IBS_TX_ASLEEP:
case HCI_IBS_TX_AWAKE:
- /* Fall through */
-
default:
BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
break;
@@ -837,8 +832,6 @@ static void device_woke_up(struct hci_uart *hu)
break;
case HCI_IBS_TX_ASLEEP:
- /* Fall through */
-
default:
BT_ERR("Received HCI_IBS_WAKE_ACK in tx state %d",
qca->tx_ibs_state);
@@ -862,6 +855,13 @@ static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
BT_DBG("hu %p qca enq skb %p tx_ibs_state %d", hu, skb,
qca->tx_ibs_state);
+ if (test_bit(QCA_SSR_TRIGGERED, &qca->flags)) {
+ /* As SSR is in progress, ignore the packets */
+ bt_dev_dbg(hu->hdev, "SSR is in progress");
+ kfree_skb(skb);
+ return 0;
+ }
+
/* Prepend skb with frame type */
memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
@@ -983,8 +983,11 @@ static void qca_controller_memdump(struct work_struct *work)
while ((skb = skb_dequeue(&qca->rx_memdump_q))) {
mutex_lock(&qca->hci_memdump_lock);
- /* Skip processing the received packets if timeout detected. */
- if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT) {
+ /* Skip processing the received packets if timeout detected
+ * or memdump collection completed.
+ */
+ if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT ||
+ qca->memdump_state == QCA_MEMDUMP_COLLECTED) {
mutex_unlock(&qca->hci_memdump_lock);
return;
}
@@ -1128,6 +1131,7 @@ static int qca_controller_memdump_event(struct hci_dev *hdev,
struct hci_uart *hu = hci_get_drvdata(hdev);
struct qca_data *qca = hu->priv;
+ set_bit(QCA_SSR_TRIGGERED, &qca->flags);
skb_queue_tail(&qca->rx_memdump_q, skb);
queue_work(qca->workqueue, &qca->ctrl_memdump_evt);
@@ -1485,9 +1489,8 @@ static void qca_hw_error(struct hci_dev *hdev, u8 code)
{
struct hci_uart *hu = hci_get_drvdata(hdev);
struct qca_data *qca = hu->priv;
- struct qca_memdump_data *qca_memdump = qca->qca_memdump;
- char *memdump_buf = NULL;
+ set_bit(QCA_SSR_TRIGGERED, &qca->flags);
set_bit(QCA_HW_ERROR_EVENT, &qca->flags);
bt_dev_info(hdev, "mem_dump_status: %d", qca->memdump_state);
@@ -1509,19 +1512,23 @@ static void qca_hw_error(struct hci_dev *hdev, u8 code)
qca_wait_for_dump_collection(hdev);
}
+ mutex_lock(&qca->hci_memdump_lock);
if (qca->memdump_state != QCA_MEMDUMP_COLLECTED) {
bt_dev_err(hu->hdev, "clearing allocated memory due to memdump timeout");
- mutex_lock(&qca->hci_memdump_lock);
- if (qca_memdump)
- memdump_buf = qca_memdump->memdump_buf_head;
- vfree(memdump_buf);
- kfree(qca_memdump);
- qca->qca_memdump = NULL;
+ if (qca->qca_memdump) {
+ vfree(qca->qca_memdump->memdump_buf_head);
+ kfree(qca->qca_memdump);
+ qca->qca_memdump = NULL;
+ }
qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
cancel_delayed_work(&qca->ctrl_memdump_timeout);
- skb_queue_purge(&qca->rx_memdump_q);
- mutex_unlock(&qca->hci_memdump_lock);
+ }
+ mutex_unlock(&qca->hci_memdump_lock);
+
+ if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT ||
+ qca->memdump_state == QCA_MEMDUMP_COLLECTED) {
cancel_work_sync(&qca->ctrl_memdump_evt);
+ skb_queue_purge(&qca->rx_memdump_q);
}
clear_bit(QCA_HW_ERROR_EVENT, &qca->flags);
@@ -1532,10 +1539,30 @@ static void qca_cmd_timeout(struct hci_dev *hdev)
struct hci_uart *hu = hci_get_drvdata(hdev);
struct qca_data *qca = hu->priv;
- if (qca->memdump_state == QCA_MEMDUMP_IDLE)
+ set_bit(QCA_SSR_TRIGGERED, &qca->flags);
+ if (qca->memdump_state == QCA_MEMDUMP_IDLE) {
+ set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
qca_send_crashbuffer(hu);
- else
- bt_dev_info(hdev, "Dump collection is in process");
+ qca_wait_for_dump_collection(hdev);
+ } else if (qca->memdump_state == QCA_MEMDUMP_COLLECTING) {
+ /* Let us wait here until memory dump collected or
+ * memory dump timer expired.
+ */
+ bt_dev_info(hdev, "waiting for dump to complete");
+ qca_wait_for_dump_collection(hdev);
+ }
+
+ mutex_lock(&qca->hci_memdump_lock);
+ if (qca->memdump_state != QCA_MEMDUMP_COLLECTED) {
+ qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
+ if (!test_bit(QCA_HW_ERROR_EVENT, &qca->flags)) {
+ /* Inject hw error event to reset the device
+ * and driver.
+ */
+ hci_reset_dev(hu->hdev);
+ }
+ }
+ mutex_unlock(&qca->hci_memdump_lock);
}
static int qca_wcn3990_init(struct hci_uart *hu)
@@ -1641,11 +1668,15 @@ static int qca_setup(struct hci_uart *hu)
bt_dev_info(hdev, "setting up %s",
qca_is_wcn399x(soc_type) ? "wcn399x" : "ROME/QCA6390");
+ qca->memdump_state = QCA_MEMDUMP_IDLE;
+
retry:
ret = qca_power_on(hdev);
if (ret)
return ret;
+ clear_bit(QCA_SSR_TRIGGERED, &qca->flags);
+
if (qca_is_wcn399x(soc_type)) {
set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
@@ -1788,9 +1819,6 @@ static void qca_power_shutdown(struct hci_uart *hu)
qca_flush(hu);
spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
- hu->hdev->hw_error = NULL;
- hu->hdev->cmd_timeout = NULL;
-
/* Non-serdev device usually is powered by external power
* and don't need additional action in driver for power down
*/
@@ -1812,6 +1840,9 @@ static int qca_power_off(struct hci_dev *hdev)
struct qca_data *qca = hu->priv;
enum qca_btsoc_type soc_type = qca_soc_type(hu);
+ hu->hdev->hw_error = NULL;
+ hu->hdev->cmd_timeout = NULL;
+
/* Stop sending shutdown command if soc crashes. */
if (soc_type != QCA_ROME
&& qca->memdump_state == QCA_MEMDUMP_IDLE) {
@@ -1819,7 +1850,6 @@ static int qca_power_off(struct hci_dev *hdev)
usleep_range(8000, 10000);
}
- qca->memdump_state = QCA_MEMDUMP_IDLE;
qca_power_shutdown(hu);
return 0;
}
@@ -1962,17 +1992,17 @@ static int qca_serdev_probe(struct serdev_device *serdev)
}
qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
- if (!qcadev->susclk) {
+ if (IS_ERR(qcadev->susclk)) {
dev_warn(&serdev->dev, "failed to acquire clk\n");
- } else {
- err = clk_set_rate(qcadev->susclk, SUSCLK_RATE_32KHZ);
- if (err)
- return err;
-
- err = clk_prepare_enable(qcadev->susclk);
- if (err)
- return err;
+ return PTR_ERR(qcadev->susclk);
}
+ err = clk_set_rate(qcadev->susclk, SUSCLK_RATE_32KHZ);
+ if (err)
+ return err;
+
+ err = clk_prepare_enable(qcadev->susclk);
+ if (err)
+ return err;
err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
if (err) {
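
The susclk change above hinges on the devm_clk_get_optional() contract: NULL means the optional clock is simply absent (and the clk API treats a NULL clock as a no-op), while a real failure comes back as an ERR_PTR(), so the old `if (!qcadev->susclk)` test could never see an error. A minimal userspace sketch of that NULL-vs-ERR_PTR convention (all names and values here are illustrative, not the kernel implementation):

    #include <errno.h>
    #include <stddef.h>

    /* Userspace model of the kernel's ERR_PTR convention (sketch only). */
    #define MAX_ERRNO	4095
    #define ERR_PTR(err)	((void *)(long)(err))
    #define PTR_ERR(ptr)	((long)(ptr))
    #define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

    struct clk;	/* opaque, as in the kernel */

    /* "Optional get": NULL = not present, ERR_PTR() = real failure. */
    static struct clk *demo_clk_get_optional(int present, int fail)
    {
    	if (fail)
    		return ERR_PTR(-ENOENT);
    	return present ? (struct clk *)0x1000 : NULL;
    }

    static int demo_probe(int present, int fail)
    {
    	struct clk *clk = demo_clk_get_optional(present, fail);

    	if (IS_ERR(clk))	/* only a real error aborts the probe */
    		return PTR_ERR(clk);
    	/* NULL falls through: the clk API accepts a NULL clock. */
    	return 0;
    }
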
@@ -2050,6 +2080,7 @@ static int __maybe_unused qca_suspend(struct device *dev)
struct hci_uart *hu = &qcadev->serdev_hu;
struct qca_data *qca = hu->priv;
unsigned long flags;
+ bool tx_pending = false;
int ret = 0;
u8 cmd;
@@ -2068,7 +2099,7 @@ static int __maybe_unused qca_suspend(struct device *dev)
switch (qca->tx_ibs_state) {
case HCI_IBS_TX_WAKING:
del_timer(&qca->wake_retrans_timer);
- /* Fall through */
+ fallthrough;
case HCI_IBS_TX_AWAKE:
del_timer(&qca->tx_idle_timer);
@@ -2083,8 +2114,7 @@ static int __maybe_unused qca_suspend(struct device *dev)
qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
qca->ibs_sent_slps++;
-
- qca_wq_serial_tx_clock_vote_off(&qca->ws_tx_vote_off);
+ tx_pending = true;
break;
case HCI_IBS_TX_ASLEEP:
@@ -2101,22 +2131,24 @@ static int __maybe_unused qca_suspend(struct device *dev)
if (ret < 0)
goto error;
- serdev_device_wait_until_sent(hu->serdev,
- msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS));
+ if (tx_pending) {
+ serdev_device_wait_until_sent(hu->serdev,
+ msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS));
+ serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu);
+ }
	/* Wait for the HCI_IBS_SLEEP_IND sent by the device to indicate that
	 * its Tx is going to sleep, so that a late packet does not wake the
	 * system.
*/
-
ret = wait_event_interruptible_timeout(qca->suspend_wait_q,
qca->rx_ibs_state == HCI_IBS_RX_ASLEEP,
msecs_to_jiffies(IBS_BTSOC_TX_IDLE_TIMEOUT_MS));
-
- if (ret > 0)
- return 0;
-
- if (ret == 0)
+ if (ret == 0) {
ret = -ETIMEDOUT;
+ goto error;
+ }
+
+ return 0;
error:
clear_bit(QCA_SUSPENDING, &qca->flags);
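
The rewritten tail of qca_suspend() leans on the wait_event_interruptible_timeout() return convention: a positive value means the condition (rx_ibs_state reached HCI_IBS_RX_ASLEEP) was met, 0 means the timeout elapsed, and a negative value means a signal interrupted the wait. A compact model of the same mapping (sketch):

    #include <errno.h>

    /*
     * Model of the wait_event_interruptible_timeout() convention:
     *   ret > 0  -> condition became true within the timeout
     *   ret == 0 -> timed out
     *   ret < 0  -> interrupted (e.g. -ERESTARTSYS)
     */
    static int demo_map_wait_result(long ret)
    {
    	if (ret < 0)
    		return (int)ret;	/* propagate the interruption */
    	if (ret == 0)
    		return -ETIMEDOUT;	/* fold timeout into the error path */
    	return 0;			/* success */
    }
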
diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
index 599855e4c57c..7b233312e723 100644
--- a/drivers/bluetooth/hci_serdev.c
+++ b/drivers/bluetooth/hci_serdev.c
@@ -355,7 +355,8 @@ void hci_uart_unregister_device(struct hci_uart *hu)
struct hci_dev *hdev = hu->hdev;
clear_bit(HCI_UART_PROTO_READY, &hu->flags);
- hci_unregister_dev(hdev);
+ if (test_bit(HCI_UART_REGISTERED, &hu->flags))
+ hci_unregister_dev(hdev);
hci_free_dev(hdev);
cancel_work_sync(&hu->write_work);
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 934c92dcb9ab..687d4af6945d 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -814,7 +814,8 @@ static struct inode *devmem_inode;
#ifdef CONFIG_IO_STRICT_DEVMEM
void revoke_devmem(struct resource *res)
{
- struct inode *inode = READ_ONCE(devmem_inode);
+ /* pairs with smp_store_release() in devmem_init_inode() */
+ struct inode *inode = smp_load_acquire(&devmem_inode);
/*
* Check that the initialization has completed. Losing the race
@@ -1028,8 +1029,11 @@ static int devmem_init_inode(void)
return rc;
}
- /* publish /dev/mem initialized */
- WRITE_ONCE(devmem_inode, inode);
+ /*
+ * Publish /dev/mem initialized.
+ * Pairs with smp_load_acquire() in revoke_devmem().
+ */
+ smp_store_release(&devmem_inode, inode);
return 0;
}
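
The pairing matters here: smp_store_release() orders the inode initialization before the pointer publish, and smp_load_acquire() guarantees that a reader who sees the pointer also sees the initialized object behind it; plain READ_ONCE()/WRITE_ONCE() only made the pointer access itself tear-free. The same publish/observe pattern in portable C11 (illustrative, not the kernel primitives):

    #include <stdatomic.h>

    struct demo_inode { int initialized; };

    static struct demo_inode storage;
    static _Atomic(struct demo_inode *) published;

    static void publish(void)
    {
    	storage.initialized = 1;	/* initialize first ... */
    	/* ... then publish; release orders the init before the store */
    	atomic_store_explicit(&published, &storage, memory_order_release);
    }

    static int observe(void)
    {
    	/* acquire pairs with the release store above */
    	struct demo_inode *p =
    		atomic_load_explicit(&published, memory_order_acquire);

    	return p ? p->initialized : -1;	/* never a half-built object */
    }
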
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 2a41b21623ae..d20ba1b104ca 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1277,6 +1277,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
fast_mix(fast_pool);
add_interrupt_bench(cycles);
+ this_cpu_add(net_rand_state.s1, fast_pool->pool[cycles & 3]);
if (unlikely(crng_init == 0)) {
if ((fast_pool->count >= 64) &&
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
index f924c335a195..05520dccd906 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
@@ -1348,7 +1348,7 @@ static void chtls_pass_accept_request(struct sock *sk,
oreq->rsk_rcv_wnd = 0;
oreq->rsk_window_clamp = 0;
- oreq->cookie_ts = 0;
+ oreq->syncookie = 0;
oreq->mss = 0;
oreq->ts_recent = 0;
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c
index c3058dcdb33c..66d247efd561 100644
--- a/drivers/crypto/chelsio/chtls/chtls_main.c
+++ b/drivers/crypto/chelsio/chtls/chtls_main.c
@@ -525,9 +525,9 @@ static int do_chtls_setsockopt(struct sock *sk, int optname,
/* Obtain version and type from previous copy */
crypto_info[0] = tmp_crypto_info;
/* Now copy the following data */
- sockptr_advance(optval, sizeof(*crypto_info));
- rc = copy_from_sockptr((char *)crypto_info + sizeof(*crypto_info),
- optval,
+ rc = copy_from_sockptr_offset((char *)crypto_info +
+ sizeof(*crypto_info),
+ optval, sizeof(*crypto_info),
sizeof(struct tls12_crypto_info_aes_gcm_128)
- sizeof(*crypto_info));
@@ -542,9 +542,9 @@ static int do_chtls_setsockopt(struct sock *sk, int optname,
}
case TLS_CIPHER_AES_GCM_256: {
crypto_info[0] = tmp_crypto_info;
- sockptr_advance(optval, sizeof(*crypto_info));
- rc = copy_from_sockptr((char *)crypto_info + sizeof(*crypto_info),
- optval,
+ rc = copy_from_sockptr_offset((char *)crypto_info +
+ sizeof(*crypto_info),
+ optval, sizeof(*crypto_info),
sizeof(struct tls12_crypto_info_aes_gcm_256)
- sizeof(*crypto_info));
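
Both hunks replace the stateful sockptr_advance()-then-copy sequence with copy_from_sockptr_offset(), which takes the offset as an explicit argument instead of mutating the sockptr between calls. A userspace model of the offset form (the real helper also copes with user-space pointers; this memcpy body is only a sketch):

    #include <stddef.h>
    #include <string.h>

    /* Sketch: copy 'size' bytes starting 'offset' bytes into 'src'. */
    static int demo_copy_from_offset(void *dst, const void *src,
    				 size_t offset, size_t size)
    {
    	memcpy(dst, (const char *)src + offset, size);
    	return 0;
    }

    /*
     * Typical use, mirroring the hunk above: the caller has already
     * consumed a header, so the payload is copied from that offset
     * without advancing any shared cursor.
     */
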
diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
index 039e0f91dba8..6945c3c96637 100644
--- a/drivers/firmware/qemu_fw_cfg.c
+++ b/drivers/firmware/qemu_fw_cfg.c
@@ -605,8 +605,10 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f)
/* register entry under "/sys/firmware/qemu_fw_cfg/by_key/" */
err = kobject_init_and_add(&entry->kobj, &fw_cfg_sysfs_entry_ktype,
fw_cfg_sel_ko, "%d", entry->select);
- if (err)
- goto err_register;
+ if (err) {
+ kobject_put(&entry->kobj);
+ return err;
+ }
/* add raw binary content access */
err = sysfs_create_bin_file(&entry->kobj, &fw_cfg_sysfs_attr_raw);
@@ -622,7 +624,6 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f)
err_add_raw:
kobject_del(&entry->kobj);
-err_register:
kfree(entry);
return err;
}
diff --git a/drivers/fpga/dfl-afu-main.c b/drivers/fpga/dfl-afu-main.c
index b0c31789a909..3fa2c5992173 100644
--- a/drivers/fpga/dfl-afu-main.c
+++ b/drivers/fpga/dfl-afu-main.c
@@ -83,7 +83,8 @@ int __afu_port_disable(struct platform_device *pdev)
	 * on this port and the minimum soft reset pulse width has elapsed.
	 * The driver polls port_soft_reset_ack to determine if HW completed the reset.
*/
- if (readq_poll_timeout(base + PORT_HDR_CTRL, v, v & PORT_CTRL_SFTRST,
+ if (readq_poll_timeout(base + PORT_HDR_CTRL, v,
+ v & PORT_CTRL_SFTRST_ACK,
RST_POLL_INVL, RST_POLL_TIMEOUT)) {
dev_err(&pdev->dev, "timeout, fail to reset device\n");
return -ETIMEDOUT;
diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c
index 538755062ab7..a78c409bf2c4 100644
--- a/drivers/fpga/dfl-pci.c
+++ b/drivers/fpga/dfl-pci.c
@@ -227,7 +227,6 @@ static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs)
{
struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);
struct dfl_fpga_cdev *cdev = drvdata->cdev;
- int ret = 0;
if (!num_vfs) {
/*
@@ -239,6 +238,8 @@ static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs)
dfl_fpga_cdev_config_ports_pf(cdev);
} else {
+ int ret;
+
/*
		 * Before enabling SR-IOV, first put the released ports into
		 * VF access mode.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index d7e17e34fee1..21292098bc02 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -692,9 +692,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
return n ? -EFAULT : 0;
}
case AMDGPU_INFO_DEV_INFO: {
- struct drm_amdgpu_info_device dev_info = {};
+ struct drm_amdgpu_info_device dev_info;
uint64_t vm_size;
+ memset(&dev_info, 0, sizeof(dev_info));
dev_info.device_id = dev->pdev->device;
dev_info.chip_rev = adev->rev_id;
dev_info.external_rev = adev->external_rev_id;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index ebb8a28ff002..02e6f8c4dde0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -778,7 +778,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
tmp_str++;
while (isspace(*++tmp_str));
- while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
+ while (tmp_str[0]) {
+ sub_str = strsep(&tmp_str, delimiter);
ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
if (ret)
return -EINVAL;
@@ -1038,7 +1039,8 @@ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
memcpy(buf_cpy, buf, bytes);
buf_cpy[bytes] = '\0';
tmp = buf_cpy;
- while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
+ while (tmp[0]) {
+ sub_str = strsep(&tmp, delimiter);
if (strlen(sub_str)) {
ret = kstrtol(sub_str, 0, &level);
if (ret)
@@ -1635,7 +1637,8 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
i++;
memcpy(buf_cpy, buf, count-i);
tmp_str = buf_cpy;
- while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
+ while (tmp_str[0]) {
+ sub_str = strsep(&tmp_str, delimiter);
ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
if (ret)
return -EINVAL;
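
All three loops changed for the same reason: when the input ends in a delimiter, strsep() produces one final empty token, and handing "" to kstrtol() fails the entire write. Testing the remaining string before calling strsep() skips that empty tail. A runnable illustration with glibc's strsep() (the extra NULL check is added here because the cursor becomes NULL once the string is exhausted):

    #define _DEFAULT_SOURCE
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
    	char buf[] = "300 600 900 ";	/* note the trailing delimiter */
    	char *tmp = buf;

    	while (tmp && tmp[0]) {		/* skip the empty final token */
    		char *tok = strsep(&tmp, " ");

    		printf("token: '%s'\n", tok);
    	}
    	return 0;
    }
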
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 86ffa0c2880f..710edc70e37e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -8717,20 +8717,38 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
* the same resource. If we have a new DC context as part of
* the DM atomic state from validation we need to free it and
* retain the existing one instead.
+ *
+ * Furthermore, since the DM atomic state only contains the DC
+		 * context and can safely be discarded, we can free the state
+ * and clear the associated private object now to free
+ * some memory and avoid a possible use-after-free later.
*/
- struct dm_atomic_state *new_dm_state, *old_dm_state;
- new_dm_state = dm_atomic_get_new_state(state);
- old_dm_state = dm_atomic_get_old_state(state);
+ for (i = 0; i < state->num_private_objs; i++) {
+ struct drm_private_obj *obj = state->private_objs[i].ptr;
- if (new_dm_state && old_dm_state) {
- if (new_dm_state->context)
- dc_release_state(new_dm_state->context);
+ if (obj->funcs == adev->dm.atomic_obj.funcs) {
+ int j = state->num_private_objs-1;
- new_dm_state->context = old_dm_state->context;
+ dm_atomic_destroy_state(obj,
+ state->private_objs[i].state);
+
+ /* If i is not at the end of the array then the
+ * last element needs to be moved to where i was
+ * before the array can safely be truncated.
+ */
+ if (i != j)
+ state->private_objs[i] =
+ state->private_objs[j];
- if (old_dm_state->context)
- dc_retain_state(old_dm_state->context);
+ state->private_objs[j].ptr = NULL;
+ state->private_objs[j].state = NULL;
+ state->private_objs[j].old_state = NULL;
+ state->private_objs[j].new_state = NULL;
+
+ state->num_private_objs = j;
+ break;
+ }
}
}
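
The removal above is a classic O(1) "swap-remove": overwrite slot i with the last slot, clear the vacated tail, and shrink the count, which is valid because nothing depends on the order of private_objs. The pattern in isolation (generic sketch):

    #include <stddef.h>

    /*
     * Remove arr[i] from an unordered array of n elements in O(1):
     * move the last element into the hole, then shrink the count.
     */
    static size_t swap_remove(int *arr, size_t n, size_t i)
    {
    	size_t last = n - 1;

    	if (i != last)
    		arr[i] = arr[last];
    	arr[last] = 0;	/* poison the vacated slot, as the hunk does */
    	return last;	/* the new element count */
    }
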
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 05d8373888e8..079f46f5cdb6 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -146,6 +146,7 @@ int bochs_kms_init(struct bochs_device *bochs)
bochs->dev->mode_config.preferred_depth = 24;
bochs->dev->mode_config.prefer_shadow = 0;
bochs->dev->mode_config.prefer_shadow_fbdev = 1;
+ bochs->dev->mode_config.fbdev_use_iomem = true;
bochs->dev->mode_config.quirk_addfb_prefer_host_byte_order = true;
bochs->dev->mode_config.funcs = &bochs_mode_funcs;
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 87b58c1acff4..648eb23d0784 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -1224,6 +1224,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
adv7511->bridge.funcs = &adv7511_bridge_funcs;
adv7511->bridge.of_node = dev->of_node;
+ adv7511->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
drm_bridge_add(&adv7511->bridge);
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c
index b14d725bf609..c7bc194bbce3 100644
--- a/drivers/gpu/drm/bridge/nwl-dsi.c
+++ b/drivers/gpu/drm/bridge/nwl-dsi.c
@@ -917,11 +917,6 @@ static int nwl_dsi_bridge_attach(struct drm_bridge *bridge,
struct drm_panel *panel;
int ret;
- if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
- DRM_ERROR("Fix bridge driver to make connector optional!");
- return -EINVAL;
- }
-
ret = drm_of_find_panel_or_bridge(dsi->dev->of_node, 1, 0, &panel,
&panel_bridge);
if (ret)
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 5609e164805f..89cfd68ef400 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -399,7 +399,11 @@ static void drm_fb_helper_dirty_blit_real(struct drm_fb_helper *fb_helper,
unsigned int y;
for (y = clip->y1; y < clip->y2; y++) {
- memcpy(dst, src, len);
+ if (!fb_helper->dev->mode_config.fbdev_use_iomem)
+ memcpy(dst, src, len);
+ else
+ memcpy_toio((void __iomem *)dst, src, len);
+
src += fb->pitches[0];
dst += fb->pitches[0];
}
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 7bf628e13023..ee2058ad482c 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -871,9 +871,6 @@ err:
* @file_priv: drm file-private structure
*
* Open an object using the global name, returning a handle and the size.
- *
- * This handle (of course) holds a reference to the object, so the object
- * will not go away until the handle is deleted.
*/
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
@@ -898,14 +895,15 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
- drm_gem_object_put_unlocked(obj);
if (ret)
- return ret;
+ goto err;
args->handle = handle;
args->size = obj->size;
- return 0;
+err:
+ drm_gem_object_put_unlocked(obj);
+ return ret;
}
/**
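
The reshuffle above fixes a use-after-free: the old code dropped the GEM object reference and then read obj->size, so a concurrent close could have freed the object in between. Routing both outcomes through one exit keeps the reference held until the last field is read. The unified-exit shape, reduced to its essentials (sketch):

    #include <stdlib.h>

    struct demo_obj { int refs; size_t size; };

    static void demo_put(struct demo_obj *o)
    {
    	if (--o->refs == 0)
    		free(o);
    }

    static int demo_open(struct demo_obj *o, size_t *out_size, int create_ret)
    {
    	int ret = create_ret;

    	if (ret)
    		goto err;

    	*out_size = o->size;	/* safe: the reference is still held */
    err:
    	demo_put(o);		/* dropped exactly once, on every path */
    	return ret;
    }
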
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index bb27c82757f1..bf7888ad9ad4 100644
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -923,7 +923,7 @@ static int mipi_dbi_spi1_transfer(struct mipi_dbi *dbi, int dc,
}
}
- tr.len = chunk;
+ tr.len = chunk * 2;
len -= chunk;
ret = spi_sync(spi, &m);
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index b50b44e76279..8fc3f67e3e76 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -322,10 +322,8 @@ static int drm_of_lvds_get_remote_pixels_type(
* configurations by passing the endpoints explicitly to
* drm_of_lvds_get_dual_link_pixel_order().
*/
- if (!current_pt || pixels_type != current_pt) {
- of_node_put(remote_port);
+ if (!current_pt || pixels_type != current_pt)
return -EINVAL;
- }
}
return pixels_type;
diff --git a/drivers/gpu/drm/mcde/mcde_display.c b/drivers/gpu/drm/mcde/mcde_display.c
index 08802e5177f6..4d2290f88edb 100644
--- a/drivers/gpu/drm/mcde/mcde_display.c
+++ b/drivers/gpu/drm/mcde/mcde_display.c
@@ -1060,9 +1060,14 @@ static void mcde_display_update(struct drm_simple_display_pipe *pipe,
*/
if (fb) {
mcde_set_extsrc(mcde, drm_fb_cma_get_gem_addr(fb, pstate, 0));
- if (!mcde->video_mode)
- /* Send a single frame using software sync */
- mcde_display_send_one_frame(mcde);
+ if (!mcde->video_mode) {
+ /*
+ * Send a single frame using software sync if the flow
+ * is not active yet.
+ */
+ if (mcde->flow_active == 0)
+ mcde_display_send_one_frame(mcde);
+ }
dev_info_once(mcde->dev, "sent first display update\n");
} else {
/*
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 519f99868e35..800b7757252e 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -2073,7 +2073,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
*/
if (core->assign_windows) {
core->func->wndw.owner(core);
- core->func->update(core, interlock, false);
+ nv50_disp_atomic_commit_core(state, interlock);
core->assign_windows = false;
interlock[NV50_DISP_INTERLOCK_CORE] = 0;
}
@@ -2506,7 +2506,7 @@ nv50_display_create(struct drm_device *dev)
if (disp->disp->object.oclass >= TU102_DISP)
nouveau_display(dev)->format_modifiers = wndwc57e_modifiers;
else
- if (disp->disp->object.oclass >= GF110_DISP)
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI)
nouveau_display(dev)->format_modifiers = disp90xx_modifiers;
else
nouveau_display(dev)->format_modifiers = disp50xx_modifiers;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 496c4621cc78..07373bbc2acf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -191,6 +191,7 @@ nouveau_decode_mod(struct nouveau_drm *drm,
uint32_t *tile_mode,
uint8_t *kind)
{
+ struct nouveau_display *disp = nouveau_display(drm->dev);
BUG_ON(!tile_mode || !kind);
if (modifier == DRM_FORMAT_MOD_LINEAR) {
@@ -202,6 +203,12 @@ nouveau_decode_mod(struct nouveau_drm *drm,
* Extract the block height and kind from the corresponding
* modifier fields. See drm_fourcc.h for details.
*/
+
+ if ((modifier & (0xffull << 12)) == 0ull) {
+ /* Legacy modifier. Translate to this dev's 'kind.' */
+ modifier |= disp->format_modifiers[0] & (0xffull << 12);
+ }
+
*tile_mode = (uint32_t)(modifier & 0xF);
*kind = (uint8_t)((modifier >> 12) & 0xFF);
@@ -227,6 +234,16 @@ nouveau_framebuffer_get_layout(struct drm_framebuffer *fb,
}
}
+static const u64 legacy_modifiers[] = {
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0),
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1),
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2),
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3),
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4),
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5),
+ DRM_FORMAT_MOD_INVALID
+};
+
static int
nouveau_validate_decode_mod(struct nouveau_drm *drm,
uint64_t modifier,
@@ -247,8 +264,14 @@ nouveau_validate_decode_mod(struct nouveau_drm *drm,
(disp->format_modifiers[mod] != modifier);
mod++);
- if (disp->format_modifiers[mod] == DRM_FORMAT_MOD_INVALID)
- return -EINVAL;
+ if (disp->format_modifiers[mod] == DRM_FORMAT_MOD_INVALID) {
+ for (mod = 0;
+ (legacy_modifiers[mod] != DRM_FORMAT_MOD_INVALID) &&
+ (legacy_modifiers[mod] != modifier);
+ mod++);
+ if (legacy_modifiers[mod] == DRM_FORMAT_MOD_INVALID)
+ return -EINVAL;
+ }
nouveau_decode_mod(drm, modifier, tile_mode, kind);
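
Both lookups walk an array terminated by DRM_FORMAT_MOD_INVALID, so the legacy-modifier fallback costs one extra linear scan and no additional bookkeeping. The sentinel-terminated scan in isolation (sketch; MOD_INVALID stands in for the DRM constant):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define MOD_INVALID UINT64_MAX	/* stand-in for DRM_FORMAT_MOD_INVALID */

    static bool mod_in_table(const uint64_t *table, uint64_t mod)
    {
    	size_t i;

    	/* stop at the sentinel or at a match, as the driver loop does */
    	for (i = 0; table[i] != MOD_INVALID && table[i] != mod; i++)
    		;
    	return table[i] != MOD_INVALID;
    }
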
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 3d11b84d4cf9..d5c23d1c20d8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -315,7 +315,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
struct drm_framebuffer *fb;
struct nouveau_channel *chan;
struct nouveau_bo *nvbo;
- struct drm_mode_fb_cmd2 mode_cmd;
+ struct drm_mode_fb_cmd2 mode_cmd = {};
int ret;
mode_cmd.width = sizes->surface_width;
@@ -590,6 +590,7 @@ fini:
drm_fb_helper_fini(&fbcon->helper);
free:
kfree(fbcon);
+ drm->fbcon = NULL;
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
index dcf08249374a..dffcac249211 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
@@ -117,15 +117,6 @@ nvkm_outp_acquire_hda(struct nvkm_outp *outp, enum nvkm_ior_type type,
{
struct nvkm_ior *ior;
- /* First preference is to reuse the OR that is currently armed
- * on HW, if any, in order to prevent unnecessary switching.
- */
- list_for_each_entry(ior, &outp->disp->ior, head) {
- if (!ior->identity && !!ior->func->hda.hpd == hda &&
- !ior->asy.outp && ior->arm.outp == outp)
- return nvkm_outp_acquire_ior(outp, user, ior);
- }
-
/* Failing that, a completely unused OR is the next best thing. */
list_for_each_entry(ior, &outp->disp->ior, head) {
if (!ior->identity && !!ior->func->hda.hpd == hda &&
@@ -173,6 +164,27 @@ nvkm_outp_acquire(struct nvkm_outp *outp, u8 user, bool hda)
return nvkm_outp_acquire_ior(outp, user, ior);
}
+ /* First preference is to reuse the OR that is currently armed
+ * on HW, if any, in order to prevent unnecessary switching.
+ */
+ list_for_each_entry(ior, &outp->disp->ior, head) {
+ if (!ior->identity && !ior->asy.outp && ior->arm.outp == outp) {
+ /*XXX: For various complicated reasons, we can't outright switch
+ * the boot-time OR on the first modeset without some fairly
+ * invasive changes.
+ *
+ * The systems that were fixed by modifying the OR selection
+ * code to account for HDA support shouldn't regress here as
+ * the HDA-enabled ORs match the relevant output's pad macro
+ * index, and the firmware seems to select an OR this way.
+ *
+ * This warning is to make it obvious if that proves wrong.
+ */
+ WARN_ON(hda && !ior->func->hda.hpd);
+ return nvkm_outp_acquire_ior(outp, user, ior);
+ }
+ }
+
/* If we don't need HDA, first try to acquire an OR that doesn't
* support it to leave free the ones that do.
*/
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
index 46fe1805c588..2649469070aa 100644
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -615,9 +615,9 @@ static const struct panel_desc boe_tv101wum_nl6_desc = {
static const struct drm_display_mode auo_kd101n80_45na_default_mode = {
.clock = 157000,
.hdisplay = 1200,
- .hsync_start = 1200 + 80,
- .hsync_end = 1200 + 80 + 24,
- .htotal = 1200 + 80 + 24 + 36,
+ .hsync_start = 1200 + 60,
+ .hsync_end = 1200 + 60 + 24,
+ .htotal = 1200 + 60 + 24 + 56,
.vdisplay = 1920,
.vsync_start = 1920 + 16,
.vsync_end = 1920 + 16 + 4,
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 5178f87d6574..4aeb960ccf15 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -1250,7 +1250,21 @@ static const struct panel_desc boe_nv133fhm_n61 = {
.height = 165,
},
.delay = {
- .hpd_absent_delay = 200,
+ /*
+ * When power is first given to the panel there's a short
+		 * spike on the HPD line, reportedly lasting until the TCON
+		 * data download is complete. On
+ * one system this was measured at 8 ms. We'll put 15 ms
+ * in the prepare delay just to be safe and take it away
+ * from the hpd_absent_delay (which would otherwise be 200 ms)
+ * to handle this. That means:
+ * - If HPD isn't hooked up you still have 200 ms delay.
+ * - If HPD is hooked up we won't try to look at it for the
+ * first 15 ms.
+ */
+ .prepare = 15,
+ .hpd_absent_delay = 185,
+
.unprepare = 500,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
diff --git a/drivers/i2c/i2c-core-slave.c b/drivers/i2c/i2c-core-slave.c
index 5427f047faf0..1589179d5eb9 100644
--- a/drivers/i2c/i2c-core-slave.c
+++ b/drivers/i2c/i2c-core-slave.c
@@ -18,10 +18,8 @@ int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb)
{
int ret;
- if (!client || !slave_cb) {
- WARN(1, "insufficient data\n");
+ if (WARN(IS_ERR_OR_NULL(client) || !slave_cb, "insufficient data\n"))
return -EINVAL;
- }
if (!(client->flags & I2C_CLIENT_SLAVE))
dev_warn(&client->dev, "%s: client slave flag not set. You might see address collisions\n",
@@ -60,6 +58,9 @@ int i2c_slave_unregister(struct i2c_client *client)
{
int ret;
+ if (IS_ERR_OR_NULL(client))
+ return -EINVAL;
+
if (!client->adapter->algo->unreg_slave) {
dev_err(&client->dev, "%s: not supported by adapter\n", __func__);
return -EOPNOTSUPP;
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index 655795bfa0ee..513825e424bf 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -72,6 +72,15 @@ static void rdma_dim_init(struct ib_cq *cq)
INIT_WORK(&dim->work, ib_cq_rdma_dim_work);
}
+static void rdma_dim_destroy(struct ib_cq *cq)
+{
+ if (!cq->dim)
+ return;
+
+ cancel_work_sync(&cq->dim->work);
+ kfree(cq->dim);
+}
+
static int __poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
{
int rc;
@@ -266,6 +275,7 @@ struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
return cq;
out_destroy_cq:
+ rdma_dim_destroy(cq);
rdma_restrack_del(&cq->res);
cq->device->ops.destroy_cq(cq, udata);
out_free_wc:
@@ -331,12 +341,10 @@ void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
WARN_ON_ONCE(1);
}
+ rdma_dim_destroy(cq);
trace_cq_free(cq);
rdma_restrack_del(&cq->res);
cq->device->ops.destroy_cq(cq, udata);
- if (cq->dim)
- cancel_work_sync(&cq->dim->work);
- kfree(cq->dim);
kfree(cq->wc);
kfree(cq);
}
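
Centralizing the teardown in rdma_dim_destroy() also fixes the ordering: the pending work item must be cancelled before the structure it operates on is freed, and that now holds on the allocation-failure unwind as well as on normal destroy. A pthread-based model of "quiesce, then free" (illustrative):

    #include <pthread.h>
    #include <stdlib.h>

    struct demo_dim {
    	pthread_t worker;
    };

    static void demo_dim_destroy(struct demo_dim *dim)
    {
    	if (!dim)
    		return;
    	/* analogous to cancel_work_sync(): wait until the worker is done */
    	pthread_join(dim->worker, NULL);
    	/* only now is it safe to free the memory the worker was using */
    	free(dim);
    }
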
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 5b87eee8ccc8..d03dacaef788 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1084,6 +1084,8 @@ static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
size_t in_size;
int ret;
+ if (in_len < offsetofend(typeof(cmd), reserved))
+ return -EINVAL;
in_size = min_t(size_t, in_len, sizeof(cmd));
if (copy_from_user(&cmd, inbuf, in_size))
return -EFAULT;
@@ -1141,6 +1143,8 @@ static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
size_t in_size;
int ret;
+ if (in_len < offsetofend(typeof(cmd), reserved))
+ return -EINVAL;
in_size = min_t(size_t, in_len, sizeof(cmd));
if (copy_from_user(&cmd, inbuf, in_size))
return -EFAULT;
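
Both commands now refuse inputs shorter than the fields they actually read. offsetofend() yields the first byte past a member, so the check guarantees cmd.reserved is fully present while still accepting longer buffers from newer userspace. Sketch (the macro matches the kernel's definition; the struct is a placeholder):

    #include <errno.h>
    #include <stddef.h>

    #define offsetofend(TYPE, MEMBER) \
    	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

    struct demo_cmd {
    	unsigned int id;
    	unsigned int reserved;
    	/* newer ABI revisions may append fields here */
    };

    static int demo_check_len(size_t in_len)
    {
    	/* everything up to and including 'reserved' must be supplied */
    	if (in_len < offsetofend(struct demo_cmd, reserved))
    		return -EINVAL;
    	return 0;
    }
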
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c
index 3421a0b15983..5f5408cdf008 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.c
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c
@@ -132,7 +132,7 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
stats->value[BNXT_RE_RECOVERABLE_ERRORS] =
le64_to_cpu(bnxt_re_stats->tx_bcast_pkts);
stats->value[BNXT_RE_RX_DROPS] =
- le64_to_cpu(bnxt_re_stats->rx_drop_pkts);
+ le64_to_cpu(bnxt_re_stats->rx_error_pkts);
stats->value[BNXT_RE_RX_DISCARDS] =
le64_to_cpu(bnxt_re_stats->rx_discard_pkts);
stats->value[BNXT_RE_RX_PKTS] =
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 1ab676b66894..77dca1e05bba 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -1797,9 +1797,7 @@ static bool init_prefetch_work(struct ib_pd *pd,
work->frags[i].mr =
get_prefetchable_mr(pd, advice, sg_list[i].lkey);
if (!work->frags[i].mr) {
- work->num_sge = i - 1;
- if (i)
- destroy_prefetch_work(work);
+ work->num_sge = i;
return false;
}
@@ -1865,6 +1863,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
srcu_key = srcu_read_lock(&dev->odp_srcu);
if (!init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge)) {
srcu_read_unlock(&dev->odp_srcu, srcu_key);
+ destroy_prefetch_work(work);
return -EINVAL;
}
queue_work(system_unbound_wq, &work->work);
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index e050eade97a1..1225b8d77510 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1766,15 +1766,14 @@ err:
}
static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_qp *qp,
struct ib_qp_init_attr *init_attr,
- struct mlx5_ib_create_qp *ucmd,
void *qpc)
{
int scqe_sz;
bool allow_scat_cqe = false;
- if (ucmd)
- allow_scat_cqe = ucmd->flags & MLX5_QP_FLAG_ALLOW_SCATTER_CQE;
+ allow_scat_cqe = qp->flags_en & MLX5_QP_FLAG_ALLOW_SCATTER_CQE;
if (!allow_scat_cqe && init_attr->sq_sig_type != IB_SIGNAL_ALL_WR)
return;
@@ -1853,8 +1852,6 @@ static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
u32 *in;
int err;
- mutex_init(&qp->mutex);
-
if (attr->sq_sig_type == IB_SIGNAL_ALL_WR)
qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
@@ -1938,7 +1935,6 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
u32 *in;
int err;
- mutex_init(&qp->mutex);
spin_lock_init(&qp->sq.lock);
spin_lock_init(&qp->rq.lock);
@@ -2012,7 +2008,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
}
if ((qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) &&
(qp->type == MLX5_IB_QPT_DCI || qp->type == IB_QPT_RC))
- configure_requester_scat_cqe(dev, init_attr, ucmd, qpc);
+ configure_requester_scat_cqe(dev, qp, init_attr, qpc);
if (qp->rq.wqe_cnt) {
MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
@@ -2129,7 +2125,6 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
u32 *in;
int err;
- mutex_init(&qp->mutex);
spin_lock_init(&qp->sq.lock);
spin_lock_init(&qp->rq.lock);
@@ -2543,13 +2538,18 @@ static void process_vendor_flag(struct mlx5_ib_dev *dev, int *flags, int flag,
return;
}
- if (flag == MLX5_QP_FLAG_SCATTER_CQE) {
+ switch (flag) {
+ case MLX5_QP_FLAG_SCATTER_CQE:
+ case MLX5_QP_FLAG_ALLOW_SCATTER_CQE:
/*
- * We don't return error if this flag was provided,
- * and mlx5 doesn't have right capability.
- */
- *flags &= ~MLX5_QP_FLAG_SCATTER_CQE;
+		 * Don't return an error if these flags were provided but
+		 * mlx5 doesn't have the right capability.
+ */
+ *flags &= ~(MLX5_QP_FLAG_SCATTER_CQE |
+ MLX5_QP_FLAG_ALLOW_SCATTER_CQE);
return;
+ default:
+ break;
}
mlx5_ib_dbg(dev, "Vendor create QP flag 0x%X is not supported\n", flag);
}
@@ -2589,6 +2589,8 @@ static int process_vendor_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SIGNATURE, true, qp);
process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SCATTER_CQE,
MLX5_CAP_GEN(mdev, sctr_data_cqe), qp);
+ process_vendor_flag(dev, &flags, MLX5_QP_FLAG_ALLOW_SCATTER_CQE,
+ MLX5_CAP_GEN(mdev, sctr_data_cqe), qp);
if (qp->type == IB_QPT_RAW_PACKET) {
cond = MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) ||
@@ -2963,6 +2965,7 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
goto free_ucmd;
}
+ mutex_init(&qp->mutex);
qp->type = type;
if (udata) {
err = process_vendor_flags(dev, qp, params.ucmd, attr);
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 49b8a43e3fa2..f5aa85a5377c 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -1923,7 +1923,7 @@ qedr_roce_create_kernel_qp(struct qedr_dev *dev,
in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
- params.elem_size = n_rq_elems;
+ params.num_elems = n_rq_elems;
params.elem_size = QEDR_RQE_ELEMENT_SIZE;
rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, &params);
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 7db35dd6ad74..332a8ba94b81 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -901,8 +901,6 @@ static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
qp->s_tail_ack_queue = 0;
qp->s_acked_ack_queue = 0;
qp->s_num_rd_atomic = 0;
- if (qp->r_rq.kwq)
- qp->r_rq.kwq->count = qp->r_rq.size;
qp->r_sge.num_sge = 0;
atomic_set(&qp->s_reserved_used, 0);
}
@@ -2367,31 +2365,6 @@ bad_lkey:
}
/**
- * get_count - count numbers of request work queue entries
- * in circular buffer
- * @rq: data structure for request queue entry
- * @tail: tail indices of the circular buffer
- * @head: head indices of the circular buffer
- *
- * Return - total number of entries in the circular buffer
- */
-static u32 get_count(struct rvt_rq *rq, u32 tail, u32 head)
-{
- u32 count;
-
- count = head;
-
- if (count >= rq->size)
- count = 0;
- if (count < tail)
- count += rq->size - tail;
- else
- count -= tail;
-
- return count;
-}
-
-/**
* get_rvt_head - get head indices of the circular buffer
* @rq: data structure for request queue entry
* @ip: the QP
@@ -2465,7 +2438,7 @@ int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
if (kwq->count < RVT_RWQ_COUNT_THRESHOLD) {
head = get_rvt_head(rq, ip);
- kwq->count = get_count(rq, tail, head);
+ kwq->count = rvt_get_rq_count(rq, head, tail);
}
if (unlikely(kwq->count == 0)) {
ret = 0;
@@ -2500,7 +2473,9 @@ int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
* the number of remaining WQEs.
*/
if (kwq->count < srq->limit) {
- kwq->count = get_count(rq, tail, get_rvt_head(rq, ip));
+ kwq->count =
+ rvt_get_rq_count(rq,
+ get_rvt_head(rq, ip), tail);
if (kwq->count < srq->limit) {
struct ib_event ev;
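
Both call sites now share rvt_get_rq_count(), which answers the same question the removed get_count() did: how many entries of a size-slot circular buffer are occupied, given head and tail. The arithmetic in isolation (sketch of the common formulation; head == tail reads as empty):

    #include <stdint.h>

    /* Occupied entries in a circular buffer of 'size' slots. */
    static uint32_t demo_rq_count(uint32_t size, uint32_t head, uint32_t tail)
    {
    	if (head >= tail)
    		return head - tail;
    	return size - (tail - head);	/* head has wrapped around */
    }
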
diff --git a/drivers/infiniband/sw/rdmavt/rc.c b/drivers/infiniband/sw/rdmavt/rc.c
index 977906cc0d11..c58735f4c94a 100644
--- a/drivers/infiniband/sw/rdmavt/rc.c
+++ b/drivers/infiniband/sw/rdmavt/rc.c
@@ -127,9 +127,7 @@ __be32 rvt_compute_aeth(struct rvt_qp *qp)
* not atomic, which is OK, since the fuzziness is
* resolved as further ACKs go out.
*/
- credits = head - tail;
- if ((int)credits < 0)
- credits += qp->r_rq.size;
+ credits = rvt_get_rq_count(&qp->r_rq, head, tail);
}
/*
* Binary search the credit table to find the code to
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index e5f998744501..9e1ab701785c 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -243,6 +243,7 @@ static int aggregate_requests(struct icc_node *node)
{
struct icc_provider *p = node->provider;
struct icc_req *r;
+ u32 avg_bw, peak_bw;
node->avg_bw = 0;
node->peak_bw = 0;
@@ -251,9 +252,14 @@ static int aggregate_requests(struct icc_node *node)
p->pre_aggregate(node);
hlist_for_each_entry(r, &node->req_list, req_node) {
- if (!r->enabled)
- continue;
- p->aggregate(node, r->tag, r->avg_bw, r->peak_bw,
+ if (r->enabled) {
+ avg_bw = r->avg_bw;
+ peak_bw = r->peak_bw;
+ } else {
+ avg_bw = 0;
+ peak_bw = 0;
+ }
+ p->aggregate(node, r->tag, avg_bw, peak_bw,
&node->avg_bw, &node->peak_bw);
}
diff --git a/drivers/interconnect/qcom/msm8916.c b/drivers/interconnect/qcom/msm8916.c
index e94f3c5228b7..42c6c5581662 100644
--- a/drivers/interconnect/qcom/msm8916.c
+++ b/drivers/interconnect/qcom/msm8916.c
@@ -197,13 +197,13 @@ DEFINE_QNODE(pcnoc_int_0, MSM8916_PNOC_INT_0, 8, -1, -1, MSM8916_PNOC_SNOC_MAS,
DEFINE_QNODE(pcnoc_int_1, MSM8916_PNOC_INT_1, 8, -1, -1, MSM8916_PNOC_SNOC_MAS);
DEFINE_QNODE(pcnoc_m_0, MSM8916_PNOC_MAS_0, 8, -1, -1, MSM8916_PNOC_INT_0);
DEFINE_QNODE(pcnoc_m_1, MSM8916_PNOC_MAS_1, 8, -1, -1, MSM8916_PNOC_SNOC_MAS);
-DEFINE_QNODE(pcnoc_s_0, MSM8916_PNOC_SLV_0, 8, -1, -1, MSM8916_SLAVE_CLK_CTL, MSM8916_SLAVE_TLMM, MSM8916_SLAVE_TCSR, MSM8916_SLAVE_SECURITY, MSM8916_SLAVE_MSS);
-DEFINE_QNODE(pcnoc_s_1, MSM8916_PNOC_SLV_1, 8, -1, -1, MSM8916_SLAVE_IMEM_CFG, MSM8916_SLAVE_CRYPTO_0_CFG, MSM8916_SLAVE_MSG_RAM, MSM8916_SLAVE_PDM, MSM8916_SLAVE_PRNG);
-DEFINE_QNODE(pcnoc_s_2, MSM8916_PNOC_SLV_2, 8, -1, -1, MSM8916_SLAVE_SPDM, MSM8916_SLAVE_BOOT_ROM, MSM8916_SLAVE_BIMC_CFG, MSM8916_SLAVE_PNOC_CFG, MSM8916_SLAVE_PMIC_ARB);
-DEFINE_QNODE(pcnoc_s_3, MSM8916_PNOC_SLV_3, 8, -1, -1, MSM8916_SLAVE_MPM, MSM8916_SLAVE_SNOC_CFG, MSM8916_SLAVE_RBCPR_CFG, MSM8916_SLAVE_QDSS_CFG, MSM8916_SLAVE_DEHR_CFG);
-DEFINE_QNODE(pcnoc_s_4, MSM8916_PNOC_SLV_4, 8, -1, -1, MSM8916_SLAVE_VENUS_CFG, MSM8916_SLAVE_CAMERA_CFG, MSM8916_SLAVE_DISPLAY_CFG);
-DEFINE_QNODE(pcnoc_s_8, MSM8916_PNOC_SLV_8, 8, -1, -1, MSM8916_SLAVE_USB_HS, MSM8916_SLAVE_SDCC_1, MSM8916_SLAVE_BLSP_1);
-DEFINE_QNODE(pcnoc_s_9, MSM8916_PNOC_SLV_9, 8, -1, -1, MSM8916_SLAVE_SDCC_2, MSM8916_SLAVE_LPASS, MSM8916_SLAVE_GRAPHICS_3D_CFG);
+DEFINE_QNODE(pcnoc_s_0, MSM8916_PNOC_SLV_0, 4, -1, -1, MSM8916_SLAVE_CLK_CTL, MSM8916_SLAVE_TLMM, MSM8916_SLAVE_TCSR, MSM8916_SLAVE_SECURITY, MSM8916_SLAVE_MSS);
+DEFINE_QNODE(pcnoc_s_1, MSM8916_PNOC_SLV_1, 4, -1, -1, MSM8916_SLAVE_IMEM_CFG, MSM8916_SLAVE_CRYPTO_0_CFG, MSM8916_SLAVE_MSG_RAM, MSM8916_SLAVE_PDM, MSM8916_SLAVE_PRNG);
+DEFINE_QNODE(pcnoc_s_2, MSM8916_PNOC_SLV_2, 4, -1, -1, MSM8916_SLAVE_SPDM, MSM8916_SLAVE_BOOT_ROM, MSM8916_SLAVE_BIMC_CFG, MSM8916_SLAVE_PNOC_CFG, MSM8916_SLAVE_PMIC_ARB);
+DEFINE_QNODE(pcnoc_s_3, MSM8916_PNOC_SLV_3, 4, -1, -1, MSM8916_SLAVE_MPM, MSM8916_SLAVE_SNOC_CFG, MSM8916_SLAVE_RBCPR_CFG, MSM8916_SLAVE_QDSS_CFG, MSM8916_SLAVE_DEHR_CFG);
+DEFINE_QNODE(pcnoc_s_4, MSM8916_PNOC_SLV_4, 4, -1, -1, MSM8916_SLAVE_VENUS_CFG, MSM8916_SLAVE_CAMERA_CFG, MSM8916_SLAVE_DISPLAY_CFG);
+DEFINE_QNODE(pcnoc_s_8, MSM8916_PNOC_SLV_8, 4, -1, -1, MSM8916_SLAVE_USB_HS, MSM8916_SLAVE_SDCC_1, MSM8916_SLAVE_BLSP_1);
+DEFINE_QNODE(pcnoc_s_9, MSM8916_PNOC_SLV_9, 4, -1, -1, MSM8916_SLAVE_SDCC_2, MSM8916_SLAVE_LPASS, MSM8916_SLAVE_GRAPHICS_3D_CFG);
DEFINE_QNODE(pcnoc_snoc_mas, MSM8916_PNOC_SNOC_MAS, 8, 29, -1, MSM8916_PNOC_SNOC_SLV);
DEFINE_QNODE(pcnoc_snoc_slv, MSM8916_PNOC_SNOC_SLV, 8, -1, 45, MSM8916_SNOC_INT_0, MSM8916_SNOC_INT_BIMC, MSM8916_SNOC_INT_1);
DEFINE_QNODE(qdss_int, MSM8916_SNOC_QDSS_INT, 8, -1, -1, MSM8916_SNOC_INT_0, MSM8916_SNOC_INT_BIMC);
diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c
index b0f62cbbdc87..f3a8f113865d 100644
--- a/drivers/misc/habanalabs/command_submission.c
+++ b/drivers/misc/habanalabs/command_submission.c
@@ -499,11 +499,19 @@ static int validate_queue_index(struct hl_device *hdev,
struct asic_fixed_properties *asic = &hdev->asic_prop;
struct hw_queue_properties *hw_queue_prop;
+ /* This must be checked here to prevent out-of-bounds access to
+ * hw_queues_props array
+ */
+ if (chunk->queue_index >= HL_MAX_QUEUES) {
+ dev_err(hdev->dev, "Queue index %d is invalid\n",
+ chunk->queue_index);
+ return -EINVAL;
+ }
+
hw_queue_prop = &asic->hw_queues_props[chunk->queue_index];
- if ((chunk->queue_index >= HL_MAX_QUEUES) ||
- (hw_queue_prop->type == QUEUE_TYPE_NA)) {
- dev_err(hdev->dev, "Queue index %d is invalid\n",
+ if (hw_queue_prop->type == QUEUE_TYPE_NA) {
+ dev_err(hdev->dev, "Queue index %d is not applicable\n",
chunk->queue_index);
return -EINVAL;
}
diff --git a/drivers/misc/habanalabs/debugfs.c b/drivers/misc/habanalabs/debugfs.c
index fc4372c18ce2..0bc036e01ee8 100644
--- a/drivers/misc/habanalabs/debugfs.c
+++ b/drivers/misc/habanalabs/debugfs.c
@@ -36,7 +36,7 @@ static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
pkt.i2c_reg = i2c_reg;
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- HL_DEVICE_TIMEOUT_USEC, (long *) val);
+ 0, (long *) val);
if (rc)
dev_err(hdev->dev, "Failed to read from I2C, error %d\n", rc);
@@ -63,7 +63,7 @@ static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
pkt.value = cpu_to_le64(val);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- HL_DEVICE_TIMEOUT_USEC, NULL);
+ 0, NULL);
if (rc)
dev_err(hdev->dev, "Failed to write to I2C, error %d\n", rc);
@@ -87,7 +87,7 @@ static void hl_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state)
pkt.value = cpu_to_le64(state);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- HL_DEVICE_TIMEOUT_USEC, NULL);
+ 0, NULL);
if (rc)
dev_err(hdev->dev, "Failed to set LED %d, error %d\n", led, rc);
@@ -981,7 +981,7 @@ static ssize_t hl_clk_gate_read(struct file *f, char __user *buf,
if (*ppos)
return 0;
- sprintf(tmp_buf, "%d\n", hdev->clock_gating);
+ sprintf(tmp_buf, "0x%llx\n", hdev->clock_gating_mask);
rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
strlen(tmp_buf) + 1);
@@ -993,7 +993,7 @@ static ssize_t hl_clk_gate_write(struct file *f, const char __user *buf,
{
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
- u32 value;
+ u64 value;
ssize_t rc;
if (atomic_read(&hdev->in_reset)) {
@@ -1002,19 +1002,12 @@ static ssize_t hl_clk_gate_write(struct file *f, const char __user *buf,
return 0;
}
- rc = kstrtouint_from_user(buf, count, 10, &value);
+ rc = kstrtoull_from_user(buf, count, 16, &value);
if (rc)
return rc;
- if (value) {
- hdev->clock_gating = 1;
- if (hdev->asic_funcs->enable_clock_gating)
- hdev->asic_funcs->enable_clock_gating(hdev);
- } else {
- if (hdev->asic_funcs->disable_clock_gating)
- hdev->asic_funcs->disable_clock_gating(hdev);
- hdev->clock_gating = 0;
- }
+ hdev->clock_gating_mask = value;
+ hdev->asic_funcs->set_clock_gating(hdev);
return count;
}
diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c
index 2b38a119704c..59608d1bac88 100644
--- a/drivers/misc/habanalabs/device.c
+++ b/drivers/misc/habanalabs/device.c
@@ -608,7 +608,7 @@ int hl_device_set_debug_mode(struct hl_device *hdev, bool enable)
hdev->in_debug = 0;
if (!hdev->hard_reset_pending)
- hdev->asic_funcs->enable_clock_gating(hdev);
+ hdev->asic_funcs->set_clock_gating(hdev);
goto out;
}
diff --git a/drivers/misc/habanalabs/firmware_if.c b/drivers/misc/habanalabs/firmware_if.c
index baf790cf4b78..d27841cb5bcb 100644
--- a/drivers/misc/habanalabs/firmware_if.c
+++ b/drivers/misc/habanalabs/firmware_if.c
@@ -61,7 +61,7 @@ int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode)
pkt.ctl = cpu_to_le32(opcode << ARMCP_PKT_CTL_OPCODE_SHIFT);
return hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt,
- sizeof(pkt), HL_DEVICE_TIMEOUT_USEC, NULL);
+ sizeof(pkt), 0, NULL);
}
int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
@@ -144,7 +144,7 @@ int hl_fw_unmask_irq(struct hl_device *hdev, u16 event_type)
pkt.value = cpu_to_le64(event_type);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- HL_DEVICE_TIMEOUT_USEC, &result);
+ 0, &result);
if (rc)
dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type);
@@ -183,7 +183,7 @@ int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr,
ARMCP_PKT_CTL_OPCODE_SHIFT);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt,
- total_pkt_size, HL_DEVICE_TIMEOUT_USEC, &result);
+ total_pkt_size, 0, &result);
if (rc)
dev_err(hdev->dev, "failed to unmask IRQ array\n");
@@ -204,7 +204,7 @@ int hl_fw_test_cpu_queue(struct hl_device *hdev)
test_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &test_pkt,
- sizeof(test_pkt), HL_DEVICE_TIMEOUT_USEC, &result);
+ sizeof(test_pkt), 0, &result);
if (!rc) {
if (result != ARMCP_PACKET_FENCE_VAL)
@@ -248,7 +248,7 @@ int hl_fw_send_heartbeat(struct hl_device *hdev)
hb_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &hb_pkt,
- sizeof(hb_pkt), HL_DEVICE_TIMEOUT_USEC, &result);
+ sizeof(hb_pkt), 0, &result);
if ((rc) || (result != ARMCP_PACKET_FENCE_VAL))
rc = -EIO;
diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
index 834470d10b46..637a9d608707 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi.c
@@ -80,6 +80,7 @@
#define GAUDI_PLDM_QMAN0_TIMEOUT_USEC (HL_DEVICE_TIMEOUT_USEC * 30)
#define GAUDI_PLDM_TPC_KERNEL_WAIT_USEC (HL_DEVICE_TIMEOUT_USEC * 30)
#define GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC 1000000 /* 1s */
+#define GAUDI_MSG_TO_CPU_TIMEOUT_USEC 4000000 /* 4s */
#define GAUDI_QMAN0_FENCE_VAL 0x72E91AB9
@@ -98,6 +99,11 @@
#define GAUDI_ARB_WDT_TIMEOUT 0x1000000
+#define GAUDI_CLK_GATE_DEBUGFS_MASK (\
+ BIT(GAUDI_ENGINE_ID_MME_0) |\
+ BIT(GAUDI_ENGINE_ID_MME_2) |\
+ GENMASK_ULL(GAUDI_ENGINE_ID_TPC_7, GAUDI_ENGINE_ID_TPC_0))
+
static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = {
"gaudi cq 0_0", "gaudi cq 0_1", "gaudi cq 0_2", "gaudi cq 0_3",
"gaudi cq 1_0", "gaudi cq 1_1", "gaudi cq 1_2", "gaudi cq 1_3",
@@ -106,14 +112,14 @@ static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = {
};
static const u8 gaudi_dma_assignment[GAUDI_DMA_MAX] = {
- [GAUDI_PCI_DMA_1] = 0,
- [GAUDI_PCI_DMA_2] = 1,
- [GAUDI_PCI_DMA_3] = 5,
- [GAUDI_HBM_DMA_1] = 2,
- [GAUDI_HBM_DMA_2] = 3,
- [GAUDI_HBM_DMA_3] = 4,
- [GAUDI_HBM_DMA_4] = 6,
- [GAUDI_HBM_DMA_5] = 7
+ [GAUDI_PCI_DMA_1] = GAUDI_ENGINE_ID_DMA_0,
+ [GAUDI_PCI_DMA_2] = GAUDI_ENGINE_ID_DMA_1,
+ [GAUDI_PCI_DMA_3] = GAUDI_ENGINE_ID_DMA_5,
+ [GAUDI_HBM_DMA_1] = GAUDI_ENGINE_ID_DMA_2,
+ [GAUDI_HBM_DMA_2] = GAUDI_ENGINE_ID_DMA_3,
+ [GAUDI_HBM_DMA_3] = GAUDI_ENGINE_ID_DMA_4,
+ [GAUDI_HBM_DMA_4] = GAUDI_ENGINE_ID_DMA_6,
+ [GAUDI_HBM_DMA_5] = GAUDI_ENGINE_ID_DMA_7
};
static const u8 gaudi_cq_assignment[NUMBER_OF_CMPLT_QUEUES] = {
@@ -1819,7 +1825,7 @@ static void gaudi_init_golden_registers(struct hl_device *hdev)
gaudi_init_rate_limiter(hdev);
- gaudi_disable_clock_gating(hdev);
+ hdev->asic_funcs->disable_clock_gating(hdev);
for (tpc_id = 0, tpc_offset = 0;
tpc_id < TPC_NUMBER_OF_ENGINES;
@@ -2531,46 +2537,55 @@ static void gaudi_tpc_stall(struct hl_device *hdev)
WREG32(mmTPC7_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
}
-static void gaudi_enable_clock_gating(struct hl_device *hdev)
+static void gaudi_set_clock_gating(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
u32 qman_offset;
int i;
- if (!hdev->clock_gating)
- return;
-
- if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE)
- return;
-
	/* If we are in a debug session, don't enable the clock gate
	 * as it may interfere
*/
if (hdev->in_debug)
return;
- for (i = 0, qman_offset = 0 ; i < PCI_DMA_NUMBER_OF_CHNLS ; i++) {
+ for (i = GAUDI_PCI_DMA_1, qman_offset = 0 ; i < GAUDI_HBM_DMA_1 ; i++) {
+ if (!(hdev->clock_gating_mask &
+ (BIT_ULL(gaudi_dma_assignment[i]))))
+ continue;
+
qman_offset = gaudi_dma_assignment[i] * DMA_QMAN_OFFSET;
WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset, QMAN_CGM1_PWR_GATE_EN);
WREG32(mmDMA0_QM_CGM_CFG + qman_offset,
QMAN_UPPER_CP_CGM_PWR_GATE_EN);
}
- for (; i < HBM_DMA_NUMBER_OF_CHNLS ; i++) {
+ for (i = GAUDI_HBM_DMA_1 ; i < GAUDI_DMA_MAX ; i++) {
+ if (!(hdev->clock_gating_mask &
+ (BIT_ULL(gaudi_dma_assignment[i]))))
+ continue;
+
qman_offset = gaudi_dma_assignment[i] * DMA_QMAN_OFFSET;
WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset, QMAN_CGM1_PWR_GATE_EN);
WREG32(mmDMA0_QM_CGM_CFG + qman_offset,
QMAN_COMMON_CP_CGM_PWR_GATE_EN);
}
- WREG32(mmMME0_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN);
- WREG32(mmMME0_QM_CGM_CFG,
- QMAN_COMMON_CP_CGM_PWR_GATE_EN);
- WREG32(mmMME2_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN);
- WREG32(mmMME2_QM_CGM_CFG,
- QMAN_COMMON_CP_CGM_PWR_GATE_EN);
+ if (hdev->clock_gating_mask & (BIT_ULL(GAUDI_ENGINE_ID_MME_0))) {
+ WREG32(mmMME0_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN);
+ WREG32(mmMME0_QM_CGM_CFG, QMAN_COMMON_CP_CGM_PWR_GATE_EN);
+ }
+
+ if (hdev->clock_gating_mask & (BIT_ULL(GAUDI_ENGINE_ID_MME_2))) {
+ WREG32(mmMME2_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN);
+ WREG32(mmMME2_QM_CGM_CFG, QMAN_COMMON_CP_CGM_PWR_GATE_EN);
+ }
for (i = 0, qman_offset = 0 ; i < TPC_NUMBER_OF_ENGINES ; i++) {
+ if (!(hdev->clock_gating_mask &
+ (BIT_ULL(GAUDI_ENGINE_ID_TPC_0 + i))))
+ continue;
+
WREG32(mmTPC0_QM_CGM_CFG1 + qman_offset,
QMAN_CGM1_PWR_GATE_EN);
WREG32(mmTPC0_QM_CGM_CFG + qman_offset,
@@ -2663,7 +2678,7 @@ static void gaudi_halt_engines(struct hl_device *hdev, bool hard_reset)
gaudi_stop_hbm_dma_qmans(hdev);
gaudi_stop_pci_dma_qmans(hdev);
- gaudi_disable_clock_gating(hdev);
+ hdev->asic_funcs->disable_clock_gating(hdev);
msleep(wait_timeout_ms);
@@ -3003,7 +3018,7 @@ static int gaudi_hw_init(struct hl_device *hdev)
gaudi_init_tpc_qmans(hdev);
- gaudi_enable_clock_gating(hdev);
+ hdev->asic_funcs->set_clock_gating(hdev);
gaudi_enable_timestamp(hdev);
@@ -3112,7 +3127,9 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
HW_CAP_HBM_DMA | HW_CAP_PLL |
HW_CAP_MMU |
HW_CAP_SRAM_SCRAMBLER |
- HW_CAP_HBM_SCRAMBLER);
+ HW_CAP_HBM_SCRAMBLER |
+ HW_CAP_CLK_GATE);
+
memset(gaudi->events_stat, 0, sizeof(gaudi->events_stat));
}
@@ -3463,6 +3480,9 @@ static int gaudi_send_cpu_message(struct hl_device *hdev, u32 *msg,
return 0;
}
+ if (!timeout)
+ timeout = GAUDI_MSG_TO_CPU_TIMEOUT_USEC;
+
return hl_fw_send_cpu_message(hdev, GAUDI_QUEUE_ID_CPU_PQ, msg, len,
timeout, result);
}
@@ -3865,6 +3885,12 @@ static int gaudi_validate_cb(struct hl_device *hdev,
rc = -EPERM;
break;
+ case PACKET_WREG_BULK:
+ dev_err(hdev->dev,
+ "User not allowed to use WREG_BULK\n");
+ rc = -EPERM;
+ break;
+
case PACKET_LOAD_AND_EXE:
rc = gaudi_validate_load_and_exe_pkt(hdev, parser,
(struct packet_load_and_exe *) user_pkt);
@@ -3880,7 +3906,6 @@ static int gaudi_validate_cb(struct hl_device *hdev,
break;
case PACKET_WREG_32:
- case PACKET_WREG_BULK:
case PACKET_MSG_LONG:
case PACKET_MSG_SHORT:
case PACKET_REPEAT:
@@ -4521,13 +4546,18 @@ static int gaudi_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
int rc = 0;
if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
- if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) {
+
+ if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) &&
+ (hdev->clock_gating_mask &
+ GAUDI_CLK_GATE_DEBUGFS_MASK)) {
+
dev_err_ratelimited(hdev->dev,
"Can't read register - clock gating is enabled!\n");
rc = -EFAULT;
} else {
*val = RREG32(addr - CFG_BASE);
}
+
} else if ((addr >= SRAM_BASE_ADDR) &&
(addr < SRAM_BASE_ADDR + SRAM_BAR_SIZE)) {
*val = readl(hdev->pcie_bar[SRAM_BAR_ID] +
@@ -4563,13 +4593,18 @@ static int gaudi_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
int rc = 0;
if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
- if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) {
+
+ if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) &&
+ (hdev->clock_gating_mask &
+ GAUDI_CLK_GATE_DEBUGFS_MASK)) {
+
dev_err_ratelimited(hdev->dev,
"Can't write register - clock gating is enabled!\n");
rc = -EFAULT;
} else {
WREG32(addr - CFG_BASE, val);
}
+
} else if ((addr >= SRAM_BASE_ADDR) &&
(addr < SRAM_BASE_ADDR + SRAM_BAR_SIZE)) {
writel(val, hdev->pcie_bar[SRAM_BAR_ID] +
@@ -4605,7 +4640,11 @@ static int gaudi_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val)
int rc = 0;
if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
- if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) {
+
+ if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) &&
+ (hdev->clock_gating_mask &
+ GAUDI_CLK_GATE_DEBUGFS_MASK)) {
+
dev_err_ratelimited(hdev->dev,
"Can't read register - clock gating is enabled!\n");
rc = -EFAULT;
@@ -4615,6 +4654,7 @@ static int gaudi_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val)
*val = (((u64) val_h) << 32) | val_l;
}
+
} else if ((addr >= SRAM_BASE_ADDR) &&
(addr <= SRAM_BASE_ADDR + SRAM_BAR_SIZE - sizeof(u64))) {
*val = readq(hdev->pcie_bar[SRAM_BAR_ID] +
@@ -4651,7 +4691,11 @@ static int gaudi_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val)
int rc = 0;
if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
- if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) {
+
+ if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) &&
+ (hdev->clock_gating_mask &
+ GAUDI_CLK_GATE_DEBUGFS_MASK)) {
+
dev_err_ratelimited(hdev->dev,
"Can't write register - clock gating is enabled!\n");
rc = -EFAULT;
@@ -4660,6 +4704,7 @@ static int gaudi_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val)
WREG32(addr + sizeof(u32) - CFG_BASE,
upper_32_bits(val));
}
+
} else if ((addr >= SRAM_BASE_ADDR) &&
(addr <= SRAM_BASE_ADDR + SRAM_BAR_SIZE - sizeof(u64))) {
writeq(val, hdev->pcie_bar[SRAM_BAR_ID] +
@@ -4881,7 +4926,7 @@ static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid)
gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_ARUSER, asid);
gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_AWUSER, asid);
- hdev->asic_funcs->enable_clock_gating(hdev);
+ hdev->asic_funcs->set_clock_gating(hdev);
mutex_unlock(&gaudi->clk_gate_mutex);
}
@@ -5262,7 +5307,7 @@ static void gaudi_print_ecc_info_generic(struct hl_device *hdev,
}
if (disable_clock_gating) {
- hdev->asic_funcs->enable_clock_gating(hdev);
+ hdev->asic_funcs->set_clock_gating(hdev);
mutex_unlock(&gaudi->clk_gate_mutex);
}
}
@@ -5749,7 +5794,7 @@ static bool gaudi_tpc_read_interrupts(struct hl_device *hdev, u8 tpc_id,
/* Clear interrupts */
WREG32(mmTPC0_CFG_TPC_INTR_CAUSE + tpc_offset, 0);
- hdev->asic_funcs->enable_clock_gating(hdev);
+ hdev->asic_funcs->set_clock_gating(hdev);
mutex_unlock(&gaudi->clk_gate_mutex);
@@ -6265,7 +6310,7 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u32 *mask,
if (s)
seq_puts(s, "\n");
- hdev->asic_funcs->enable_clock_gating(hdev);
+ hdev->asic_funcs->set_clock_gating(hdev);
mutex_unlock(&gaudi->clk_gate_mutex);
@@ -6366,7 +6411,7 @@ static int gaudi_run_tpc_kernel(struct hl_device *hdev, u64 tpc_kernel,
dev_err(hdev->dev,
"Timeout while waiting for TPC%d icache prefetch\n",
tpc_id);
- hdev->asic_funcs->enable_clock_gating(hdev);
+ hdev->asic_funcs->set_clock_gating(hdev);
mutex_unlock(&gaudi->clk_gate_mutex);
return -EIO;
}
@@ -6395,7 +6440,7 @@ static int gaudi_run_tpc_kernel(struct hl_device *hdev, u64 tpc_kernel,
1000,
kernel_timeout);
- hdev->asic_funcs->enable_clock_gating(hdev);
+ hdev->asic_funcs->set_clock_gating(hdev);
mutex_unlock(&gaudi->clk_gate_mutex);
if (rc) {
@@ -6736,7 +6781,7 @@ static const struct hl_asic_funcs gaudi_funcs = {
.mmu_invalidate_cache = gaudi_mmu_invalidate_cache,
.mmu_invalidate_cache_range = gaudi_mmu_invalidate_cache_range,
.send_heartbeat = gaudi_send_heartbeat,
- .enable_clock_gating = gaudi_enable_clock_gating,
+ .set_clock_gating = gaudi_set_clock_gating,
.disable_clock_gating = gaudi_disable_clock_gating,
.debug_coresight = gaudi_debug_coresight,
.is_device_idle = gaudi_is_device_idle,
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 0d2952bb58df..88460b2138d8 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -88,6 +88,7 @@
#define GOYA_PLDM_MMU_TIMEOUT_USEC (MMU_CONFIG_TIMEOUT_USEC * 100)
#define GOYA_PLDM_QMAN0_TIMEOUT_USEC (HL_DEVICE_TIMEOUT_USEC * 30)
#define GOYA_BOOT_FIT_REQ_TIMEOUT_USEC 1000000 /* 1s */
+#define GOYA_MSG_TO_CPU_TIMEOUT_USEC 4000000 /* 4s */
#define GOYA_QMAN0_FENCE_VAL 0xD169B243
@@ -2830,6 +2831,9 @@ int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len,
return 0;
}
+ if (!timeout)
+ timeout = GOYA_MSG_TO_CPU_TIMEOUT_USEC;
+
return hl_fw_send_cpu_message(hdev, GOYA_QUEUE_ID_CPU_PQ, msg, len,
timeout, result);
}
@@ -4431,8 +4435,8 @@ static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
pkt->armcp_pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
ARMCP_PKT_CTL_OPCODE_SHIFT);
- rc = goya_send_cpu_message(hdev, (u32 *) pkt, total_pkt_size,
- HL_DEVICE_TIMEOUT_USEC, &result);
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt,
+ total_pkt_size, 0, &result);
if (rc)
dev_err(hdev->dev, "failed to unmask IRQ array\n");
@@ -4464,8 +4468,8 @@ static int goya_unmask_irq(struct hl_device *hdev, u16 event_type)
ARMCP_PKT_CTL_OPCODE_SHIFT);
pkt.value = cpu_to_le64(event_type);
- rc = goya_send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- HL_DEVICE_TIMEOUT_USEC, &result);
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ 0, &result);
if (rc)
dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type);
@@ -5028,14 +5032,14 @@ int goya_armcp_info_get(struct hl_device *hdev)
return 0;
}
-static void goya_enable_clock_gating(struct hl_device *hdev)
+static void goya_set_clock_gating(struct hl_device *hdev)
{
-
+ /* clock gating not supported in Goya */
}
static void goya_disable_clock_gating(struct hl_device *hdev)
{
-
+ /* clock gating not supported in Goya */
}
static bool goya_is_device_idle(struct hl_device *hdev, u32 *mask,
@@ -5259,7 +5263,7 @@ static const struct hl_asic_funcs goya_funcs = {
.mmu_invalidate_cache = goya_mmu_invalidate_cache,
.mmu_invalidate_cache_range = goya_mmu_invalidate_cache_range,
.send_heartbeat = goya_send_heartbeat,
- .enable_clock_gating = goya_enable_clock_gating,
+ .set_clock_gating = goya_set_clock_gating,
.disable_clock_gating = goya_disable_clock_gating,
.debug_coresight = goya_debug_coresight,
.is_device_idle = goya_is_device_idle,
diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h
index 1ecdcf8b763a..194d83352696 100644
--- a/drivers/misc/habanalabs/habanalabs.h
+++ b/drivers/misc/habanalabs/habanalabs.h
@@ -578,8 +578,9 @@ enum hl_pll_frequency {
* @mmu_invalidate_cache_range: flush specific MMU STLB cache lines with
* ASID-VA-size mask.
* @send_heartbeat: send is-alive packet to ArmCP and verify response.
- * @enable_clock_gating: enable clock gating for reducing power consumption.
- * @disable_clock_gating: disable clock for accessing registers on HBW.
+ * @set_clock_gating: enable/disable clock gating per engine according to
+ * clock gating mask in hdev
+ * @disable_clock_gating: disable clock gating completely
* @debug_coresight: perform certain actions on Coresight for debugging.
* @is_device_idle: return true if device is idle, false otherwise.
* @soft_reset_late_init: perform certain actions needed after soft reset.
@@ -587,7 +588,11 @@ enum hl_pll_frequency {
* @hw_queues_unlock: release H/W queues lock.
* @get_pci_id: retrieve PCI ID.
* @get_eeprom_data: retrieve EEPROM data from F/W.
- * @send_cpu_message: send buffer to ArmCP.
+ * @send_cpu_message: send message to F/W. If the message times out, the
+ * driver will eventually reset the device. The timeout can
+ * be set by the calling function, or 0 to use the default
+ * timeout of the specific ASIC
* @get_hw_state: retrieve the H/W state
* @pci_bars_map: Map PCI BARs.
* @set_dram_bar_base: Set DRAM BAR to map specific device address. Returns
@@ -680,7 +685,7 @@ struct hl_asic_funcs {
int (*mmu_invalidate_cache_range)(struct hl_device *hdev, bool is_hard,
u32 asid, u64 va, u64 size);
int (*send_heartbeat)(struct hl_device *hdev);
- void (*enable_clock_gating)(struct hl_device *hdev);
+ void (*set_clock_gating)(struct hl_device *hdev);
void (*disable_clock_gating)(struct hl_device *hdev);
int (*debug_coresight)(struct hl_device *hdev, void *data);
bool (*is_device_idle)(struct hl_device *hdev, u32 *mask,
@@ -1398,6 +1403,9 @@ struct hl_device_idle_busy_ts {
* @max_power: the max power of the device, as configured by the sysadmin. This
* value is saved so in case of hard-reset, the driver will restore
* this value and update the F/W after the re-initialization
+ * @clock_gating_mask: bitmask that controls clock gating per engine. See
+ *                     debugfs-driver-habanalabs for details.
* @in_reset: is device in reset flow.
* @curr_pll_profile: current PLL profile.
* @cs_active_cnt: number of active command submissions on this device (active
@@ -1425,7 +1433,6 @@ struct hl_device_idle_busy_ts {
* @init_done: is the initialization of the device done.
* @mmu_enable: is MMU enabled.
* @mmu_huge_page_opt: is MMU huge pages optimization enabled.
- * @clock_gating: is clock gating enabled.
* @device_cpu_disabled: is the device CPU disabled (due to timeouts)
* @dma_mask: the dma mask that was set for this device
* @in_debug: is device under debug. This, together with fpriv_list, enforces
@@ -1493,6 +1500,7 @@ struct hl_device {
atomic64_t dram_used_mem;
u64 timeout_jiffies;
u64 max_power;
+ u64 clock_gating_mask;
atomic_t in_reset;
enum hl_pll_frequency curr_pll_profile;
int cs_active_cnt;
@@ -1514,7 +1522,6 @@ struct hl_device {
u8 dram_default_page_mapping;
u8 pmmu_huge_range;
u8 init_done;
- u8 clock_gating;
u8 device_cpu_disabled;
u8 dma_mask;
u8 in_debug;
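
The u8 flag is replaced by a u64 bitmask so gating can be toggled per engine; ULONG_MAX (set in habanalabs_drv.c below) preserves the old enable-everything default. A hedged sketch of how such a mask is queried and updated; the engine index here is hypothetical.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* All-ones default, as set in set_driver_behavior_per_device(). */
	uint64_t clock_gating_mask = UINT64_MAX;
	unsigned int engine_id = 3;	/* hypothetical engine index */

	if (clock_gating_mask & (1ULL << engine_id))
		printf("engine %u: clock gating enabled\n", engine_id);

	/* Clearing one bit disables gating for that engine only. */
	clock_gating_mask &= ~(1ULL << engine_id);
	printf("mask now 0x%llx\n", (unsigned long long)clock_gating_mask);
	return 0;
}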
diff --git a/drivers/misc/habanalabs/habanalabs_drv.c b/drivers/misc/habanalabs/habanalabs_drv.c
index 8652c7e5d7f1..22716da9f85f 100644
--- a/drivers/misc/habanalabs/habanalabs_drv.c
+++ b/drivers/misc/habanalabs/habanalabs_drv.c
@@ -232,7 +232,7 @@ static void set_driver_behavior_per_device(struct hl_device *hdev)
hdev->fw_loading = 1;
hdev->cpu_queues_enable = 1;
hdev->heartbeat = 1;
- hdev->clock_gating = 1;
+ hdev->clock_gating_mask = ULONG_MAX;
hdev->reset_pcilink = 0;
hdev->axi_drain = 0;
diff --git a/drivers/misc/habanalabs/hwmon.c b/drivers/misc/habanalabs/hwmon.c
index 8c6cd77e6af6..b997336fa75f 100644
--- a/drivers/misc/habanalabs/hwmon.c
+++ b/drivers/misc/habanalabs/hwmon.c
@@ -10,7 +10,6 @@
#include <linux/pci.h>
#include <linux/hwmon.h>
-#define SENSORS_PKT_TIMEOUT 1000000 /* 1s */
#define HWMON_NR_SENSOR_TYPES (hwmon_pwm + 1)
int hl_build_hwmon_channel_info(struct hl_device *hdev,
@@ -323,7 +322,7 @@ int hl_get_temperature(struct hl_device *hdev,
pkt.type = __cpu_to_le16(attr);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- SENSORS_PKT_TIMEOUT, value);
+ 0, value);
if (rc) {
dev_err(hdev->dev,
@@ -350,7 +349,7 @@ int hl_set_temperature(struct hl_device *hdev,
pkt.value = __cpu_to_le64(value);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- SENSORS_PKT_TIMEOUT, NULL);
+ 0, NULL);
if (rc)
dev_err(hdev->dev,
@@ -374,7 +373,7 @@ int hl_get_voltage(struct hl_device *hdev,
pkt.type = __cpu_to_le16(attr);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- SENSORS_PKT_TIMEOUT, value);
+ 0, value);
if (rc) {
dev_err(hdev->dev,
@@ -400,7 +399,7 @@ int hl_get_current(struct hl_device *hdev,
pkt.type = __cpu_to_le16(attr);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- SENSORS_PKT_TIMEOUT, value);
+ 0, value);
if (rc) {
dev_err(hdev->dev,
@@ -426,7 +425,7 @@ int hl_get_fan_speed(struct hl_device *hdev,
pkt.type = __cpu_to_le16(attr);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- SENSORS_PKT_TIMEOUT, value);
+ 0, value);
if (rc) {
dev_err(hdev->dev,
@@ -452,7 +451,7 @@ int hl_get_pwm_info(struct hl_device *hdev,
pkt.type = __cpu_to_le16(attr);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- SENSORS_PKT_TIMEOUT, value);
+ 0, value);
if (rc) {
dev_err(hdev->dev,
@@ -479,7 +478,7 @@ void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
pkt.value = cpu_to_le64(value);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- SENSORS_PKT_TIMEOUT, NULL);
+ 0, NULL);
if (rc)
dev_err(hdev->dev,
@@ -502,7 +501,7 @@ int hl_set_voltage(struct hl_device *hdev,
pkt.value = __cpu_to_le64(value);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- SENSORS_PKT_TIMEOUT, NULL);
+ 0, NULL);
if (rc)
dev_err(hdev->dev,
@@ -527,7 +526,7 @@ int hl_set_current(struct hl_device *hdev,
pkt.value = __cpu_to_le64(value);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- SENSORS_PKT_TIMEOUT, NULL);
+ 0, NULL);
if (rc)
dev_err(hdev->dev,
diff --git a/drivers/misc/habanalabs/sysfs.c b/drivers/misc/habanalabs/sysfs.c
index 5d78d5e1c782..70b6b1863c2e 100644
--- a/drivers/misc/habanalabs/sysfs.c
+++ b/drivers/misc/habanalabs/sysfs.c
@@ -9,9 +9,6 @@
#include <linux/pci.h>
-#define SET_CLK_PKT_TIMEOUT 1000000 /* 1s */
-#define SET_PWR_PKT_TIMEOUT 1000000 /* 1s */
-
long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr)
{
struct armcp_packet pkt;
@@ -29,7 +26,7 @@ long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr)
pkt.pll_index = cpu_to_le32(pll_index);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- SET_CLK_PKT_TIMEOUT, &result);
+ 0, &result);
if (rc) {
dev_err(hdev->dev,
@@ -54,7 +51,7 @@ void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq)
pkt.value = cpu_to_le64(freq);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- SET_CLK_PKT_TIMEOUT, NULL);
+ 0, NULL);
if (rc)
dev_err(hdev->dev,
@@ -74,7 +71,7 @@ u64 hl_get_max_power(struct hl_device *hdev)
ARMCP_PKT_CTL_OPCODE_SHIFT);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- SET_PWR_PKT_TIMEOUT, &result);
+ 0, &result);
if (rc) {
dev_err(hdev->dev, "Failed to get max power, error %d\n", rc);
@@ -96,7 +93,7 @@ void hl_set_max_power(struct hl_device *hdev, u64 value)
pkt.value = cpu_to_le64(value);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- SET_PWR_PKT_TIMEOUT, NULL);
+ 0, NULL);
if (rc)
dev_err(hdev->dev, "Failed to set max power, error %d\n", rc);
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index 44eb2b1d0416..3b6664c7e73c 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -406,19 +406,34 @@ free_dst:
return err;
}
+static bool bareudp_proto_valid(struct bareudp_dev *bareudp, __be16 proto)
+{
+ if (bareudp->ethertype == proto)
+ return true;
+
+ if (!bareudp->multi_proto_mode)
+ return false;
+
+ if (bareudp->ethertype == htons(ETH_P_MPLS_UC) &&
+ proto == htons(ETH_P_MPLS_MC))
+ return true;
+
+ if (bareudp->ethertype == htons(ETH_P_IP) &&
+ proto == htons(ETH_P_IPV6))
+ return true;
+
+ return false;
+}
+
static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bareudp_dev *bareudp = netdev_priv(dev);
struct ip_tunnel_info *info = NULL;
int err;
- if (skb->protocol != bareudp->ethertype) {
- if (!bareudp->multi_proto_mode ||
- (skb->protocol != htons(ETH_P_MPLS_MC) &&
- skb->protocol != htons(ETH_P_IPV6))) {
- err = -EINVAL;
- goto tx_error;
- }
+ if (!bareudp_proto_valid(bareudp, skb->protocol)) {
+ err = -EINVAL;
+ goto tx_error;
}
info = skb_tunnel_info(skb);
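
Factoring the check into bareudp_proto_valid() also tightens the logic: the old inline test accepted ETH_P_MPLS_MC or ETH_P_IPV6 in multi-proto mode regardless of the configured ethertype, while the helper only accepts the companion protocol of the configured one. A userspace sketch of the same decision table (the ETH_P_* values are the standard ethertypes):

#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_P_IP      0x0800
#define ETH_P_IPV6    0x86DD
#define ETH_P_MPLS_UC 0x8847
#define ETH_P_MPLS_MC 0x8848

/* Same decision table as bareudp_proto_valid(), in host-test form. */
static bool proto_valid(uint16_t ethertype, bool multi, uint16_t proto)
{
	if (ethertype == proto)
		return true;
	if (!multi)
		return false;
	if (ethertype == htons(ETH_P_MPLS_UC) && proto == htons(ETH_P_MPLS_MC))
		return true;
	if (ethertype == htons(ETH_P_IP) && proto == htons(ETH_P_IPV6))
		return true;
	return false;
}

int main(void)
{
	/* An IPv4 tunnel in multi-proto mode also accepts IPv6 ... */
	printf("%d\n", proto_valid(htons(ETH_P_IP), true, htons(ETH_P_IPV6)));  /* 1 */
	/* ... but not without multi-proto mode. */
	printf("%d\n", proto_valid(htons(ETH_P_IP), false, htons(ETH_P_IPV6))); /* 0 */
	return 0;
}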
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index f8bc85a6e670..eb600b3dbf26 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -14,28 +14,11 @@
#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/if_bridge.h>
+#include <linux/dsa/loop.h>
#include <net/dsa.h>
#include "dsa_loop.h"
-struct dsa_loop_vlan {
- u16 members;
- u16 untagged;
-};
-
-struct dsa_loop_mib_entry {
- char name[ETH_GSTRING_LEN];
- unsigned long val;
-};
-
-enum dsa_loop_mib_counters {
- DSA_LOOP_PHY_READ_OK,
- DSA_LOOP_PHY_READ_ERR,
- DSA_LOOP_PHY_WRITE_OK,
- DSA_LOOP_PHY_WRITE_ERR,
- __DSA_LOOP_CNT_MAX,
-};
-
static struct dsa_loop_mib_entry dsa_loop_mibs[] = {
[DSA_LOOP_PHY_READ_OK] = { "phy_read_ok", },
[DSA_LOOP_PHY_READ_ERR] = { "phy_read_err", },
@@ -43,21 +26,6 @@ static struct dsa_loop_mib_entry dsa_loop_mibs[] = {
[DSA_LOOP_PHY_WRITE_ERR] = { "phy_write_err", },
};
-struct dsa_loop_port {
- struct dsa_loop_mib_entry mib[__DSA_LOOP_CNT_MAX];
-};
-
-#define DSA_LOOP_VLANS 5
-
-struct dsa_loop_priv {
- struct mii_bus *bus;
- unsigned int port_base;
- struct dsa_loop_vlan vlans[DSA_LOOP_VLANS];
- struct net_device *netdev;
- struct dsa_loop_port ports[DSA_MAX_PORTS];
- u16 pvid;
-};
-
static struct phy_device *phydevs[PHY_MAX_ADDR];
static enum dsa_tag_protocol dsa_loop_get_protocol(struct dsa_switch *ds,
@@ -191,7 +159,7 @@ dsa_loop_port_vlan_prepare(struct dsa_switch *ds, int port,
/* Just do a sleeping operation to make lockdep checks effective */
mdiobus_read(bus, ps->port_base + port, MII_BMSR);
- if (vlan->vid_end > DSA_LOOP_VLANS)
+ if (vlan->vid_end > ARRAY_SIZE(ps->vlans))
return -ERANGE;
return 0;
@@ -224,7 +192,7 @@ static void dsa_loop_port_vlan_add(struct dsa_switch *ds, int port,
}
if (pvid)
- ps->pvid = vid;
+ ps->ports[port].pvid = vid;
}
static int dsa_loop_port_vlan_del(struct dsa_switch *ds, int port,
@@ -234,7 +202,7 @@ static int dsa_loop_port_vlan_del(struct dsa_switch *ds, int port,
struct dsa_loop_priv *ps = ds->priv;
struct mii_bus *bus = ps->bus;
struct dsa_loop_vlan *vl;
- u16 vid, pvid = ps->pvid;
+ u16 vid, pvid = ps->ports[port].pvid;
/* Just do a sleeping operation to make lockdep checks effective */
mdiobus_read(bus, ps->port_base + port, MII_BMSR);
@@ -252,11 +220,26 @@ static int dsa_loop_port_vlan_del(struct dsa_switch *ds, int port,
dev_dbg(ds->dev, "%s: port: %d vlan: %d, %stagged, pvid: %d\n",
__func__, port, vid, untagged ? "un" : "", pvid);
}
- ps->pvid = pvid;
+ ps->ports[port].pvid = pvid;
+
+ return 0;
+}
+
+static int dsa_loop_port_change_mtu(struct dsa_switch *ds, int port,
+ int new_mtu)
+{
+ struct dsa_loop_priv *priv = ds->priv;
+
+ priv->ports[port].mtu = new_mtu;
return 0;
}
+static int dsa_loop_port_max_mtu(struct dsa_switch *ds, int port)
+{
+ return ETH_MAX_MTU;
+}
+
static const struct dsa_switch_ops dsa_loop_driver = {
.get_tag_protocol = dsa_loop_get_protocol,
.setup = dsa_loop_setup,
@@ -273,6 +256,8 @@ static const struct dsa_switch_ops dsa_loop_driver = {
.port_vlan_prepare = dsa_loop_port_vlan_prepare,
.port_vlan_add = dsa_loop_port_vlan_add,
.port_vlan_del = dsa_loop_port_vlan_del,
+ .port_change_mtu = dsa_loop_port_change_mtu,
+ .port_max_mtu = dsa_loop_port_max_mtu,
};
static int dsa_loop_drv_probe(struct mdio_device *mdiodev)
@@ -290,7 +275,7 @@ static int dsa_loop_drv_probe(struct mdio_device *mdiodev)
return -ENOMEM;
ds->dev = &mdiodev->dev;
- ds->num_ports = DSA_MAX_PORTS;
+ ds->num_ports = DSA_LOOP_NUM_PORTS;
ps = devm_kzalloc(&mdiodev->dev, sizeof(*ps), GFP_KERNEL);
if (!ps)
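
The dsa_loop changes above move the PVID from a single switch-wide field to per-port state, so programming one port no longer clobbers another. A tiny sketch of the difference; the structure and port numbers are hypothetical.

#include <stdint.h>
#include <stdio.h>

#define NUM_PORTS 6

struct port_state { uint16_t pvid; };

int main(void)
{
	/* Before the fix, a single shared pvid meant that setting port 2's
	 * PVID overwrote port 1's.  Per-port state keeps them independent.
	 */
	struct port_state ports[NUM_PORTS] = { 0 };

	ports[1].pvid = 10;
	ports[2].pvid = 20;
	printf("port1 pvid=%u port2 pvid=%u\n", ports[1].pvid, ports[2].pvid);
	return 0;
}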
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index a5566de82853..f1e484477e35 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -408,6 +408,112 @@ qca8k_fdb_flush(struct qca8k_priv *priv)
mutex_unlock(&priv->reg_mutex);
}
+static int
+qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid)
+{
+ u32 reg;
+
+ /* Set the command and VLAN index */
+ reg = QCA8K_VTU_FUNC1_BUSY;
+ reg |= cmd;
+ reg |= vid << QCA8K_VTU_FUNC1_VID_S;
+
+ /* Write the function register triggering the table access */
+ qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg);
+
+ /* wait for completion */
+ if (qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY))
+ return -ETIMEDOUT;
+
+ /* Check for table full violation when adding an entry */
+ if (cmd == QCA8K_VLAN_LOAD) {
+ reg = qca8k_read(priv, QCA8K_REG_VTU_FUNC1);
+ if (reg & QCA8K_VTU_FUNC1_FULL)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int
+qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid, bool untagged)
+{
+ u32 reg;
+ int ret;
+
+	/* We do the right thing with VLAN 0 and treat it as untagged while
+	 * preserving the tag on egress.
+	 */
+ if (vid == 0)
+ return 0;
+
+ mutex_lock(&priv->reg_mutex);
+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
+ if (ret < 0)
+ goto out;
+
+ reg = qca8k_read(priv, QCA8K_REG_VTU_FUNC0);
+ reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN;
+ reg &= ~(QCA8K_VTU_FUNC0_EG_MODE_MASK << QCA8K_VTU_FUNC0_EG_MODE_S(port));
+ if (untagged)
+ reg |= QCA8K_VTU_FUNC0_EG_MODE_UNTAG <<
+ QCA8K_VTU_FUNC0_EG_MODE_S(port);
+ else
+ reg |= QCA8K_VTU_FUNC0_EG_MODE_TAG <<
+ QCA8K_VTU_FUNC0_EG_MODE_S(port);
+
+ qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
+
+out:
+ mutex_unlock(&priv->reg_mutex);
+
+ return ret;
+}
+
+static int
+qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid)
+{
+ u32 reg, mask;
+ int ret, i;
+ bool del;
+
+ mutex_lock(&priv->reg_mutex);
+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
+ if (ret < 0)
+ goto out;
+
+ reg = qca8k_read(priv, QCA8K_REG_VTU_FUNC0);
+	reg &= ~(QCA8K_VTU_FUNC0_EG_MODE_MASK <<
+		 QCA8K_VTU_FUNC0_EG_MODE_S(port));
+ reg |= QCA8K_VTU_FUNC0_EG_MODE_NOT <<
+ QCA8K_VTU_FUNC0_EG_MODE_S(port);
+
+ /* Check if we're the last member to be removed */
+ del = true;
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ mask = QCA8K_VTU_FUNC0_EG_MODE_NOT;
+ mask <<= QCA8K_VTU_FUNC0_EG_MODE_S(i);
+
+ if ((reg & mask) != mask) {
+ del = false;
+ break;
+ }
+ }
+
+ if (del) {
+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid);
+ } else {
+ qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
+ }
+
+out:
+ mutex_unlock(&priv->reg_mutex);
+
+ return ret;
+}
+
static void
qca8k_mib_init(struct qca8k_priv *priv)
{
@@ -663,10 +769,11 @@ qca8k_setup(struct dsa_switch *ds)
* default egress vid
*/
qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i),
- 0xffff << shift, 1 << shift);
+ 0xfff << shift,
+ QCA8K_PORT_VID_DEF << shift);
qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i),
- QCA8K_PORT_VLAN_CVID(1) |
- QCA8K_PORT_VLAN_SVID(1));
+ QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) |
+ QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF));
}
}
@@ -1133,7 +1240,7 @@ qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
{
/* Set the vid to the port vlan id if no vid is set */
if (!vid)
- vid = 1;
+ vid = QCA8K_PORT_VID_DEF;
return qca8k_fdb_add(priv, addr, port_mask, vid,
QCA8K_ATU_STATUS_STATIC);
@@ -1157,7 +1264,7 @@ qca8k_port_fdb_del(struct dsa_switch *ds, int port,
u16 port_mask = BIT(port);
if (!vid)
- vid = 1;
+ vid = QCA8K_PORT_VID_DEF;
return qca8k_fdb_del(priv, addr, port_mask, vid);
}
@@ -1186,6 +1293,76 @@ qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
return 0;
}
+static int
+qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+ if (vlan_filtering) {
+ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_VLAN_MODE,
+ QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE);
+ } else {
+ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_VLAN_MODE,
+ QCA8K_PORT_LOOKUP_VLAN_MODE_NONE);
+ }
+
+ return 0;
+}
+
+static int
+qca8k_port_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ return 0;
+}
+
+static void
+qca8k_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ struct qca8k_priv *priv = ds->priv;
+ int ret = 0;
+ u16 vid;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end && !ret; ++vid)
+ ret = qca8k_vlan_add(priv, port, vid, untagged);
+
+ if (ret)
+ dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret);
+
+ if (pvid) {
+ int shift = 16 * (port % 2);
+
+ qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
+ 0xfff << shift,
+ vlan->vid_end << shift);
+ qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
+ QCA8K_PORT_VLAN_CVID(vlan->vid_end) |
+ QCA8K_PORT_VLAN_SVID(vlan->vid_end));
+ }
+}
+
+static int
+qca8k_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int ret = 0;
+ u16 vid;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end && !ret; ++vid)
+ ret = qca8k_vlan_del(priv, port, vid);
+
+ if (ret)
+ dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret);
+
+ return ret;
+}
+
static enum dsa_tag_protocol
qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
enum dsa_tag_protocol mp)
@@ -1211,6 +1388,10 @@ static const struct dsa_switch_ops qca8k_switch_ops = {
.port_fdb_add = qca8k_port_fdb_add,
.port_fdb_del = qca8k_port_fdb_del,
.port_fdb_dump = qca8k_port_fdb_dump,
+ .port_vlan_filtering = qca8k_port_vlan_filtering,
+ .port_vlan_prepare = qca8k_port_vlan_prepare,
+ .port_vlan_add = qca8k_port_vlan_add,
+ .port_vlan_del = qca8k_port_vlan_del,
.phylink_validate = qca8k_phylink_validate,
.phylink_mac_link_state = qca8k_phylink_mac_link_state,
.phylink_mac_config = qca8k_phylink_mac_config,
@@ -1261,6 +1442,7 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
priv->ds->dev = &mdiodev->dev;
priv->ds->num_ports = QCA8K_NUM_PORTS;
+ priv->ds->configure_vlan_while_not_filtering = true;
priv->ds->priv = priv;
priv->ops = qca8k_switch_ops;
priv->ds->ops = &priv->ops;
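
qca8k_vlan_access() drives the VLAN table through an indirect register: command and VID are packed into VTU_FUNC1 together with a BUSY bit that triggers the operation and is then polled for completion. A standalone sketch of the register composition, using the field definitions added to qca8k.h below:

#include <stdint.h>
#include <stdio.h>

#define QCA8K_VTU_FUNC1_BUSY	(1u << 31)
#define QCA8K_VTU_FUNC1_VID_S	16
#define QCA8K_VLAN_LOAD		2

int main(void)
{
	uint16_t vid = 100;
	uint32_t reg;

	/* Compose the function register exactly as qca8k_vlan_access()
	 * does: BUSY triggers the table operation, the command selects
	 * LOAD, and the VLAN index sits at bit 16.
	 */
	reg = QCA8K_VTU_FUNC1_BUSY | QCA8K_VLAN_LOAD |
	      ((uint32_t)vid << QCA8K_VTU_FUNC1_VID_S);
	printf("VTU_FUNC1 = 0x%08x\n", reg);	/* 0x80640002 */
	return 0;
}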
diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h
index 31439396401c..7ca4b93e0bb5 100644
--- a/drivers/net/dsa/qca8k.h
+++ b/drivers/net/dsa/qca8k.h
@@ -22,6 +22,8 @@
#define QCA8K_CPU_PORT 0
+#define QCA8K_PORT_VID_DEF 1
+
/* Global control registers */
#define QCA8K_REG_MASK_CTRL 0x000
#define QCA8K_MASK_CTRL_ID_M 0xff
@@ -126,6 +128,19 @@
#define QCA8K_ATU_FUNC_FULL BIT(12)
#define QCA8K_ATU_FUNC_PORT_M 0xf
#define QCA8K_ATU_FUNC_PORT_S 8
+#define QCA8K_REG_VTU_FUNC0 0x610
+#define QCA8K_VTU_FUNC0_VALID BIT(20)
+#define QCA8K_VTU_FUNC0_IVL_EN BIT(19)
+#define QCA8K_VTU_FUNC0_EG_MODE_S(_i) (4 + (_i) * 2)
+#define QCA8K_VTU_FUNC0_EG_MODE_MASK 3
+#define QCA8K_VTU_FUNC0_EG_MODE_UNMOD 0
+#define QCA8K_VTU_FUNC0_EG_MODE_UNTAG 1
+#define QCA8K_VTU_FUNC0_EG_MODE_TAG 2
+#define QCA8K_VTU_FUNC0_EG_MODE_NOT 3
+#define QCA8K_REG_VTU_FUNC1 0x614
+#define QCA8K_VTU_FUNC1_BUSY BIT(31)
+#define QCA8K_VTU_FUNC1_VID_S 16
+#define QCA8K_VTU_FUNC1_FULL BIT(4)
#define QCA8K_REG_GLOBAL_FW_CTRL0 0x620
#define QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN BIT(10)
#define QCA8K_REG_GLOBAL_FW_CTRL1 0x624
@@ -135,6 +150,11 @@
#define QCA8K_GLOBAL_FW_CTRL1_UC_DP_S 0
#define QCA8K_PORT_LOOKUP_CTRL(_i) (0x660 + (_i) * 0xc)
#define QCA8K_PORT_LOOKUP_MEMBER GENMASK(6, 0)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE GENMASK(9, 8)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE_NONE (0 << 8)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE_FALLBACK (1 << 8)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE_CHECK (2 << 8)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE (3 << 8)
#define QCA8K_PORT_LOOKUP_STATE_MASK GENMASK(18, 16)
#define QCA8K_PORT_LOOKUP_STATE_DISABLED (0 << 16)
#define QCA8K_PORT_LOOKUP_STATE_BLOCKING (1 << 16)
@@ -178,6 +198,15 @@ enum qca8k_fdb_cmd {
QCA8K_FDB_SEARCH = 7,
};
+enum qca8k_vlan_cmd {
+ QCA8K_VLAN_FLUSH = 1,
+ QCA8K_VLAN_LOAD = 2,
+ QCA8K_VLAN_PURGE = 3,
+ QCA8K_VLAN_REMOVE_PORT = 4,
+ QCA8K_VLAN_NEXT = 5,
+ QCA8K_VLAN_READ = 6,
+};
+
struct ar8xxx_port_status {
int enabled;
};
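
Each port's egress mode occupies a 2-bit field inside VTU_FUNC0, at an offset given by QCA8K_VTU_FUNC0_EG_MODE_S(port). A sketch of the read-modify-write that qca8k_vlan_add() performs on that field:

#include <stdint.h>
#include <stdio.h>

#define QCA8K_VTU_FUNC0_EG_MODE_S(_i)	(4 + (_i) * 2)
#define QCA8K_VTU_FUNC0_EG_MODE_MASK	3
#define QCA8K_VTU_FUNC0_EG_MODE_UNTAG	1

int main(void)
{
	uint32_t reg = 0;
	int port = 3;

	/* Clear the 2-bit egress mode for the port, then mark it untagged,
	 * mirroring the read-modify-write in qca8k_vlan_add().
	 */
	reg &= ~((uint32_t)QCA8K_VTU_FUNC0_EG_MODE_MASK <<
		 QCA8K_VTU_FUNC0_EG_MODE_S(port));
	reg |= (uint32_t)QCA8K_VTU_FUNC0_EG_MODE_UNTAG <<
	       QCA8K_VTU_FUNC0_EG_MODE_S(port);
	printf("VTU_FUNC0 = 0x%08x\n", reg);	/* bit 10 set -> 0x00000400 */
	return 0;
}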
diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c
index 993cf3ac59d9..8f40fbf70a82 100644
--- a/drivers/net/dsa/rtl8366.c
+++ b/drivers/net/dsa/rtl8366.c
@@ -43,18 +43,26 @@ int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
int ret;
int i;
+ dev_dbg(smi->dev,
+ "setting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
+ vid, member, untag);
+
/* Update the 4K table */
ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
if (ret)
return ret;
- vlan4k.member = member;
- vlan4k.untag = untag;
+ vlan4k.member |= member;
+ vlan4k.untag |= untag;
vlan4k.fid = fid;
ret = smi->ops->set_vlan_4k(smi, &vlan4k);
if (ret)
return ret;
+ dev_dbg(smi->dev,
+ "resulting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
+ vid, vlan4k.member, vlan4k.untag);
+
/* Try to find an existing MC entry for this VID */
for (i = 0; i < smi->num_vlan_mc; i++) {
struct rtl8366_vlan_mc vlanmc;
@@ -65,11 +73,16 @@ int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
if (vid == vlanmc.vid) {
/* update the MC entry */
- vlanmc.member = member;
- vlanmc.untag = untag;
+ vlanmc.member |= member;
+ vlanmc.untag |= untag;
vlanmc.fid = fid;
ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+
+ dev_dbg(smi->dev,
+ "resulting VLAN%d MC members: 0x%02x, untagged: 0x%02x\n",
+ vid, vlanmc.member, vlanmc.untag);
+
break;
}
}
@@ -384,7 +397,7 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port,
if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
dev_err(smi->dev, "port is DSA or CPU port\n");
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
int pvid_val = 0;
dev_info(smi->dev, "add VLAN %04x\n", vid);
@@ -407,13 +420,13 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port,
if (ret < 0)
return;
}
- }
- ret = rtl8366_set_vlan(smi, port, member, untag, 0);
- if (ret)
- dev_err(smi->dev,
- "failed to set up VLAN %04x",
- vid);
+ ret = rtl8366_set_vlan(smi, vid, member, untag, 0);
+ if (ret)
+ dev_err(smi->dev,
+ "failed to set up VLAN %04x",
+ vid);
+ }
}
EXPORT_SYMBOL_GPL(rtl8366_vlan_add);
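
The switch from plain assignment to |= is the substantive fix in rtl8366_set_vlan(): membership and untagged bits now accumulate across calls instead of the last port to join overwriting everyone else. Illustrated with plain bitmasks:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t member = 0, untag = 0;

	/* Port 1 joins VLAN 1 untagged, then port 2 joins tagged.  With
	 * plain assignment the second call would wipe port 1's membership;
	 * OR-ing, as the patch does, accumulates it.
	 */
	member |= (1u << 1);
	untag  |= (1u << 1);	/* port 1, untagged */
	member |= (1u << 2);	/* port 2, tagged */

	printf("members: 0x%02x, untagged: 0x%02x\n", member, untag);
	/* members: 0x06, untagged: 0x02 */
	return 0;
}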
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c
index 177134596458..1b90570b257b 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.c
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.c
@@ -24,7 +24,7 @@
* this hardware can do (but may be enough for some setups). Anything of higher
* frequency than 1 Hz will be lost, since there is no timestamp FIFO.
*/
-#define SJA1105_EXTTS_INTERVAL (HZ / 4)
+#define SJA1105_EXTTS_INTERVAL (HZ / 6)
/* This range is actually +/- SJA1105_MAX_ADJ_PPB
* divided by 1000 (ppb -> ppm) and with a 16-bit
@@ -51,8 +51,8 @@ enum sja1105_ptp_clk_mode {
PTP_SET_MODE = 0,
};
-#define extts_to_data(d) \
- container_of((d), struct sja1105_ptp_data, extts_work)
+#define extts_to_data(t) \
+ container_of((t), struct sja1105_ptp_data, extts_timer)
#define ptp_caps_to_data(d) \
container_of((d), struct sja1105_ptp_data, caps)
#define ptp_data_to_sja1105(d) \
@@ -350,6 +350,30 @@ static int sja1105_ptpclkval_write(struct sja1105_private *priv, u64 ticks,
ptp_sts);
}
+static void sja1105_extts_poll(struct sja1105_private *priv)
+{
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct ptp_clock_event event;
+ u64 ptpsyncts = 0;
+ int rc;
+
+ rc = sja1105_xfer_u64(priv, SPI_READ, regs->ptpsyncts, &ptpsyncts,
+ NULL);
+ if (rc < 0)
+ dev_err_ratelimited(priv->ds->dev,
+ "Failed to read PTPSYNCTS: %d\n", rc);
+
+ if (ptpsyncts && ptp_data->ptpsyncts != ptpsyncts) {
+ event.index = 0;
+ event.type = PTP_CLOCK_EXTTS;
+ event.timestamp = ns_to_ktime(sja1105_ticks_to_ns(ptpsyncts));
+ ptp_clock_event(ptp_data->clock, &event);
+
+ ptp_data->ptpsyncts = ptpsyncts;
+ }
+}
+
static long sja1105_rxtstamp_work(struct ptp_clock_info *ptp)
{
struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
@@ -380,6 +404,9 @@ static long sja1105_rxtstamp_work(struct ptp_clock_info *ptp)
netif_rx_ni(skb);
}
+ if (ptp_data->extts_enabled)
+ sja1105_extts_poll(priv);
+
mutex_unlock(&ptp_data->lock);
/* Don't restart */
@@ -595,36 +622,21 @@ static int sja1105_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
return rc;
}
-static void sja1105_ptp_extts_work(struct work_struct *work)
+static void sja1105_ptp_extts_setup_timer(struct sja1105_ptp_data *ptp_data)
{
- struct delayed_work *dw = to_delayed_work(work);
- struct sja1105_ptp_data *ptp_data = extts_to_data(dw);
- struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
- const struct sja1105_regs *regs = priv->info->regs;
- struct ptp_clock_event event;
- u64 ptpsyncts = 0;
- int rc;
-
- mutex_lock(&ptp_data->lock);
-
- rc = sja1105_xfer_u64(priv, SPI_READ, regs->ptpsyncts, &ptpsyncts,
- NULL);
- if (rc < 0)
- dev_err_ratelimited(priv->ds->dev,
- "Failed to read PTPSYNCTS: %d\n", rc);
+ unsigned long expires = ((jiffies / SJA1105_EXTTS_INTERVAL) + 1) *
+ SJA1105_EXTTS_INTERVAL;
- if (ptpsyncts && ptp_data->ptpsyncts != ptpsyncts) {
- event.index = 0;
- event.type = PTP_CLOCK_EXTTS;
- event.timestamp = ns_to_ktime(sja1105_ticks_to_ns(ptpsyncts));
- ptp_clock_event(ptp_data->clock, &event);
+ mod_timer(&ptp_data->extts_timer, expires);
+}
- ptp_data->ptpsyncts = ptpsyncts;
- }
+static void sja1105_ptp_extts_timer(struct timer_list *t)
+{
+ struct sja1105_ptp_data *ptp_data = extts_to_data(t);
- mutex_unlock(&ptp_data->lock);
+ ptp_schedule_worker(ptp_data->clock, 0);
- schedule_delayed_work(&ptp_data->extts_work, SJA1105_EXTTS_INTERVAL);
+ sja1105_ptp_extts_setup_timer(ptp_data);
}
static int sja1105_change_ptp_clk_pin_func(struct sja1105_private *priv,
@@ -771,11 +783,12 @@ static int sja1105_extts_enable(struct sja1105_private *priv,
if (rc)
return rc;
+ priv->ptp_data.extts_enabled = on;
+
if (on)
- schedule_delayed_work(&priv->ptp_data.extts_work,
- SJA1105_EXTTS_INTERVAL);
+ sja1105_ptp_extts_setup_timer(&priv->ptp_data);
else
- cancel_delayed_work_sync(&priv->ptp_data.extts_work);
+ del_timer_sync(&priv->ptp_data.extts_timer);
return 0;
}
@@ -858,7 +871,7 @@ int sja1105_ptp_clock_register(struct dsa_switch *ds)
ptp_data->cmd.corrclk4ts = true;
ptp_data->cmd.ptpclkadd = PTP_SET_MODE;
- INIT_DELAYED_WORK(&ptp_data->extts_work, sja1105_ptp_extts_work);
+ timer_setup(&ptp_data->extts_timer, sja1105_ptp_extts_timer, 0);
return sja1105_ptp_reset(ds);
}
@@ -871,7 +884,7 @@ void sja1105_ptp_clock_unregister(struct dsa_switch *ds)
if (IS_ERR_OR_NULL(ptp_data->clock))
return;
- cancel_delayed_work_sync(&ptp_data->extts_work);
+ del_timer_sync(&ptp_data->extts_timer);
ptp_cancel_worker_sync(ptp_data->clock);
skb_queue_purge(&ptp_data->skb_rxtstamp_queue);
ptp_clock_unregister(ptp_data->clock);
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.h b/drivers/net/dsa/sja1105/sja1105_ptp.h
index 6408d1158f2d..3daa33e98e77 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.h
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.h
@@ -4,6 +4,8 @@
#ifndef _SJA1105_PTP_H
#define _SJA1105_PTP_H
+#include <linux/timer.h>
+
#if IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP)
/* Timestamps are in units of 8 ns clock ticks (equivalent to
@@ -72,13 +74,14 @@ struct sja1105_ptp_cmd {
};
struct sja1105_ptp_data {
- struct delayed_work extts_work;
+ struct timer_list extts_timer;
struct sk_buff_head skb_rxtstamp_queue;
struct ptp_clock_info caps;
struct ptp_clock *clock;
struct sja1105_ptp_cmd cmd;
/* Serializes all operations on the PTP hardware clock */
struct mutex lock;
+ bool extts_enabled;
u64 ptpsyncts;
};
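
Moving from a delayed work item to a timer lets the poll fire on interval boundaries: the expiry is rounded up to the next multiple of SJA1105_EXTTS_INTERVAL rather than drifting by the work item's own latency. The rounding arithmetic, sketched standalone; HZ is assumed to be 250 here purely for illustration.

#include <stdio.h>

#define HZ 250	/* assumed value, for illustration only */
#define SJA1105_EXTTS_INTERVAL (HZ / 6)

int main(void)
{
	unsigned long jiffies = 100123;	/* hypothetical current jiffies */

	/* Round up to the next multiple of the polling interval, exactly
	 * as sja1105_ptp_extts_setup_timer() computes the timer expiry.
	 */
	unsigned long expires = ((jiffies / SJA1105_EXTTS_INTERVAL) + 1) *
				SJA1105_EXTTS_INTERVAL;

	printf("next poll at jiffy %lu (interval %d)\n",
	       expires, SJA1105_EXTTS_INTERVAL);
	return 0;
}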
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index c38a4b8a14cb..611875ef2cd1 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -786,7 +786,7 @@ static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self,
int err = 0;
if (count > (HW_ATL_A0_MAC_MAX - HW_ATL_A0_MAC_MIN)) {
- err = EBADRQC;
+ err = -EBADRQC;
goto err_exit;
}
for (cfg->mc_list_count = 0U; cfg->mc_list_count < count; ++cfg->mc_list_count) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index b4f0c638c467..31fb5a28e1c4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -3711,67 +3711,189 @@ static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
return 0;
}
-static void bnxt_free_port_stats(struct bnxt *bp)
+static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
{
- struct pci_dev *pdev = bp->pdev;
+ kfree(stats->hw_masks);
+ stats->hw_masks = NULL;
+ kfree(stats->sw_stats);
+ stats->sw_stats = NULL;
+ if (stats->hw_stats) {
+ dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
+ stats->hw_stats_map);
+ stats->hw_stats = NULL;
+ }
+}
- bp->flags &= ~BNXT_FLAG_PORT_STATS;
- bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
+static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
+ bool alloc_masks)
+{
+ stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
+ &stats->hw_stats_map, GFP_KERNEL);
+ if (!stats->hw_stats)
+ return -ENOMEM;
- if (bp->hw_rx_port_stats) {
- dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
- bp->hw_rx_port_stats,
- bp->hw_rx_port_stats_map);
- bp->hw_rx_port_stats = NULL;
- }
+ stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
+ if (!stats->sw_stats)
+ goto stats_mem_err;
- if (bp->hw_tx_port_stats_ext) {
- dma_free_coherent(&pdev->dev, sizeof(struct tx_port_stats_ext),
- bp->hw_tx_port_stats_ext,
- bp->hw_tx_port_stats_ext_map);
- bp->hw_tx_port_stats_ext = NULL;
+ if (alloc_masks) {
+ stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
+ if (!stats->hw_masks)
+ goto stats_mem_err;
}
+ return 0;
+
+stats_mem_err:
+ bnxt_free_stats_mem(bp, stats);
+ return -ENOMEM;
+}
+
+static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ mask_arr[i] = mask;
+}
+
+static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
+}
+
+static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
+ struct bnxt_stats_mem *stats)
+{
+ struct hwrm_func_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_qstats_ext_input req = {0};
+ __le64 *hw_masks;
+ int rc;
+
+ if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
+ !(bp->flags & BNXT_FLAG_CHIP_P5))
+ return -EOPNOTSUPP;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QSTATS_EXT, -1, -1);
+ req.flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ goto qstat_exit;
+
+ hw_masks = &resp->rx_ucast_pkts;
+ bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
+
+qstat_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
+static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
- if (bp->hw_rx_port_stats_ext) {
- dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
- bp->hw_rx_port_stats_ext,
- bp->hw_rx_port_stats_ext_map);
- bp->hw_rx_port_stats_ext = NULL;
+static void bnxt_init_stats(struct bnxt *bp)
+{
+ struct bnxt_napi *bnapi = bp->bnapi[0];
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_stats_mem *stats;
+ __le64 *rx_stats, *tx_stats;
+ int rc, rx_count, tx_count;
+ u64 *rx_masks, *tx_masks;
+ u64 mask;
+ u8 flags;
+
+ cpr = &bnapi->cp_ring;
+ stats = &cpr->stats;
+ rc = bnxt_hwrm_func_qstat_ext(bp, stats);
+ if (rc) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ mask = (1ULL << 48) - 1;
+ else
+ mask = -1ULL;
+ bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
}
+ if (bp->flags & BNXT_FLAG_PORT_STATS) {
+ stats = &bp->port_stats;
+ rx_stats = stats->hw_stats;
+ rx_masks = stats->hw_masks;
+ rx_count = sizeof(struct rx_port_stats) / 8;
+ tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
+ tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
+ tx_count = sizeof(struct tx_port_stats) / 8;
+
+ flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
+ rc = bnxt_hwrm_port_qstats(bp, flags);
+ if (rc) {
+ mask = (1ULL << 40) - 1;
- if (bp->hw_pcie_stats) {
- dma_free_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats),
- bp->hw_pcie_stats, bp->hw_pcie_stats_map);
- bp->hw_pcie_stats = NULL;
+ bnxt_fill_masks(rx_masks, mask, rx_count);
+ bnxt_fill_masks(tx_masks, mask, tx_count);
+ } else {
+ bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
+ bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
+ bnxt_hwrm_port_qstats(bp, 0);
+ }
+ }
+ if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
+ stats = &bp->rx_port_stats_ext;
+ rx_stats = stats->hw_stats;
+ rx_masks = stats->hw_masks;
+ rx_count = sizeof(struct rx_port_stats_ext) / 8;
+ stats = &bp->tx_port_stats_ext;
+ tx_stats = stats->hw_stats;
+ tx_masks = stats->hw_masks;
+ tx_count = sizeof(struct tx_port_stats_ext) / 8;
+
+		flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
+ rc = bnxt_hwrm_port_qstats_ext(bp, flags);
+ if (rc) {
+ mask = (1ULL << 40) - 1;
+
+ bnxt_fill_masks(rx_masks, mask, rx_count);
+ if (tx_stats)
+ bnxt_fill_masks(tx_masks, mask, tx_count);
+ } else {
+ bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
+ if (tx_stats)
+ bnxt_copy_hw_masks(tx_masks, tx_stats,
+ tx_count);
+ bnxt_hwrm_port_qstats_ext(bp, 0);
+ }
}
}
+static void bnxt_free_port_stats(struct bnxt *bp)
+{
+ bp->flags &= ~BNXT_FLAG_PORT_STATS;
+ bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
+
+ bnxt_free_stats_mem(bp, &bp->port_stats);
+ bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
+ bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
+}
+
static void bnxt_free_ring_stats(struct bnxt *bp)
{
- struct pci_dev *pdev = bp->pdev;
- int size, i;
+ int i;
if (!bp->bnapi)
return;
- size = bp->hw_ring_stats_size;
-
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- if (cpr->hw_stats) {
- dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
- cpr->hw_stats_map);
- cpr->hw_stats = NULL;
- }
+ bnxt_free_stats_mem(bp, &cpr->stats);
}
}
static int bnxt_alloc_stats(struct bnxt *bp)
{
u32 size, i;
- struct pci_dev *pdev = bp->pdev;
+ int rc;
size = bp->hw_ring_stats_size;
@@ -3779,11 +3901,10 @@ static int bnxt_alloc_stats(struct bnxt *bp)
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
- &cpr->hw_stats_map,
- GFP_KERNEL);
- if (!cpr->hw_stats)
- return -ENOMEM;
+ cpr->stats.len = size;
+ rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
+ if (rc)
+ return rc;
cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
}
@@ -3791,22 +3912,14 @@ static int bnxt_alloc_stats(struct bnxt *bp)
if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
return 0;
- if (bp->hw_rx_port_stats)
+ if (bp->port_stats.hw_stats)
goto alloc_ext_stats;
- bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
- sizeof(struct tx_port_stats) + 1024;
-
- bp->hw_rx_port_stats =
- dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
- &bp->hw_rx_port_stats_map,
- GFP_KERNEL);
- if (!bp->hw_rx_port_stats)
- return -ENOMEM;
+ bp->port_stats.len = BNXT_PORT_STATS_SIZE;
+ rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
+ if (rc)
+ return rc;
- bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) + 512;
- bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
- sizeof(struct rx_port_stats) + 512;
bp->flags |= BNXT_FLAG_PORT_STATS;
alloc_ext_stats:
@@ -3815,41 +3928,28 @@ alloc_ext_stats:
if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
return 0;
- if (bp->hw_rx_port_stats_ext)
+ if (bp->rx_port_stats_ext.hw_stats)
goto alloc_tx_ext_stats;
- bp->hw_rx_port_stats_ext =
- dma_alloc_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
- &bp->hw_rx_port_stats_ext_map, GFP_KERNEL);
- if (!bp->hw_rx_port_stats_ext)
+ bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
+ rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
+ /* Extended stats are optional */
+ if (rc)
return 0;
alloc_tx_ext_stats:
- if (bp->hw_tx_port_stats_ext)
- goto alloc_pcie_stats;
+ if (bp->tx_port_stats_ext.hw_stats)
+ return 0;
if (bp->hwrm_spec_code >= 0x10902 ||
(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
- bp->hw_tx_port_stats_ext =
- dma_alloc_coherent(&pdev->dev,
- sizeof(struct tx_port_stats_ext),
- &bp->hw_tx_port_stats_ext_map,
- GFP_KERNEL);
+ bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
+ rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
+ /* Extended stats are optional */
+ if (rc)
+ return 0;
}
bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
-
-alloc_pcie_stats:
- if (bp->hw_pcie_stats ||
- !(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
- return 0;
-
- bp->hw_pcie_stats =
- dma_alloc_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats),
- &bp->hw_pcie_stats_map, GFP_KERNEL);
- if (!bp->hw_pcie_stats)
- return 0;
-
- bp->flags |= BNXT_FLAG_PCIE_STATS;
return 0;
}
@@ -3949,6 +4049,8 @@ static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
bnxt_free_ntp_fltrs(bp, irq_re_init);
if (irq_re_init) {
bnxt_free_ring_stats(bp);
+ if (!(bp->fw_cap & BNXT_FW_CAP_PORT_STATS_NO_RESET))
+ bnxt_free_port_stats(bp);
bnxt_free_ring_grps(bp);
bnxt_free_vnics(bp);
kfree(bp->tx_ring_map);
@@ -4052,6 +4154,7 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
rc = bnxt_alloc_stats(bp);
if (rc)
goto alloc_mem_err;
+ bnxt_init_stats(bp);
rc = bnxt_alloc_ntp_fltrs(bp);
if (rc)
@@ -6458,7 +6561,7 @@ static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
+ req.stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
rc = _hwrm_send_message(bp, &req, sizeof(req),
HWRM_CMD_TIMEOUT);
@@ -7489,7 +7592,89 @@ int bnxt_hwrm_fw_set_time(struct bnxt *bp)
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
-static int bnxt_hwrm_port_qstats(struct bnxt *bp)
+static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
+{
+ u64 sw_tmp;
+
+ sw_tmp = (*sw & ~mask) | hw;
+ if (hw < (*sw & mask))
+ sw_tmp += mask + 1;
+ WRITE_ONCE(*sw, sw_tmp);
+}
+
+static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
+ int count, bool ignore_zero)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
+
+ if (ignore_zero && !hw)
+ continue;
+
+ if (masks[i] == -1ULL)
+ sw_stats[i] = hw;
+ else
+ bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
+ }
+}
+
+static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
+{
+ if (!stats->hw_stats)
+ return;
+
+ __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
+ stats->hw_masks, stats->len / 8, false);
+}
+
+static void bnxt_accumulate_all_stats(struct bnxt *bp)
+{
+ struct bnxt_stats_mem *ring0_stats;
+ bool ignore_zero = false;
+ int i;
+
+ /* Chip bug. Counter intermittently becomes 0. */
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ ignore_zero = true;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_stats_mem *stats;
+
+ cpr = &bnapi->cp_ring;
+ stats = &cpr->stats;
+ if (!i)
+ ring0_stats = stats;
+ __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
+ ring0_stats->hw_masks,
+ ring0_stats->len / 8, ignore_zero);
+ }
+ if (bp->flags & BNXT_FLAG_PORT_STATS) {
+ struct bnxt_stats_mem *stats = &bp->port_stats;
+ __le64 *hw_stats = stats->hw_stats;
+ u64 *sw_stats = stats->sw_stats;
+ u64 *masks = stats->hw_masks;
+ int cnt;
+
+ cnt = sizeof(struct rx_port_stats) / 8;
+ __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
+
+ hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
+ sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
+ masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
+ cnt = sizeof(struct tx_port_stats) / 8;
+ __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
+ }
+ if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
+ bnxt_accumulate_stats(&bp->rx_port_stats_ext);
+ bnxt_accumulate_stats(&bp->tx_port_stats_ext);
+ }
+}
+
+static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
{
struct bnxt_pf_info *pf = &bp->pf;
struct hwrm_port_qstats_input req = {0};
@@ -7497,14 +7682,19 @@ static int bnxt_hwrm_port_qstats(struct bnxt *bp)
if (!(bp->flags & BNXT_FLAG_PORT_STATS))
return 0;
+ if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
+ return -EOPNOTSUPP;
+
+ req.flags = flags;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
req.port_id = cpu_to_le16(pf->port_id);
- req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
- req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
+ req.tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
+ BNXT_TX_PORT_STATS_BYTE_OFFSET);
+ req.rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
-static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
+static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
{
struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
@@ -7516,14 +7706,18 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
return 0;
+ if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
+ return -EOPNOTSUPP;
+
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
+ req.flags = flags;
req.port_id = cpu_to_le16(pf->port_id);
req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
- req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
- tx_stat_size = bp->hw_tx_port_stats_ext ?
- sizeof(*bp->hw_tx_port_stats_ext) : 0;
+ req.rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
+ tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
+ sizeof(struct tx_port_stats_ext) : 0;
req.tx_stat_size = cpu_to_le16(tx_stat_size);
- req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map);
+ req.tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc) {
@@ -7534,6 +7728,9 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
bp->fw_rx_stats_ext_size = 0;
bp->fw_tx_stats_ext_size = 0;
}
+ if (flags)
+ goto qstats_done;
+
if (bp->fw_tx_stats_ext_size <=
offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
mutex_unlock(&bp->hwrm_cmd_lock);
@@ -7574,19 +7771,6 @@ qstats_done:
return rc;
}
-static int bnxt_hwrm_pcie_qstats(struct bnxt *bp)
-{
- struct hwrm_pcie_qstats_input req = {0};
-
- if (!(bp->flags & BNXT_FLAG_PCIE_STATS))
- return 0;
-
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1);
- req.pcie_stat_size = cpu_to_le16(sizeof(struct pcie_ctx_hw_stats));
- req.pcie_stat_host_addr = cpu_to_le64(bp->hw_pcie_stats_map);
- return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-}
-
static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
{
if (bp->vxlan_fw_dst_port_id != INVALID_HW_RING_ID)
@@ -8608,6 +8792,9 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
if (BNXT_PF(bp))
bp->fw_cap |= BNXT_FW_CAP_SHARED_PORT_CFG;
}
+ if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET)
+ bp->fw_cap |= BNXT_FW_CAP_PORT_STATS_NO_RESET;
+
if (resp->supported_speeds_auto_mode)
link_info->support_auto_speeds =
le16_to_cpu(resp->supported_speeds_auto_mode);
@@ -9610,34 +9797,33 @@ static void bnxt_get_ring_stats(struct bnxt *bp,
{
int i;
-
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- struct ctx_hw_stats *hw_stats = cpr->hw_stats;
+ u64 *sw = cpr->stats.sw_stats;
- stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
- stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
- stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
+ stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
+ stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
+ stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
- stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
- stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
- stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
+ stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
+ stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
+ stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
- stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
- stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
- stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
+ stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
+ stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
+ stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
- stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
- stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
- stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
+ stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
+ stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
+ stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
stats->rx_missed_errors +=
- le64_to_cpu(hw_stats->rx_discard_pkts);
+ BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
- stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
+ stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
- stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
+ stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
}
}
@@ -9675,19 +9861,26 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
bnxt_add_prev_stats(bp, stats);
if (bp->flags & BNXT_FLAG_PORT_STATS) {
- struct rx_port_stats *rx = bp->hw_rx_port_stats;
- struct tx_port_stats *tx = bp->hw_tx_port_stats;
-
- stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames);
- stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames);
- stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) +
- le64_to_cpu(rx->rx_ovrsz_frames) +
- le64_to_cpu(rx->rx_runt_frames);
- stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) +
- le64_to_cpu(rx->rx_jbr_frames);
- stats->collisions = le64_to_cpu(tx->tx_total_collisions);
- stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
- stats->tx_errors = le64_to_cpu(tx->tx_err);
+ u64 *rx = bp->port_stats.sw_stats;
+ u64 *tx = bp->port_stats.sw_stats +
+ BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
+
+ stats->rx_crc_errors =
+ BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
+ stats->rx_frame_errors =
+ BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
+ stats->rx_length_errors =
+ BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
+ BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
+ BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
+ stats->rx_errors =
+ BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
+ BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
+ stats->collisions =
+ BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
+ stats->tx_fifo_errors =
+ BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
+ stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
}
clear_bit(BNXT_STATE_READ_STATS, &bp->state);
}
@@ -10033,6 +10226,38 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
return rc;
}
+int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
+ u32 *reg_buf)
+{
+ struct hwrm_dbg_read_direct_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_dbg_read_direct_input req = {0};
+ __le32 *dbg_reg_buf;
+ dma_addr_t mapping;
+ int rc, i;
+
+ dbg_reg_buf = dma_alloc_coherent(&bp->pdev->dev, num_words * 4,
+ &mapping, GFP_KERNEL);
+ if (!dbg_reg_buf)
+ return -ENOMEM;
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_READ_DIRECT, -1, -1);
+ req.host_dest_addr = cpu_to_le64(mapping);
+ req.read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
+ req.read_len32 = cpu_to_le32(num_words);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc || resp->error_code) {
+ rc = -EIO;
+ goto dbg_rd_reg_exit;
+ }
+ for (i = 0; i < num_words; i++)
+ reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
+
+dbg_rd_reg_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ dma_free_coherent(&bp->pdev->dev, num_words * 4, dbg_reg_buf, mapping);
+ return rc;
+}
+
static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
u32 ring_id, u32 *prod, u32 *cons)
{
@@ -10177,8 +10402,7 @@ static void bnxt_timer(struct timer_list *t)
if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
bnxt_fw_health_check(bp);
- if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
- bp->stats_coal_ticks) {
+ if (bp->link_info.link_up && bp->stats_coal_ticks) {
set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
bnxt_queue_sp_work(bp);
}
@@ -10464,9 +10688,9 @@ static void bnxt_sp_task(struct work_struct *work)
if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
bnxt_hwrm_exec_fwd_req(bp);
if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
- bnxt_hwrm_port_qstats(bp);
- bnxt_hwrm_port_qstats_ext(bp);
- bnxt_hwrm_pcie_qstats(bp);
+ bnxt_hwrm_port_qstats(bp, 0);
+ bnxt_hwrm_port_qstats_ext(bp, 0);
+ bnxt_accumulate_all_stats(bp);
}
if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
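
The core of the accumulation scheme is bnxt_add_one_ctr(): the hardware counter is only `mask` bits wide, so the software copy keeps the high bits and adds mask + 1 whenever the hardware value wraps below the previously seen low bits. A standalone worked example with a 40-bit counter:

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as bnxt_add_one_ctr(): extend a counter that wraps at
 * `mask` into a full 64-bit software counter.
 */
static void add_one_ctr(uint64_t hw, uint64_t *sw, uint64_t mask)
{
	uint64_t sw_tmp = (*sw & ~mask) | hw;

	if (hw < (*sw & mask))	/* hardware counter wrapped */
		sw_tmp += mask + 1;
	*sw = sw_tmp;
}

int main(void)
{
	uint64_t mask = (1ULL << 40) - 1;	/* 40-bit port counter */
	uint64_t sw = mask - 5;			/* close to the wrap point */

	add_one_ctr(3, &sw, mask);	/* hw wrapped past 2^40 down to 3 */
	printf("sw = %llu\n", (unsigned long long)sw);	/* 2^40 + 3 */
	return 0;
}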
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 2acd7f958246..5a13eb66beda 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -919,6 +919,14 @@ struct bnxt_sw_stats {
struct bnxt_cmn_sw_stats cmn;
};
+struct bnxt_stats_mem {
+ u64 *sw_stats;
+ u64 *hw_masks;
+ void *hw_stats;
+ dma_addr_t hw_stats_map;
+ int len;
+};
+
struct bnxt_cp_ring_info {
struct bnxt_napi *bnapi;
u32 cp_raw_cons;
@@ -943,8 +951,7 @@ struct bnxt_cp_ring_info {
dma_addr_t cp_desc_mapping[MAX_CP_PAGES];
- struct ctx_hw_stats *hw_stats;
- dma_addr_t hw_stats_map;
+ struct bnxt_stats_mem stats;
u32 hw_stats_ctx_id;
struct bnxt_sw_stats sw_stats;
@@ -1135,6 +1142,50 @@ struct bnxt_ntuple_filter {
#define BNXT_FLTR_UPDATE 1
};
+struct hwrm_port_phy_qcfg_output_compat {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 link;
+ u8 link_signal_mode;
+ __le16 link_speed;
+ u8 duplex_cfg;
+ u8 pause;
+ __le16 support_speeds;
+ __le16 force_link_speed;
+ u8 auto_mode;
+ u8 auto_pause;
+ __le16 auto_link_speed;
+ __le16 auto_link_speed_mask;
+ u8 wirespeed;
+ u8 lpbk;
+ u8 force_pause;
+ u8 module_status;
+ __le32 preemphasis;
+ u8 phy_maj;
+ u8 phy_min;
+ u8 phy_bld;
+ u8 phy_type;
+ u8 media_type;
+ u8 xcvr_pkg_type;
+ u8 eee_config_phy_addr;
+ u8 parallel_detect;
+ __le16 link_partner_adv_speeds;
+ u8 link_partner_adv_auto_mode;
+ u8 link_partner_adv_pause;
+ __le16 adv_eee_link_speed_mask;
+ __le16 link_partner_adv_eee_link_speed_mask;
+ __le32 xcvr_identifier_type_tx_lpi_timer;
+ __le16 fec_cfg;
+ u8 duplex_state;
+ u8 option_flags;
+ char phy_vendor_name[16];
+ char phy_vendor_partnumber[16];
+ u8 unused_0[7];
+ u8 valid;
+};
+
struct bnxt_link_info {
u8 phy_type;
u8 media_type;
@@ -1253,6 +1304,9 @@ struct bnxt_test_info {
char string[BNXT_MAX_TEST][ETH_GSTRING_LEN];
};
+#define CHIMP_REG_VIEW_ADDR \
+ ((bp->flags & BNXT_FLAG_CHIP_P5) ? 0x80000000 : 0xb1000000)
+
#define BNXT_GRCPF_REG_CHIMP_COMM 0x0
#define BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER 0x100
#define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400
@@ -1566,7 +1620,6 @@ struct bnxt {
#define BNXT_FLAG_DIM 0x2000000
#define BNXT_FLAG_ROCE_MIRROR_CAP 0x4000000
#define BNXT_FLAG_PORT_STATS_EXT 0x10000000
- #define BNXT_FLAG_PCIE_STATS 0x40000000
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
BNXT_FLAG_RFS | \
@@ -1719,6 +1772,7 @@ struct bnxt {
#define BNXT_FW_CAP_VLAN_RX_STRIP 0x01000000
#define BNXT_FW_CAP_VLAN_TX_INSERT 0x02000000
#define BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED 0x04000000
+ #define BNXT_FW_CAP_PORT_STATS_NO_RESET 0x10000000
#define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
u32 hwrm_spec_code;
@@ -1733,17 +1787,9 @@ struct bnxt {
dma_addr_t hwrm_cmd_kong_resp_dma_addr;
struct rtnl_link_stats64 net_stats_prev;
- struct rx_port_stats *hw_rx_port_stats;
- struct tx_port_stats *hw_tx_port_stats;
- struct rx_port_stats_ext *hw_rx_port_stats_ext;
- struct tx_port_stats_ext *hw_tx_port_stats_ext;
- struct pcie_ctx_hw_stats *hw_pcie_stats;
- dma_addr_t hw_rx_port_stats_map;
- dma_addr_t hw_tx_port_stats_map;
- dma_addr_t hw_rx_port_stats_ext_map;
- dma_addr_t hw_tx_port_stats_ext_map;
- dma_addr_t hw_pcie_stats_map;
- int hw_port_stats_size;
+ struct bnxt_stats_mem port_stats;
+ struct bnxt_stats_mem rx_port_stats_ext;
+ struct bnxt_stats_mem tx_port_stats_ext;
u16 fw_rx_stats_ext_size;
u16 fw_tx_stats_ext_size;
u16 hw_ring_stats_size;
@@ -1885,12 +1931,27 @@ struct bnxt {
struct device *hwmon_dev;
};
+#define BNXT_GET_RING_STATS64(sw, counter) \
+ (*((sw) + offsetof(struct ctx_hw_stats, counter) / 8))
+
+#define BNXT_GET_RX_PORT_STATS64(sw, counter) \
+ (*((sw) + offsetof(struct rx_port_stats, counter) / 8))
+
+#define BNXT_GET_TX_PORT_STATS64(sw, counter) \
+ (*((sw) + offsetof(struct tx_port_stats, counter) / 8))
+
+#define BNXT_PORT_STATS_SIZE \
+ (sizeof(struct rx_port_stats) + sizeof(struct tx_port_stats) + 1024)
+
+#define BNXT_TX_PORT_STATS_BYTE_OFFSET \
+ (sizeof(struct rx_port_stats) + 512)
+
#define BNXT_RX_STATS_OFFSET(counter) \
(offsetof(struct rx_port_stats, counter) / 8)
#define BNXT_TX_STATS_OFFSET(counter) \
((offsetof(struct tx_port_stats, counter) + \
- sizeof(struct rx_port_stats) + 512) / 8)
+ BNXT_TX_PORT_STATS_BYTE_OFFSET) / 8)
#define BNXT_RX_STATS_EXT_OFFSET(counter) \
(offsetof(struct rx_port_stats_ext, counter) / 8)
@@ -1898,9 +1959,6 @@ struct bnxt {
#define BNXT_TX_STATS_EXT_OFFSET(counter) \
(offsetof(struct tx_port_stats_ext, counter) / 8)
-#define BNXT_PCIE_STATS_OFFSET(counter) \
- (offsetof(struct pcie_ctx_hw_stats, counter) / 8)
-
#define BNXT_HW_FEATURE_VLAN_ALL_RX \
(NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)
#define BNXT_HW_FEATURE_VLAN_ALL_TX \
@@ -2062,6 +2120,8 @@ int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_half_open_nic(struct bnxt *bp);
void bnxt_half_close_nic(struct bnxt *bp);
int bnxt_close_nic(struct bnxt *, bool, bool);
+int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
+ u32 *reg_buf);
void bnxt_fw_exception(struct bnxt *bp);
void bnxt_fw_reset(struct bnxt *bp);
int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
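
The BNXT_GET_*_STATS64 macros added above index the flat u64 software array by the corresponding hardware structure's field offset divided by 8, so the same field names work against both layouts. A minimal sketch with a shortened stand-in structure:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct ctx_hw_stats_demo {	/* shortened stand-in for ctx_hw_stats */
	uint64_t rx_ucast_pkts;
	uint64_t rx_mcast_pkts;
	uint64_t rx_bcast_pkts;
};

/* Same trick as BNXT_GET_RING_STATS64: index a flat u64 array by the
 * field's byte offset divided by 8.
 */
#define GET_RING_STATS64(sw, counter) \
	(*((sw) + offsetof(struct ctx_hw_stats_demo, counter) / 8))

int main(void)
{
	uint64_t sw_stats[3] = { 10, 20, 30 };

	printf("%llu\n", (unsigned long long)
	       GET_RING_STATS64(sw_stats, rx_mcast_pkts));	/* 20 */
	return 0;
}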
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index 02b27551d34d..8e90224c43a2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -544,7 +544,7 @@ static int bnxt_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
static int bnxt_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
struct bnxt *bp = netdev_priv(dev);
- __le64 *stats = (__le64 *)bp->hw_rx_port_stats;
+ __le64 *stats = bp->port_stats.hw_stats;
struct ieee_pfc *my_pfc = bp->ieee_pfc;
long rx_off, tx_off;
int i, rc;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 25252d5beec8..64da654f1038 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -142,7 +142,7 @@ static const char * const bnxt_ring_rx_stats_str[] = {
"rx_mcast_packets",
"rx_bcast_packets",
"rx_discards",
- "rx_drops",
+ "rx_errors",
"rx_ucast_bytes",
"rx_mcast_bytes",
"rx_bcast_bytes",
@@ -152,8 +152,8 @@ static const char * const bnxt_ring_tx_stats_str[] = {
"tx_ucast_packets",
"tx_mcast_packets",
"tx_bcast_packets",
+ "tx_errors",
"tx_discards",
- "tx_drops",
"tx_ucast_bytes",
"tx_mcast_bytes",
"tx_bcast_bytes",
@@ -293,9 +293,6 @@ static const char * const bnxt_cmn_sw_stats_str[] = {
BNXT_TX_STATS_PRI_ENTRY(counter, 6), \
BNXT_TX_STATS_PRI_ENTRY(counter, 7)
-#define BNXT_PCIE_STATS_ENTRY(counter) \
- { BNXT_PCIE_STATS_OFFSET(counter), __stringify(counter) }
-
enum {
RX_TOTAL_DISCARDS,
TX_TOTAL_DISCARDS,
@@ -454,24 +451,6 @@ static const struct {
BNXT_TX_STATS_PRI_ENTRIES(tx_packets),
};
-static const struct {
- long offset;
- char string[ETH_GSTRING_LEN];
-} bnxt_pcie_stats_arr[] = {
- BNXT_PCIE_STATS_ENTRY(pcie_pl_signal_integrity),
- BNXT_PCIE_STATS_ENTRY(pcie_dl_signal_integrity),
- BNXT_PCIE_STATS_ENTRY(pcie_tl_signal_integrity),
- BNXT_PCIE_STATS_ENTRY(pcie_link_integrity),
- BNXT_PCIE_STATS_ENTRY(pcie_tx_traffic_rate),
- BNXT_PCIE_STATS_ENTRY(pcie_rx_traffic_rate),
- BNXT_PCIE_STATS_ENTRY(pcie_tx_dllp_statistics),
- BNXT_PCIE_STATS_ENTRY(pcie_rx_dllp_statistics),
- BNXT_PCIE_STATS_ENTRY(pcie_equalization_time),
- BNXT_PCIE_STATS_ENTRY(pcie_ltssm_histogram[0]),
- BNXT_PCIE_STATS_ENTRY(pcie_ltssm_histogram[2]),
- BNXT_PCIE_STATS_ENTRY(pcie_recovery_histogram),
-};
-
#define BNXT_NUM_SW_FUNC_STATS ARRAY_SIZE(bnxt_sw_func_stats)
#define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
#define BNXT_NUM_STATS_PRI \
@@ -479,7 +458,6 @@ static const struct {
ARRAY_SIZE(bnxt_rx_pkts_pri_arr) + \
ARRAY_SIZE(bnxt_tx_bytes_pri_arr) + \
ARRAY_SIZE(bnxt_tx_pkts_pri_arr))
-#define BNXT_NUM_PCIE_STATS ARRAY_SIZE(bnxt_pcie_stats_arr)
static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
{
@@ -526,9 +504,6 @@ static int bnxt_get_num_stats(struct bnxt *bp)
num_stats += BNXT_NUM_STATS_PRI;
}
- if (bp->flags & BNXT_FLAG_PCIE_STATS)
- num_stats += BNXT_NUM_PCIE_STATS;
-
return num_stats;
}
@@ -584,19 +559,19 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- __le64 *hw_stats = (__le64 *)cpr->hw_stats;
+ u64 *sw_stats = cpr->stats.sw_stats;
u64 *sw;
int k;
if (is_rx_ring(bp, i)) {
for (k = 0; k < NUM_RING_RX_HW_STATS; j++, k++)
- buf[j] = le64_to_cpu(hw_stats[k]);
+ buf[j] = sw_stats[k];
}
if (is_tx_ring(bp, i)) {
k = NUM_RING_RX_HW_STATS;
for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
j++, k++)
- buf[j] = le64_to_cpu(hw_stats[k]);
+ buf[j] = sw_stats[k];
}
if (!tpa_stats || !is_rx_ring(bp, i))
goto skip_tpa_ring_stats;
@@ -604,7 +579,7 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
k = NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS +
tpa_stats; j++, k++)
- buf[j] = le64_to_cpu(hw_stats[k]);
+ buf[j] = sw_stats[k];
skip_tpa_ring_stats:
sw = (u64 *)&cpr->sw_stats.rx;
@@ -618,9 +593,9 @@ skip_tpa_ring_stats:
buf[j] = sw[k];
bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter +=
- le64_to_cpu(cpr->hw_stats->rx_discard_pkts);
+ BNXT_GET_RING_STATS64(sw_stats, rx_discard_pkts);
bnxt_sw_func_stats[TX_TOTAL_DISCARDS].counter +=
- le64_to_cpu(cpr->hw_stats->tx_discard_pkts);
+ BNXT_GET_RING_STATS64(sw_stats, tx_discard_pkts);
}
for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++, j++)
@@ -628,60 +603,50 @@ skip_tpa_ring_stats:
skip_ring_stats:
if (bp->flags & BNXT_FLAG_PORT_STATS) {
- __le64 *port_stats = (__le64 *)bp->hw_rx_port_stats;
+ u64 *port_stats = bp->port_stats.sw_stats;
- for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++) {
- buf[j] = le64_to_cpu(*(port_stats +
- bnxt_port_stats_arr[i].offset));
- }
+ for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++)
+ buf[j] = *(port_stats + bnxt_port_stats_arr[i].offset);
}
if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
- __le64 *rx_port_stats_ext = (__le64 *)bp->hw_rx_port_stats_ext;
- __le64 *tx_port_stats_ext = (__le64 *)bp->hw_tx_port_stats_ext;
+ u64 *rx_port_stats_ext = bp->rx_port_stats_ext.sw_stats;
+ u64 *tx_port_stats_ext = bp->tx_port_stats_ext.sw_stats;
for (i = 0; i < bp->fw_rx_stats_ext_size; i++, j++) {
- buf[j] = le64_to_cpu(*(rx_port_stats_ext +
- bnxt_port_stats_ext_arr[i].offset));
+ buf[j] = *(rx_port_stats_ext +
+ bnxt_port_stats_ext_arr[i].offset);
}
for (i = 0; i < bp->fw_tx_stats_ext_size; i++, j++) {
- buf[j] = le64_to_cpu(*(tx_port_stats_ext +
- bnxt_tx_port_stats_ext_arr[i].offset));
+ buf[j] = *(tx_port_stats_ext +
+ bnxt_tx_port_stats_ext_arr[i].offset);
}
if (bp->pri2cos_valid) {
for (i = 0; i < 8; i++, j++) {
long n = bnxt_rx_bytes_pri_arr[i].base_off +
bp->pri2cos_idx[i];
- buf[j] = le64_to_cpu(*(rx_port_stats_ext + n));
+ buf[j] = *(rx_port_stats_ext + n);
}
for (i = 0; i < 8; i++, j++) {
long n = bnxt_rx_pkts_pri_arr[i].base_off +
bp->pri2cos_idx[i];
- buf[j] = le64_to_cpu(*(rx_port_stats_ext + n));
+ buf[j] = *(rx_port_stats_ext + n);
}
for (i = 0; i < 8; i++, j++) {
long n = bnxt_tx_bytes_pri_arr[i].base_off +
bp->pri2cos_idx[i];
- buf[j] = le64_to_cpu(*(tx_port_stats_ext + n));
+ buf[j] = *(tx_port_stats_ext + n);
}
for (i = 0; i < 8; i++, j++) {
long n = bnxt_tx_pkts_pri_arr[i].base_off +
bp->pri2cos_idx[i];
- buf[j] = le64_to_cpu(*(tx_port_stats_ext + n));
+ buf[j] = *(tx_port_stats_ext + n);
}
}
}
- if (bp->flags & BNXT_FLAG_PCIE_STATS) {
- __le64 *pcie_stats = (__le64 *)bp->hw_pcie_stats;
-
- for (i = 0; i < BNXT_NUM_PCIE_STATS; i++, j++) {
- buf[j] = le64_to_cpu(*(pcie_stats +
- bnxt_pcie_stats_arr[i].offset));
- }
- }
}
static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
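[Editor's note] The conversions in the hunks above all follow from one change: the per-ring counters that firmware DMAs into memory as __le64 values are now snapshotted into a CPU-order u64 mirror (stats.sw_stats), so readers index the mirror directly instead of byte-swapping on every access. A minimal sketch of that pattern, assuming such a copy step runs whenever the counters are refreshed (the helper name is invented, not from the patch):

/* Hypothetical refresh step: convert the firmware's little-endian
 * counter block into a host-order mirror once, so hot paths like
 * bnxt_get_ethtool_stats() can read sw_stats[] without le64_to_cpu().
 */
static void snapshot_stats(const __le64 *hw_stats, u64 *sw_stats, int count)
{
        int i;

        for (i = 0; i < count; i++)
                sw_stats[i] = le64_to_cpu(hw_stats[i]);
}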
@@ -782,12 +747,6 @@ skip_tpa_stats:
}
}
}
- if (bp->flags & BNXT_FLAG_PCIE_STATS) {
- for (i = 0; i < BNXT_NUM_PCIE_STATS; i++) {
- strcpy(buf, bnxt_pcie_stats_arr[i].string);
- buf += ETH_GSTRING_LEN;
- }
- }
break;
case ETH_SS_TEST:
if (bp->num_tests)
@@ -1365,6 +1324,59 @@ static void bnxt_get_drvinfo(struct net_device *dev,
info->regdump_len = 0;
}
+static int bnxt_get_regs_len(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int reg_len;
+
+ reg_len = BNXT_PXP_REG_LEN;
+
+ if (bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)
+ reg_len += sizeof(struct pcie_ctx_hw_stats);
+
+ return reg_len;
+}
+
+static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *_p)
+{
+ struct pcie_ctx_hw_stats *hw_pcie_stats;
+ struct hwrm_pcie_qstats_input req = {0};
+ struct bnxt *bp = netdev_priv(dev);
+ dma_addr_t hw_pcie_stats_addr;
+ int rc;
+
+ regs->version = 0;
+ bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p);
+
+ if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
+ return;
+
+ hw_pcie_stats = dma_alloc_coherent(&bp->pdev->dev,
+ sizeof(*hw_pcie_stats),
+ &hw_pcie_stats_addr, GFP_KERNEL);
+ if (!hw_pcie_stats)
+ return;
+
+ regs->version = 1;
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1);
+ req.pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats));
+ req.pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ __le64 *src = (__le64 *)hw_pcie_stats;
+ u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN);
+ int i;
+
+ for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++)
+ dst[i] = le64_to_cpu(src[i]);
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ dma_free_coherent(&bp->pdev->dev, sizeof(*hw_pcie_stats), hw_pcie_stats,
+ hw_pcie_stats_addr);
+}
+
static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct bnxt *bp = netdev_priv(dev);
@@ -3640,6 +3652,8 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.get_pauseparam = bnxt_get_pauseparam,
.set_pauseparam = bnxt_set_pauseparam,
.get_drvinfo = bnxt_get_drvinfo,
+ .get_regs_len = bnxt_get_regs_len,
+ .get_regs = bnxt_get_regs,
.get_wol = bnxt_get_wol,
.set_wol = bnxt_set_wol,
.get_coalesce = bnxt_get_coalesce,
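[Editor's note] For context on the two hooks registered above: the ethtool core calls .get_regs_len to size the dump buffer and .get_regs to fill it, and user space reaches both through the SIOCETHTOOL ioctl. A self-contained user-space sketch (the interface name "eth0" is an assumption, and error handling is minimal):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        struct ethtool_drvinfo drvinfo = { .cmd = ETHTOOL_GDRVINFO };
        struct ethtool_regs *regs;
        struct ifreq ifr = {0};
        int fd;

        fd = socket(AF_INET, SOCK_DGRAM, 0);
        if (fd < 0)
                return 1;
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

        /* regdump_len is filled by the core from .get_regs_len() */
        ifr.ifr_data = (void *)&drvinfo;
        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
                return 1;

        regs = calloc(1, sizeof(*regs) + drvinfo.regdump_len);
        if (!regs)
                return 1;
        regs->cmd = ETHTOOL_GREGS;
        regs->len = drvinfo.regdump_len;

        /* the driver's .get_regs() fills regs->version and regs->data[] */
        ifr.ifr_data = (void *)regs;
        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
                return 1;

        printf("regs version %u, %u bytes\n", regs->version, regs->len);
        free(regs);
        close(fd);
        return 0;
}

The version field mirrors what bnxt_get_regs() sets: 0 when only the PXP registers are present, 1 when the PCIe statistics block follows them.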
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index dddbca1d052c..34f44ddfad79 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -84,6 +84,8 @@ struct hwrm_dbg_cmn_output {
ETH_RESET_PHY | ETH_RESET_RAM) \
<< ETH_RESET_SHARED_SHIFT)
+#define BNXT_PXP_REG_LEN 0x3110
+
extern const struct ethtool_ops bnxt_ethtool_ops;
u32 bnxt_get_rxfh_indir_size(struct net_device *dev);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 7e9235c8d21e..c4af6bf15e36 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -169,9 +169,14 @@ struct cmd_nums {
#define HWRM_RING_CMPL_RING_QAGGINT_PARAMS 0x52UL
#define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS 0x53UL
#define HWRM_RING_AGGINT_QCAPS 0x54UL
+ #define HWRM_RING_SCHQ_ALLOC 0x55UL
+ #define HWRM_RING_SCHQ_CFG 0x56UL
+ #define HWRM_RING_SCHQ_FREE 0x57UL
#define HWRM_RING_RESET 0x5eUL
#define HWRM_RING_GRP_ALLOC 0x60UL
#define HWRM_RING_GRP_FREE 0x61UL
+ #define HWRM_RING_CFG 0x62UL
+ #define HWRM_RING_QCFG 0x63UL
#define HWRM_RESERVED5 0x64UL
#define HWRM_RESERVED6 0x65UL
#define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC 0x70UL
@@ -224,6 +229,7 @@ struct cmd_nums {
#define HWRM_FW_IPC_MAILBOX 0xccUL
#define HWRM_FW_ECN_CFG 0xcdUL
#define HWRM_FW_ECN_QCFG 0xceUL
+ #define HWRM_FW_SECURE_CFG 0xcfUL
#define HWRM_EXEC_FWD_RESP 0xd0UL
#define HWRM_REJECT_FWD_RESP 0xd1UL
#define HWRM_FWD_RESP 0xd2UL
@@ -337,6 +343,7 @@ struct cmd_nums {
#define HWRM_FUNC_VF_BW_QCFG 0x196UL
#define HWRM_FUNC_HOST_PF_IDS_QUERY 0x197UL
#define HWRM_FUNC_QSTATS_EXT 0x198UL
+ #define HWRM_STAT_EXT_CTX_QUERY 0x199UL
#define HWRM_SELFTEST_QLIST 0x200UL
#define HWRM_SELFTEST_EXEC 0x201UL
#define HWRM_SELFTEST_IRQ 0x202UL
@@ -353,24 +360,30 @@ struct cmd_nums {
#define HWRM_TF_VERSION_GET 0x2bdUL
#define HWRM_TF_SESSION_OPEN 0x2c6UL
#define HWRM_TF_SESSION_ATTACH 0x2c7UL
- #define HWRM_TF_SESSION_CLOSE 0x2c8UL
- #define HWRM_TF_SESSION_QCFG 0x2c9UL
- #define HWRM_TF_SESSION_RESC_QCAPS 0x2caUL
- #define HWRM_TF_SESSION_RESC_ALLOC 0x2cbUL
- #define HWRM_TF_SESSION_RESC_FREE 0x2ccUL
- #define HWRM_TF_SESSION_RESC_FLUSH 0x2cdUL
- #define HWRM_TF_TBL_TYPE_GET 0x2d0UL
- #define HWRM_TF_TBL_TYPE_SET 0x2d1UL
- #define HWRM_TF_CTXT_MEM_RGTR 0x2daUL
- #define HWRM_TF_CTXT_MEM_UNRGTR 0x2dbUL
- #define HWRM_TF_EXT_EM_QCAPS 0x2dcUL
- #define HWRM_TF_EXT_EM_OP 0x2ddUL
- #define HWRM_TF_EXT_EM_CFG 0x2deUL
- #define HWRM_TF_EXT_EM_QCFG 0x2dfUL
- #define HWRM_TF_TCAM_SET 0x2eeUL
- #define HWRM_TF_TCAM_GET 0x2efUL
- #define HWRM_TF_TCAM_MOVE 0x2f0UL
- #define HWRM_TF_TCAM_FREE 0x2f1UL
+ #define HWRM_TF_SESSION_REGISTER 0x2c8UL
+ #define HWRM_TF_SESSION_UNREGISTER 0x2c9UL
+ #define HWRM_TF_SESSION_CLOSE 0x2caUL
+ #define HWRM_TF_SESSION_QCFG 0x2cbUL
+ #define HWRM_TF_SESSION_RESC_QCAPS 0x2ccUL
+ #define HWRM_TF_SESSION_RESC_ALLOC 0x2cdUL
+ #define HWRM_TF_SESSION_RESC_FREE 0x2ceUL
+ #define HWRM_TF_SESSION_RESC_FLUSH 0x2cfUL
+ #define HWRM_TF_TBL_TYPE_GET 0x2daUL
+ #define HWRM_TF_TBL_TYPE_SET 0x2dbUL
+ #define HWRM_TF_CTXT_MEM_RGTR 0x2e4UL
+ #define HWRM_TF_CTXT_MEM_UNRGTR 0x2e5UL
+ #define HWRM_TF_EXT_EM_QCAPS 0x2e6UL
+ #define HWRM_TF_EXT_EM_OP 0x2e7UL
+ #define HWRM_TF_EXT_EM_CFG 0x2e8UL
+ #define HWRM_TF_EXT_EM_QCFG 0x2e9UL
+ #define HWRM_TF_EM_INSERT 0x2eaUL
+ #define HWRM_TF_EM_DELETE 0x2ebUL
+ #define HWRM_TF_TCAM_SET 0x2f8UL
+ #define HWRM_TF_TCAM_GET 0x2f9UL
+ #define HWRM_TF_TCAM_MOVE 0x2faUL
+ #define HWRM_TF_TCAM_FREE 0x2fbUL
+ #define HWRM_TF_GLOBAL_CFG_SET 0x2fcUL
+ #define HWRM_TF_GLOBAL_CFG_GET 0x2fdUL
#define HWRM_SV 0x400UL
#define HWRM_DBG_READ_DIRECT 0xff10UL
#define HWRM_DBG_READ_INDIRECT 0xff11UL
@@ -391,6 +404,7 @@ struct cmd_nums {
#define HWRM_DBG_QCAPS 0xff20UL
#define HWRM_DBG_QCFG 0xff21UL
#define HWRM_DBG_CRASHDUMP_MEDIUM_CFG 0xff22UL
+ #define HWRM_NVM_REQ_ARBITRATION 0xffedUL
#define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL
#define HWRM_NVM_VALIDATE_OPTION 0xffefUL
#define HWRM_NVM_FLUSH 0xfff0UL
@@ -464,8 +478,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 1
-#define HWRM_VERSION_RSVD 33
-#define HWRM_VERSION_STR "1.10.1.33"
+#define HWRM_VERSION_RSVD 54
+#define HWRM_VERSION_STR "1.10.1.54"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -1094,6 +1108,8 @@ struct hwrm_func_vf_cfg_input {
#define FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x20UL
#define FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x40UL
#define FUNC_VF_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x80UL
+ #define FUNC_VF_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE 0x100UL
+ #define FUNC_VF_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x200UL
__le16 num_rsscos_ctxs;
__le16 num_cmpl_rings;
__le16 num_tx_rings;
@@ -1189,10 +1205,16 @@ struct hwrm_func_qcaps_output {
__le16 max_sp_tx_rings;
u8 unused_0[2];
__le32 flags_ext;
- #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_MARK_SUPPORTED 0x1UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_STATS_SUPPORTED 0x2UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED 0x4UL
- u8 unused_1[3];
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_MARK_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_STATS_SUPPORTED 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PROXY_MODE_SUPPORT 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_PROXY_SRC_INTF_OVERRIDE_SUPPORT 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_SCHQ_SUPPORTED 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PPP_PUSH_MODE_SUPPORTED 0x80UL
+ u8 max_schqs;
+ u8 unused_1[2];
u8 valid;
};
@@ -1226,6 +1248,8 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_FLAGS_TRUSTED_VF 0x40UL
#define FUNC_QCFG_RESP_FLAGS_SECURE_MODE_ENABLED 0x80UL
#define FUNC_QCFG_RESP_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x100UL
+ #define FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED 0x200UL
+ #define FUNC_QCFG_RESP_FLAGS_PPP_PUSH_MODE_ENABLED 0x400UL
u8 mac_address[6];
__le16 pci_id;
__le16 alloc_rsscos_ctx;
@@ -1321,7 +1345,7 @@ struct hwrm_func_qcfg_output {
u8 valid;
};
-/* hwrm_func_cfg_input (size:704b/88B) */
+/* hwrm_func_cfg_input (size:768b/96B) */
struct hwrm_func_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -1352,30 +1376,35 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST 0x800000UL
#define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE 0x1000000UL
#define FUNC_CFG_REQ_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x2000000UL
+ #define FUNC_CFG_REQ_FLAGS_HOT_RESET_IF_EN_DIS 0x4000000UL
+ #define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE 0x8000000UL
+ #define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x10000000UL
__le32 enables;
- #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
- #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
- #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL
- #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL
- #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL
- #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL
- #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL
- #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL
- #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL
- #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL
- #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL
- #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL
- #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL
- #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL
- #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL
- #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL
- #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL
- #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL
- #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL
- #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL
- #define FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE 0x100000UL
- #define FUNC_CFG_REQ_ENABLES_NUM_MSIX 0x200000UL
- #define FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE 0x400000UL
+ #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
+ #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL
+ #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL
+ #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL
+ #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL
+ #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL
+ #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL
+ #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL
+ #define FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE 0x100000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_MSIX 0x200000UL
+ #define FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE 0x400000UL
+ #define FUNC_CFG_REQ_ENABLES_HOT_RESET_IF_SUPPORT 0x800000UL
+ #define FUNC_CFG_REQ_ENABLES_SCHQ_ID 0x1000000UL
__le16 mtu;
__le16 mru;
__le16 num_rsscos_ctxs;
@@ -1449,6 +1478,8 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_OPTIONS_RSVD_MASK 0xf0UL
#define FUNC_CFG_REQ_OPTIONS_RSVD_SFT 4
__le16 num_mcast_filters;
+ __le16 schq_id;
+ u8 unused_0[6];
};
/* hwrm_func_cfg_output (size:128b/16B) */
@@ -1507,7 +1538,7 @@ struct hwrm_func_qstats_output {
u8 valid;
};
-/* hwrm_func_qstats_ext_input (size:192b/24B) */
+/* hwrm_func_qstats_ext_input (size:256b/32B) */
struct hwrm_func_qstats_ext_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -1520,7 +1551,12 @@ struct hwrm_func_qstats_ext_input {
#define FUNC_QSTATS_EXT_REQ_FLAGS_ROCE_ONLY 0x1UL
#define FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x2UL
#define FUNC_QSTATS_EXT_REQ_FLAGS_LAST FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK
- u8 unused_0[5];
+ u8 unused_0[1];
+ __le32 enables;
+ #define FUNC_QSTATS_EXT_REQ_ENABLES_SCHQ_ID 0x1UL
+ __le16 schq_id;
+ __le16 traffic_class;
+ u8 unused_1[4];
};
/* hwrm_func_qstats_ext_output (size:1472b/184B) */
@@ -1533,15 +1569,15 @@ struct hwrm_func_qstats_ext_output {
__le64 rx_mcast_pkts;
__le64 rx_bcast_pkts;
__le64 rx_discard_pkts;
- __le64 rx_drop_pkts;
+ __le64 rx_error_pkts;
__le64 rx_ucast_bytes;
__le64 rx_mcast_bytes;
__le64 rx_bcast_bytes;
__le64 tx_ucast_pkts;
__le64 tx_mcast_pkts;
__le64 tx_bcast_pkts;
+ __le64 tx_error_pkts;
__le64 tx_discard_pkts;
- __le64 tx_drop_pkts;
__le64 tx_ucast_bytes;
__le64 tx_mcast_bytes;
__le64 tx_bcast_bytes;
@@ -2376,33 +2412,39 @@ struct hwrm_port_phy_cfg_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL
- #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL
- #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
- #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL
+ #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
+ #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_ENABLE 0x8000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_DISABLE 0x10000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_2XN_ENABLE 0x20000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_2XN_DISABLE 0x40000UL
__le32 enables;
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL
- #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL
- #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL
- #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL
- #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL
- #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL
- #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL
+ #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL
+ #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL
+ #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL
+ #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL
+ #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL
+ #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
+ #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED 0x800UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK 0x1000UL
__le16 port_id;
__le16 force_link_speed;
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB 0x1UL
@@ -2415,7 +2457,6 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB 0x190UL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB 0x1f4UL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_200GB 0x7d0UL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB 0xffffUL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB
u8 auto_mode;
@@ -2446,7 +2487,6 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB 0x190UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB 0x1f4UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_200GB 0x7d0UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB 0xffffUL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_LAST PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB
__le16 auto_link_speed_mask;
@@ -2464,7 +2504,6 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB 0x800UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_200GB 0x4000UL
u8 wirespeed;
#define PORT_PHY_CFG_REQ_WIRESPEED_OFF 0x0UL
#define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL
@@ -2488,11 +2527,19 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB 0x40UL
- u8 unused_2[2];
+ __le16 force_pam4_link_speed;
+ #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB 0x7d0UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB
__le32 tx_lpi_timer;
#define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK 0xffffffUL
#define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT 0
- __le32 unused_3;
+ __le16 auto_link_pam4_speed_mask;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_50G 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_100G 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_200G 0x4UL
+ u8 unused_2[2];
};
/* hwrm_port_phy_cfg_output (size:128b/16B) */
@@ -2526,7 +2573,7 @@ struct hwrm_port_phy_qcfg_input {
u8 unused_0[6];
};
-/* hwrm_port_phy_qcfg_output (size:768b/96B) */
+/* hwrm_port_phy_qcfg_output (size:832b/104B) */
struct hwrm_port_phy_qcfg_output {
__le16 error_code;
__le16 req_type;
@@ -2537,7 +2584,10 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_LINK_SIGNAL 0x1UL
#define PORT_PHY_QCFG_RESP_LINK_LINK 0x2UL
#define PORT_PHY_QCFG_RESP_LINK_LAST PORT_PHY_QCFG_RESP_LINK_LINK
- u8 unused_0;
+ u8 link_signal_mode;
+ #define PORT_PHY_QCFG_RESP_LINK_SIGNAL_MODE_NRZ 0x0UL
+ #define PORT_PHY_QCFG_RESP_LINK_SIGNAL_MODE_PAM4 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_SIGNAL_MODE_LAST PORT_PHY_QCFG_RESP_LINK_SIGNAL_MODE_PAM4
__le16 link_speed;
#define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB 0x1UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB 0xaUL
@@ -2574,7 +2624,6 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB 0x800UL
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_200GB 0x4000UL
__le16 force_link_speed;
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB 0x1UL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB 0xaUL
@@ -2586,7 +2635,6 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB 0x190UL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB 0x1f4UL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_200GB 0x7d0UL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB 0xffffUL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB
u8 auto_mode;
@@ -2611,7 +2659,6 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB 0x190UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB 0x1f4UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_200GB 0x7d0UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB 0xffffUL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB
__le16 auto_link_speed_mask;
@@ -2629,7 +2676,6 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB 0x800UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_200GB 0x4000UL
u8 wirespeed;
#define PORT_PHY_QCFG_RESP_WIRESPEED_OFF 0x0UL
#define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL
@@ -2763,13 +2809,21 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24)
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28
__le16 fec_cfg;
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_SUPPORTED 0x80UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_ENABLED 0x100UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_2XN_SUPPORTED 0x200UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_2XN_ENABLED 0x400UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ACTIVE 0x800UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ACTIVE 0x1000UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_ACTIVE 0x2000UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_2XN_ACTIVE 0x4000UL
u8 duplex_state;
#define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL
#define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL
@@ -2778,7 +2832,24 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_OPTION_FLAGS_MEDIA_AUTO_DETECT 0x1UL
char phy_vendor_name[16];
char phy_vendor_partnumber[16];
- u8 unused_2[7];
+ __le16 support_pam4_speeds;
+ #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_50G 0x1UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_100G 0x2UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_200G 0x4UL
+ __le16 force_pam4_link_speed;
+ #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_200GB 0x7d0UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_200GB
+ __le16 auto_pam4_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_50G 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_100G 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_200G 0x4UL
+ __le16 link_partner_pam4_adv_speeds;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_50GB 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_100GB 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_200GB 0x4UL
+ u8 unused_0[7];
u8 valid;
};
@@ -3304,19 +3375,20 @@ struct hwrm_port_phy_qcaps_input {
u8 unused_0[6];
};
-/* hwrm_port_phy_qcaps_output (size:192b/24B) */
+/* hwrm_port_phy_qcaps_output (size:256b/32B) */
struct hwrm_port_phy_qcaps_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
u8 flags;
- #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED 0x4UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED 0x8UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xf0UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 4
+ #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED 0x4UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED 0x8UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET 0x10UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xe0UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 5
u8 port_cnt;
#define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
#define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL
@@ -3339,7 +3411,6 @@ struct hwrm_port_phy_qcaps_output {
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_200GB 0x4000UL
__le16 supported_speeds_auto_mode;
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL
@@ -3355,7 +3426,6 @@ struct hwrm_port_phy_qcaps_output {
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_200GB 0x4000UL
__le16 supported_speeds_eee_mode;
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL
@@ -3372,8 +3442,18 @@ struct hwrm_port_phy_qcaps_output {
__le32 valid_tx_lpi_timer_high;
#define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK 0xffffffUL
#define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT 0
- #define PORT_PHY_QCAPS_RESP_VALID_MASK 0xff000000UL
- #define PORT_PHY_QCAPS_RESP_VALID_SFT 24
+ #define PORT_PHY_QCAPS_RESP_RSVD_MASK 0xff000000UL
+ #define PORT_PHY_QCAPS_RESP_RSVD_SFT 24
+ __le16 supported_pam4_speeds_auto_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_50G 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_100G 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_200G 0x4UL
+ __le16 supported_pam4_speeds_force_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_50G 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_100G 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_200G 0x4UL
+ u8 unused_0[3];
+ u8 valid;
};
/* hwrm_port_phy_i2c_read_input (size:320b/40B) */
@@ -3812,7 +3892,7 @@ struct hwrm_queue_qportcfg_input {
u8 unused_0;
};
-/* hwrm_queue_qportcfg_output (size:256b/32B) */
+/* hwrm_queue_qportcfg_output (size:1344b/168B) */
struct hwrm_queue_qportcfg_output {
__le16 error_code;
__le16 req_type;
@@ -3898,6 +3978,49 @@ struct hwrm_queue_qportcfg_output {
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN
+ u8 unused_0;
+ char qid0_name[16];
+ char qid1_name[16];
+ char qid2_name[16];
+ char qid3_name[16];
+ char qid4_name[16];
+ char qid5_name[16];
+ char qid6_name[16];
+ char qid7_name[16];
+ u8 unused_1[7];
+ u8 valid;
+};
+
+/* hwrm_queue_qcfg_input (size:192b/24B) */
+struct hwrm_queue_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_QCFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_QCFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_QCFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_QCFG_REQ_FLAGS_PATH_LAST QUEUE_QCFG_REQ_FLAGS_PATH_RX
+ __le32 queue_id;
+};
+
+/* hwrm_queue_qcfg_output (size:128b/16B) */
+struct hwrm_queue_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 queue_len;
+ u8 service_profile;
+ #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LAST QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN
+ u8 queue_cfg_info;
+ #define QUEUE_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
+ u8 unused_0;
u8 valid;
};
@@ -4938,6 +5061,7 @@ struct hwrm_vnic_cfg_input {
#define VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID 0x20UL
#define VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID 0x40UL
#define VNIC_CFG_REQ_ENABLES_QUEUE_ID 0x80UL
+ #define VNIC_CFG_REQ_ENABLES_RX_CSUM_V2_MODE 0x100UL
__le16 vnic_id;
__le16 dflt_ring_grp;
__le16 rss_rule;
@@ -4947,7 +5071,12 @@ struct hwrm_vnic_cfg_input {
__le16 default_rx_ring_id;
__le16 default_cmpl_ring_id;
__le16 queue_id;
- u8 unused0[6];
+ u8 rx_csum_v2_mode;
+ #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_DEFAULT 0x0UL
+ #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_ALL_OK 0x1UL
+ #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX 0x2UL
+ #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_LAST VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX
+ u8 unused0[5];
};
/* hwrm_vnic_cfg_output (size:128b/16B) */
@@ -4989,6 +5118,7 @@ struct hwrm_vnic_qcaps_output {
#define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL
#define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL
#define VNIC_QCAPS_RESP_FLAGS_COS_ASSIGNMENT_CAP 0x100UL
+ #define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V2_CAP 0x200UL
__le16 max_aggs_supported;
u8 unused_1[5];
u8 valid;
@@ -5155,15 +5285,18 @@ struct hwrm_vnic_plcmodes_cfg_input {
#define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6 0x8UL
#define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE 0x10UL
#define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE 0x20UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_VIRTIO_PLACEMENT 0x40UL
__le32 enables;
#define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID 0x1UL
#define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID 0x2UL
#define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID 0x4UL
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_MAX_BDS_VALID 0x8UL
__le32 vnic_id;
__le16 jumbo_thresh;
__le16 hds_offset;
__le16 hds_threshold;
- u8 unused_0[6];
+ __le16 max_bds;
+ u8 unused_0[4];
};
/* hwrm_vnic_plcmodes_cfg_output (size:128b/16B) */
@@ -5231,6 +5364,7 @@ struct hwrm_ring_alloc_input {
#define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL
#define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL
#define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL
+ #define RING_ALLOC_REQ_ENABLES_SCHQ_ID 0x200UL
u8 ring_type;
#define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL
#define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL
@@ -5246,7 +5380,7 @@ struct hwrm_ring_alloc_input {
__le32 fbo;
u8 page_size;
u8 page_tbl_depth;
- u8 unused_1[2];
+ __le16 schq_id;
__le32 length;
__le16 logical_id;
__le16 cmpl_ring_id;
@@ -5344,11 +5478,12 @@ struct hwrm_ring_reset_input {
__le16 target_id;
__le64 resp_addr;
u8 ring_type;
- #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL
- #define RING_RESET_REQ_RING_TYPE_TX 0x1UL
- #define RING_RESET_REQ_RING_TYPE_RX 0x2UL
- #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL
- #define RING_RESET_REQ_RING_TYPE_LAST RING_RESET_REQ_RING_TYPE_ROCE_CMPL
+ #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL
+ #define RING_RESET_REQ_RING_TYPE_TX 0x1UL
+ #define RING_RESET_REQ_RING_TYPE_RX 0x2UL
+ #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL
+ #define RING_RESET_REQ_RING_TYPE_RX_RING_GRP 0x6UL
+ #define RING_RESET_REQ_RING_TYPE_LAST RING_RESET_REQ_RING_TYPE_RX_RING_GRP
u8 unused_0;
__le16 ring_id;
u8 unused_1[4];
@@ -5529,6 +5664,7 @@ struct hwrm_ring_grp_free_output {
u8 unused_0[7];
u8 valid;
};
+
#define DEFAULT_FLOW_ID 0xFFFFFFFFUL
#define ROCEV1_FLOW_ID 0xFFFFFFFEUL
#define ROCEV2_FLOW_ID 0xFFFFFFFDUL
@@ -6816,15 +6952,15 @@ struct ctx_hw_stats {
__le64 rx_mcast_pkts;
__le64 rx_bcast_pkts;
__le64 rx_discard_pkts;
- __le64 rx_drop_pkts;
+ __le64 rx_error_pkts;
__le64 rx_ucast_bytes;
__le64 rx_mcast_bytes;
__le64 rx_bcast_bytes;
__le64 tx_ucast_pkts;
__le64 tx_mcast_pkts;
__le64 tx_bcast_pkts;
+ __le64 tx_error_pkts;
__le64 tx_discard_pkts;
- __le64 tx_drop_pkts;
__le64 tx_ucast_bytes;
__le64 tx_mcast_bytes;
__le64 tx_bcast_bytes;
@@ -6840,15 +6976,15 @@ struct ctx_hw_stats_ext {
__le64 rx_mcast_pkts;
__le64 rx_bcast_pkts;
__le64 rx_discard_pkts;
- __le64 rx_drop_pkts;
+ __le64 rx_error_pkts;
__le64 rx_ucast_bytes;
__le64 rx_mcast_bytes;
__le64 rx_bcast_bytes;
__le64 tx_ucast_pkts;
__le64 tx_mcast_pkts;
__le64 tx_bcast_pkts;
+ __le64 tx_error_pkts;
__le64 tx_discard_pkts;
- __le64 tx_drop_pkts;
__le64 tx_ucast_bytes;
__le64 tx_mcast_bytes;
__le64 tx_bcast_bytes;
@@ -6915,7 +7051,9 @@ struct hwrm_stat_ctx_query_input {
__le16 target_id;
__le64 resp_addr;
__le32 stat_ctx_id;
- u8 unused_0[4];
+ u8 flags;
+ #define STAT_CTX_QUERY_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[3];
};
/* hwrm_stat_ctx_query_output (size:1408b/176B) */
@@ -6948,6 +7086,50 @@ struct hwrm_stat_ctx_query_output {
u8 valid;
};
+/* hwrm_stat_ext_ctx_query_input (size:192b/24B) */
+struct hwrm_stat_ext_ctx_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ u8 flags;
+ #define STAT_EXT_CTX_QUERY_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[3];
+};
+
+/* hwrm_stat_ext_ctx_query_output (size:1472b/184B) */
+struct hwrm_stat_ext_ctx_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_error_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_error_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_tpa_eligible_pkt;
+ __le64 rx_tpa_eligible_bytes;
+ __le64 rx_tpa_pkt;
+ __le64 rx_tpa_bytes;
+ __le64 rx_tpa_errors;
+ u8 unused_0[7];
+ u8 valid;
+};
+
/* hwrm_stat_ctx_clr_stats_input (size:192b/24B) */
struct hwrm_stat_ctx_clr_stats_input {
__le16 req_type;
@@ -7497,6 +7679,29 @@ struct hwrm_wol_reason_qcfg_output {
u8 valid;
};
+/* hwrm_dbg_read_direct_input (size:256b/32B) */
+struct hwrm_dbg_read_direct_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le32 read_addr;
+ __le32 read_len32;
+};
+
+/* hwrm_dbg_read_direct_output (size:128b/16B) */
+struct hwrm_dbg_read_direct_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 crc32;
+ u8 unused_0[3];
+ u8 valid;
+};
+
/* coredump_segment_record (size:128b/16B) */
struct coredump_segment_record {
__le16 component_id;
@@ -7507,7 +7712,8 @@ struct coredump_segment_record {
u8 seg_flags;
u8 compress_flags;
#define SFLAG_COMPRESSED_ZLIB 0x1UL
- u8 unused_0[6];
+ u8 unused_0[2];
+ __le32 segment_len;
};
/* hwrm_dbg_coredump_list_input (size:256b/32B) */
@@ -7620,7 +7826,8 @@ struct hwrm_dbg_ring_info_get_input {
#define DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL 0x0UL
#define DBG_RING_INFO_GET_REQ_RING_TYPE_TX 0x1UL
#define DBG_RING_INFO_GET_REQ_RING_TYPE_RX 0x2UL
- #define DBG_RING_INFO_GET_REQ_RING_TYPE_LAST DBG_RING_INFO_GET_REQ_RING_TYPE_RX
+ #define DBG_RING_INFO_GET_REQ_RING_TYPE_NQ 0x3UL
+ #define DBG_RING_INFO_GET_REQ_RING_TYPE_LAST DBG_RING_INFO_GET_REQ_RING_TYPE_NQ
u8 unused_0[3];
__le32 fw_ring_id;
};
@@ -7633,7 +7840,8 @@ struct hwrm_dbg_ring_info_get_output {
__le16 resp_len;
__le32 producer_index;
__le32 consumer_index;
- u8 unused_0[7];
+ __le32 cag_vector_ctrl;
+ u8 unused_0[3];
u8 valid;
};
@@ -7922,6 +8130,7 @@ struct hwrm_nvm_install_update_input {
#define NVM_INSTALL_UPDATE_REQ_FLAGS_ERASE_UNUSED_SPACE 0x1UL
#define NVM_INSTALL_UPDATE_REQ_FLAGS_REMOVE_UNUSED_PKG 0x2UL
#define NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG 0x4UL
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_VERIFY_ONLY 0x8UL
u8 unused_0[2];
};
@@ -8101,7 +8310,14 @@ struct hwrm_selftest_qlist_output {
char test5_name[32];
char test6_name[32];
char test7_name[32];
- u8 unused_2[7];
+ u8 eyescope_target_BER_support;
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E8_SUPPORTED 0x0UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E9_SUPPORTED 0x1UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E10_SUPPORTED 0x2UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E11_SUPPORTED 0x3UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E12_SUPPORTED 0x4UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_LAST SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E12_SUPPORTED
+ u8 unused_2[6];
u8 valid;
};
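[Editor's note] The new PAM4 fields follow the usual HWRM conventions: little-endian words carrying bitmasks, converted with le*_to_cpu() before testing. A minimal sketch of probing one of the new capability bits (the helper is hypothetical, not part of the patch):

/* True if firmware reports 200G PAM4 as autonegotiable; resp is a
 * completed hwrm_port_phy_qcaps_output.
 */
static bool phy_can_autoneg_200g_pam4(struct hwrm_port_phy_qcaps_output *resp)
{
        u16 speeds = le16_to_cpu(resp->supported_pam4_speeds_auto_mode);

        return !!(speeds & PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_200G);
}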
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 392e32c7122a..cc2ee4d0bd18 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -1029,7 +1029,7 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
rc = bnxt_hwrm_exec_fwd_resp(
bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
} else {
- struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
+ struct hwrm_port_phy_qcfg_output_compat phy_qcfg_resp = {0};
struct hwrm_port_phy_qcfg_input *phy_qcfg_req;
phy_qcfg_req =
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
index 43d11c38b38a..4cddd628d41b 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
@@ -1167,7 +1167,7 @@ static int cn23xx_get_pf_num(struct octeon_device *oct)
oct->pf_num = ((fdl_bit >> CN23XX_PCIE_SRIOV_FDL_BIT_POS) &
CN23XX_PCIE_SRIOV_FDL_MASK);
} else {
- ret = EINVAL;
+ ret = -EINVAL;
/* Under some virtual environments, extended PCI regs are
* inaccessible, in which case the above read will have failed.
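[Editor's note] The substantive change above is the sign: kernel functions report failure as a negative errno, and callers that test `ret < 0` or propagate the value toward user space will silently miss a bare, positive EINVAL. The shape of the convention, in a generic sketch (the register layout here is an assumption, not the octeon one):

/* Fail with -EINVAL, never EINVAL: error paths test for ret < 0. */
static int parse_pf_field(u32 reg, u32 *pf_num)
{
        if (reg == 0xffffffff)          /* read failed, e.g. PCI master abort */
                return -EINVAL;

        *pf_num = reg & 0x3f;           /* illustrative field mask */
        return 0;
}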
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index 934115d18488..ac32facaa427 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -1056,7 +1056,7 @@ void octeon_delete_dispatch_list(struct octeon_device *oct)
list_for_each_safe(temp, tmp2, &freelist) {
list_del(temp);
- vfree(temp);
+ kfree(temp);
}
}
@@ -1152,13 +1152,10 @@ octeon_register_dispatch_fn(struct octeon_device *oct,
dev_dbg(&oct->pci_dev->dev,
"Adding opcode to dispatch list linked list\n");
- dispatch = (struct octeon_dispatch *)
- vmalloc(sizeof(struct octeon_dispatch));
- if (!dispatch) {
- dev_err(&oct->pci_dev->dev,
- "No memory to add dispatch function\n");
+ dispatch = kmalloc(sizeof(*dispatch), GFP_KERNEL);
+ if (!dispatch)
return 1;
- }
+
dispatch->opcode = combined_opcode;
dispatch->dispatch_fn = fn;
dispatch->arg = fn_arg;
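[Editor's note] These two hunks fix one mismatch from both sides: dispatch entries are small, fixed-size objects, so the slab allocator is the right source, and the free in octeon_delete_dispatch_list() must come from the same allocator family. The pairing rule, in sketch form (helper names invented):

/* Illustrative pairing only; allocation and free must match. */
static struct octeon_dispatch *dispatch_alloc(void)
{
        return kmalloc(sizeof(struct octeon_dispatch), GFP_KERNEL);
}

static void dispatch_free(struct octeon_dispatch *d)
{
        kfree(d);       /* pairs with kmalloc(); vfree() here would be a bug */
}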
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index adbc0d088070..9cb8b229c1b3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -1438,6 +1438,8 @@ enum {
NAT_MODE_ALL /* NAT on entire 4-tuple */
};
+#define CXGB4_FILTER_TYPE_MAX 2
+
/* Host shadow copy of ingress filter entry. This is in host native format
* and doesn't match the ordering or bit order, etc. of the hardware of the
* firmware command. The use of bit-field structure elements is purely to
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 12ef9ddd1e54..9f3173f86eed 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -31,7 +31,7 @@ enum cxgb4_ethtool_tests {
};
static const char cxgb4_selftest_strings[CXGB4_ETHTOOL_MAX_TEST][ETH_GSTRING_LEN] = {
- "Loop back test",
+ "Loop back test (offline)",
};
static const char * const flash_region_strings[] = {
@@ -2095,12 +2095,13 @@ static void cxgb4_self_test(struct net_device *netdev,
memset(data, 0, sizeof(u64) * CXGB4_ETHTOOL_MAX_TEST);
- if (!(adap->flags & CXGB4_FW_OK)) {
+ if (!(adap->flags & CXGB4_FULL_INIT_DONE) ||
+ !(adap->flags & CXGB4_FW_OK)) {
eth_test->flags |= ETH_TEST_FL_FAILED;
return;
}
- if (eth_test->flags == ETH_TEST_FL_OFFLINE)
+ if (eth_test->flags & ETH_TEST_FL_OFFLINE)
cxgb4_lb_test(netdev, &data[CXGB4_ETHTOOL_LB_TEST]);
if (data[CXGB4_ETHTOOL_LB_TEST])
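[Editor's note] The `==` to `&` change is the functional fix in this hunk: eth_test->flags is a bitmask, and user space can set ETH_TEST_FL_OFFLINE together with other request bits, so an exact-equality test skips the offline run whenever any second flag is present. Reduced to its essence:

/* flags can carry several bits at once, e.g.
 * ETH_TEST_FL_OFFLINE | ETH_TEST_FL_EXTERNAL_LB
 */
static bool offline_requested(u32 flags)
{
        return flags & ETH_TEST_FL_OFFLINE;     /* '==' would be false here */
}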
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index 6251444ffa58..f642c1b475c4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -80,6 +80,19 @@ static void cxgb4_process_flow_match(struct net_device *dev,
struct flow_rule *rule,
struct ch_filter_specification *fs)
{
+ u16 addr_type = 0;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
+
+ flow_rule_match_control(rule, &match);
+ addr_type = match.key->addr_type;
+ } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+ } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+ addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+ }
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
u16 ethtype_key, ethtype_mask;
@@ -102,7 +115,7 @@ static void cxgb4_process_flow_match(struct net_device *dev,
fs->mask.proto = match.mask->ip_proto;
}
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
struct flow_match_ipv4_addrs match;
flow_rule_match_ipv4_addrs(rule, &match);
@@ -117,7 +130,7 @@ static void cxgb4_process_flow_match(struct net_device *dev,
memcpy(&fs->nat_fip[0], &match.key->src, sizeof(match.key->src));
}
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
struct flow_match_ipv6_addrs match;
flow_rule_match_ipv6_addrs(rule, &match);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
index e377e50c2492..2e309f6673f7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
@@ -231,8 +231,26 @@ static void cxgb4_matchall_mirror_free(struct net_device *dev)
tc_port_matchall->ingress.viid_mirror = 0;
}
-static int cxgb4_matchall_alloc_filter(struct net_device *dev,
- struct tc_cls_matchall_offload *cls)
+static int cxgb4_matchall_del_filter(struct net_device *dev, u8 filter_type)
+{
+ struct cxgb4_tc_port_matchall *tc_port_matchall;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ int ret;
+
+ tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
+ ret = cxgb4_del_filter(dev, tc_port_matchall->ingress.tid[filter_type],
+ &tc_port_matchall->ingress.fs[filter_type]);
+ if (ret)
+ return ret;
+
+ tc_port_matchall->ingress.tid[filter_type] = 0;
+ return 0;
+}
+
+static int cxgb4_matchall_add_filter(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls,
+ u8 filter_type)
{
struct netlink_ext_ack *extack = cls->common.extack;
struct cxgb4_tc_port_matchall *tc_port_matchall;
@@ -244,28 +262,24 @@ static int cxgb4_matchall_alloc_filter(struct net_device *dev,
/* Get a free filter entry TID, where we can insert this new
* rule. Only insert rule if its prio doesn't conflict with
* existing rules.
- *
- * 1 slot is enough to create a wildcard matchall VIID rule.
*/
- fidx = cxgb4_get_free_ftid(dev, PF_INET, false, cls->common.prio);
+ fidx = cxgb4_get_free_ftid(dev, filter_type ? PF_INET6 : PF_INET,
+ false, cls->common.prio);
if (fidx < 0) {
NL_SET_ERR_MSG_MOD(extack,
"No free LETCAM index available");
return -ENOMEM;
}
- ret = cxgb4_matchall_mirror_alloc(dev, cls);
- if (ret)
- return ret;
-
tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
- fs = &tc_port_matchall->ingress.fs;
+ fs = &tc_port_matchall->ingress.fs[filter_type];
memset(fs, 0, sizeof(*fs));
if (fidx < adap->tids.nhpftids)
fs->prio = 1;
fs->tc_prio = cls->common.prio;
fs->tc_cookie = cls->cookie;
+ fs->type = filter_type;
fs->hitcnts = 1;
fs->val.pfvf_vld = 1;
@@ -276,13 +290,39 @@ static int cxgb4_matchall_alloc_filter(struct net_device *dev,
ret = cxgb4_set_filter(dev, fidx, fs);
if (ret)
- goto out_free;
+ return ret;
+
+ tc_port_matchall->ingress.tid[filter_type] = fidx;
+ return 0;
+}
+
+static int cxgb4_matchall_alloc_filter(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct cxgb4_tc_port_matchall *tc_port_matchall;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ int ret, i;
+
+ tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
+
+ ret = cxgb4_matchall_mirror_alloc(dev, cls);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) {
+ ret = cxgb4_matchall_add_filter(dev, cls, i);
+ if (ret)
+ goto out_free;
+ }
- tc_port_matchall->ingress.tid = fidx;
tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_ENABLED;
return 0;
out_free:
+ while (i-- > 0)
+ cxgb4_matchall_del_filter(dev, i);
+
cxgb4_matchall_mirror_free(dev);
return ret;
}
@@ -293,20 +333,21 @@ static int cxgb4_matchall_free_filter(struct net_device *dev)
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = netdev2adap(dev);
int ret;
+ u8 i;
tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
- ret = cxgb4_del_filter(dev, tc_port_matchall->ingress.tid,
- &tc_port_matchall->ingress.fs);
- if (ret)
- return ret;
+ for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) {
+ ret = cxgb4_matchall_del_filter(dev, i);
+ if (ret)
+ return ret;
+ }
cxgb4_matchall_mirror_free(dev);
tc_port_matchall->ingress.packets = 0;
tc_port_matchall->ingress.bytes = 0;
tc_port_matchall->ingress.last_used = 0;
- tc_port_matchall->ingress.tid = 0;
tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_DISABLED;
return 0;
}
@@ -362,8 +403,12 @@ int cxgb4_tc_matchall_destroy(struct net_device *dev,
tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
if (ingress) {
+ /* All the filter types of this matchall rule share the
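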
+ * same cookie. So, checking for the first one is
+ * enough.
+ */
if (cls_matchall->cookie !=
- tc_port_matchall->ingress.fs.tc_cookie)
+ tc_port_matchall->ingress.fs[0].tc_cookie)
return -ENOENT;
return cxgb4_matchall_free_filter(dev);
@@ -379,21 +424,29 @@ int cxgb4_tc_matchall_destroy(struct net_device *dev,
int cxgb4_tc_matchall_stats(struct net_device *dev,
struct tc_cls_matchall_offload *cls_matchall)
{
+ u64 tmp_packets, tmp_bytes, packets = 0, bytes = 0;
struct cxgb4_tc_port_matchall *tc_port_matchall;
+ struct cxgb4_matchall_ingress_entry *ingress;
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = netdev2adap(dev);
- u64 packets, bytes;
int ret;
+ u8 i;
tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_DISABLED)
return -ENOENT;
- ret = cxgb4_get_filter_counters(dev, tc_port_matchall->ingress.tid,
- &packets, &bytes,
- tc_port_matchall->ingress.fs.hash);
- if (ret)
- return ret;
+ ingress = &tc_port_matchall->ingress;
+ for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) {
+ ret = cxgb4_get_filter_counters(dev, ingress->tid[i],
+ &tmp_packets, &tmp_bytes,
+ ingress->fs[i].hash);
+ if (ret)
+ return ret;
+
+ packets += tmp_packets;
+ bytes += tmp_bytes;
+ }
if (tc_port_matchall->ingress.packets != packets) {
flow_stats_update(&cls_matchall->stats,
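[Editor's note] The out_free path added above uses a compact partial-unwind idiom: after the loop, `i` holds the index of the step that failed, so `while (i-- > 0)` deletes only the filters that were actually installed, newest first. Abstracted (all names hypothetical):

static int setup_all(struct ctx *c)
{
        int i, ret;

        for (i = 0; i < N_STEPS; i++) {
                ret = do_step(c, i);
                if (ret)
                        goto unwind;
        }
        return 0;

unwind:
        while (i-- > 0)         /* undo steps i-1 .. 0 in reverse order */
                undo_step(c, i);
        return ret;
}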
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.h
index e264b6e606c4..fe7ec423a4c9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.h
@@ -19,8 +19,9 @@ struct cxgb4_matchall_egress_entry {
struct cxgb4_matchall_ingress_entry {
enum cxgb4_matchall_state state; /* Current MATCHALL offload state */
- u32 tid; /* Index to hardware filter entry */
- struct ch_filter_specification fs; /* Filter entry */
+ u32 tid[CXGB4_FILTER_TYPE_MAX]; /* Index to hardware filter entries */
+ /* Filter entries */
+ struct ch_filter_specification fs[CXGB4_FILTER_TYPE_MAX];
u16 viid_mirror; /* Identifier for allocated Mirror VI */
u64 bytes; /* # of bytes hitting the filter */
u64 packets; /* # of packets hitting the filter */
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 8d13ea370db1..66e67b24a887 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -2446,6 +2446,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
port->reset = devm_reset_control_get_exclusive(dev, NULL);
if (IS_ERR(port->reset)) {
dev_err(dev, "no reset\n");
+ clk_disable_unprepare(port->pclk);
return PTR_ERR(port->reset);
}
reset_control_reset(port->reset);
@@ -2501,8 +2502,10 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
IRQF_SHARED,
port_names[port->id],
port);
- if (ret)
+ if (ret) {
+ clk_disable_unprepare(port->pclk);
return ret;
+ }
ret = register_netdev(netdev);
if (!ret) {
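[Editor's note] Both gemini hunks plug the same leak: port->pclk was enabled earlier in probe, so every early return after that point must release it. Once error exits multiply like this, the conventional structure is a single unwind label so the clock is released in exactly one place (a sketch, with a hypothetical later step):

        ret = clk_prepare_enable(port->pclk);
        if (ret)
                return ret;

        ret = some_later_setup();       /* hypothetical */
        if (ret)
                goto err_clk;

        return 0;

err_clk:
        clk_disable_unprepare(port->pclk);
        return ret;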
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 17f6bcafc944..83b1e974bff0 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -2325,7 +2325,7 @@ static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
/* Update NAPI statistics */
ch->stats.cdan++;
- napi_schedule_irqoff(&ch->napi);
+ napi_schedule(&ch->napi);
}
/* Allocate and configure a DPCON object */
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index f50353cbb4db..f78ca7b343d2 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -270,7 +270,7 @@ static irqreturn_t enetc_msix(int irq, void *data)
for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), 0);
- napi_schedule_irqoff(&v->napi);
+ napi_schedule(&v->napi);
return IRQ_HANDLED;
}
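[Editor's note] The dpaa2 and enetc hunks share one rationale: napi_schedule_irqoff() is only valid where interrupts are known to be disabled, and with forced-threaded interrupts (the threadirqs boot option, or PREEMPT_RT) these handlers run in a kernel thread with interrupts enabled. The plain variant is correct in both contexts. A minimal handler sketch (names hypothetical):

static irqreturn_t my_msix_handler(int irq, void *data)
{
        struct napi_struct *napi = data;

        /* safe whether this runs in hard-IRQ or threaded-IRQ context */
        napi_schedule(napi);
        return IRQ_HANDLED;
}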
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index a9939550fa8f..1c4a535890da 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -1130,7 +1130,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
!is_zero_ether_addr(match.mask->src)) {
NL_SET_ERR_MSG_MOD(extack,
"Cannot match on both source and destination MAC");
- err = EINVAL;
+ err = -EINVAL;
goto free_filter;
}
@@ -1138,7 +1138,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
if (!is_broadcast_ether_addr(match.mask->dst)) {
NL_SET_ERR_MSG_MOD(extack,
"Masked matching on destination MAC not supported");
- err = EINVAL;
+ err = -EINVAL;
goto free_filter;
}
ether_addr_copy(filter->sid.dst_mac, match.key->dst);
@@ -1149,7 +1149,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
if (!is_broadcast_ether_addr(match.mask->src)) {
NL_SET_ERR_MSG_MOD(extack,
"Masked matching on source MAC not supported");
- err = EINVAL;
+ err = -EINVAL;
goto free_filter;
}
ether_addr_copy(filter->sid.src_mac, match.key->src);
@@ -1157,7 +1157,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
}
} else {
NL_SET_ERR_MSG_MOD(extack, "Unsupported, must include ETH_ADDRS");
- err = EINVAL;
+ err = -EINVAL;
goto free_filter;
}
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index f151d6e111dd..ef67e8599b39 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -1398,8 +1398,7 @@ static void enable_time_stamp(struct fman *fman)
{
struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
u16 fm_clk_freq = fman->state->fm_clk_freq;
- u32 tmp, intgr, ts_freq;
- u64 frac;
+ u32 tmp, intgr, ts_freq, frac;
ts_freq = (u32)(1 << fman->state->count1_micro_bit);
/* configure timestamp so that bit 8 will count 1 microsecond
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index 004c266802a8..bce3c9398887 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -1200,7 +1200,7 @@ int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
list_for_each(pos,
&dtsec->multicast_addr_hash->lsts[bucket]) {
hash_entry = ETH_HASH_ENTRY_OBJ(pos);
- if (hash_entry->addr == addr) {
+ if (hash_entry && hash_entry->addr == addr) {
list_del_init(&hash_entry->node);
kfree(hash_entry);
break;
@@ -1213,7 +1213,7 @@ int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
list_for_each(pos,
&dtsec->unicast_addr_hash->lsts[bucket]) {
hash_entry = ETH_HASH_ENTRY_OBJ(pos);
- if (hash_entry->addr == addr) {
+ if (hash_entry && hash_entry->addr == addr) {
list_del_init(&hash_entry->node);
kfree(hash_entry);
break;
diff --git a/drivers/net/ethernet/freescale/fman/fman_mac.h b/drivers/net/ethernet/freescale/fman/fman_mac.h
index dd6d0526f6c1..19f327efdaff 100644
--- a/drivers/net/ethernet/freescale/fman/fman_mac.h
+++ b/drivers/net/ethernet/freescale/fman/fman_mac.h
@@ -252,7 +252,7 @@ static inline struct eth_hash_t *alloc_hash_table(u16 size)
struct eth_hash_t *hash;
/* Allocate address hash table */
- hash = kmalloc_array(size, sizeof(struct eth_hash_t *), GFP_KERNEL);
+ hash = kmalloc(sizeof(*hash), GFP_KERNEL);
if (!hash)
return NULL;
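
The hunk above is an allocation-size fix: the old kmalloc_array(size, sizeof(struct eth_hash_t *), ...) sized the buffer as `size` pointers, which bears no relation to the single structure being allocated, whereas kmalloc(sizeof(*hash), ...) can never drift out of sync with the pointer's type. A sketch with a stand-in structure (the real eth_hash_t fields are not shown in this patch):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_hash {
	u16 size;
	struct list_head *lsts;	/* stand-in fields, an assumption */
};

static struct demo_hash *demo_alloc_hash_table(u16 size)
{
	struct demo_hash *hash;

	/* sizeof(*hash) follows the pointee type automatically */
	hash = kmalloc(sizeof(*hash), GFP_KERNEL);
	if (!hash)
		return NULL;

	hash->size = size;
	return hash;
}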
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index a5500ede4070..645764abdaae 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -852,7 +852,6 @@ int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority,
tmp = ioread32be(&regs->command_config);
tmp &= ~CMD_CFG_PFC_MODE;
- priority = 0;
iowrite32be(tmp, &regs->command_config);
@@ -982,7 +981,7 @@ int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
list_for_each(pos, &memac->multicast_addr_hash->lsts[hash]) {
hash_entry = ETH_HASH_ENTRY_OBJ(pos);
- if (hash_entry->addr == addr) {
+ if (hash_entry && hash_entry->addr == addr) {
list_del_init(&hash_entry->node);
kfree(hash_entry);
break;
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index 87b26f063cc8..c27df153f895 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -1767,6 +1767,7 @@ static int fman_port_probe(struct platform_device *of_dev)
struct fman_port *port;
struct fman *fman;
struct device_node *fm_node, *port_node;
+ struct platform_device *fm_pdev;
struct resource res;
struct resource *dev_res;
u32 val;
@@ -1791,8 +1792,14 @@ static int fman_port_probe(struct platform_device *of_dev)
goto return_err;
}
- fman = dev_get_drvdata(&of_find_device_by_node(fm_node)->dev);
+ fm_pdev = of_find_device_by_node(fm_node);
of_node_put(fm_node);
+ if (!fm_pdev) {
+ err = -EINVAL;
+ goto return_err;
+ }
+
+ fman = dev_get_drvdata(&fm_pdev->dev);
if (!fman) {
err = -EINVAL;
goto return_err;
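
The fman_port fix above splits a chained call that dereferenced the result of of_find_device_by_node() without a NULL check; the lookup fails whenever no platform device has been bound to the node. A sketch of the safe shape (of_find_device_by_node() also returns a referenced device, and the eventual put_device() is out of scope here, as it is in the patch):

#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

static void *demo_find_parent_drvdata(struct device_node *np)
{
	struct platform_device *pdev;

	pdev = of_find_device_by_node(np);
	of_node_put(np);		/* node reference no longer needed */
	if (!pdev)
		return NULL;		/* nothing bound: do not dereference */

	return dev_get_drvdata(&pdev->dev);	/* may itself be NULL */
}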
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c
index 8c7eb878d5b4..41946b16f6c7 100644
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c
@@ -626,7 +626,7 @@ int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
list_for_each(pos, &tgec->multicast_addr_hash->lsts[hash]) {
hash_entry = ETH_HASH_ENTRY_OBJ(pos);
- if (hash_entry->addr == addr) {
+ if (hash_entry && hash_entry->addr == addr) {
list_del_init(&hash_entry->node);
kfree(hash_entry);
break;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index b1bea0309a7c..87776ce3539b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -1093,16 +1093,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
int k, sizeoflast;
dma_addr_t dma;
- if (type == DESC_TYPE_SKB) {
- struct sk_buff *skb = (struct sk_buff *)priv;
- int ret;
-
- ret = hns3_fill_skb_desc(ring, skb, desc);
- if (unlikely(ret < 0))
- return ret;
-
- dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
- } else if (type == DESC_TYPE_FRAGLIST_SKB) {
+ if (type == DESC_TYPE_FRAGLIST_SKB ||
+ type == DESC_TYPE_SKB) {
struct sk_buff *skb = (struct sk_buff *)priv;
dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
@@ -1439,6 +1431,10 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
next_to_use_head = ring->next_to_use;
+ ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use]);
+ if (unlikely(ret < 0))
+ goto fill_err;
+
ret = hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
if (unlikely(ret < 0))
goto fill_err;
@@ -4153,8 +4149,8 @@ static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
return;
if (linkup) {
- netif_carrier_on(netdev);
netif_tx_wake_all_queues(netdev);
+ netif_carrier_on(netdev);
if (netif_msg_link(handle))
netdev_info(netdev, "link up\n");
} else {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index bb4a6327035d..36575e72a915 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -5806,9 +5806,9 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
/* to avoid rule conflict, when user configure rule by ethtool,
* we need to clear all arfs rules
*/
+ spin_lock_bh(&hdev->fd_rule_lock);
hclge_clear_arfs_rules(handle);
- spin_lock_bh(&hdev->fd_rule_lock);
ret = hclge_fd_config_rule(hdev, rule);
spin_unlock_bh(&hdev->fd_rule_lock);
@@ -5851,6 +5851,7 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle,
return ret;
}
+/* must be called with fd_rule_lock held */
static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
bool clear_list)
{
@@ -5863,7 +5864,6 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
if (!hnae3_dev_fd_supported(hdev))
return;
- spin_lock_bh(&hdev->fd_rule_lock);
for_each_set_bit(location, hdev->fd_bmap,
hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
@@ -5880,8 +5880,6 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
bitmap_zero(hdev->fd_bmap,
hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
}
-
- spin_unlock_bh(&hdev->fd_rule_lock);
}
static int hclge_restore_fd_entries(struct hnae3_handle *handle)
@@ -6263,7 +6261,7 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
u16 flow_id, struct flow_keys *fkeys)
{
struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_fd_rule_tuples new_tuples;
+ struct hclge_fd_rule_tuples new_tuples = {};
struct hclge_dev *hdev = vport->back;
struct hclge_fd_rule *rule;
u16 tmp_queue_id;
@@ -6273,19 +6271,17 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
if (!hnae3_dev_fd_supported(hdev))
return -EOPNOTSUPP;
- memset(&new_tuples, 0, sizeof(new_tuples));
- hclge_fd_get_flow_tuples(fkeys, &new_tuples);
-
- spin_lock_bh(&hdev->fd_rule_lock);
-
/* when there is already fd rule existed add by user,
* arfs should not work
*/
+ spin_lock_bh(&hdev->fd_rule_lock);
if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
spin_unlock_bh(&hdev->fd_rule_lock);
return -EOPNOTSUPP;
}
+ hclge_fd_get_flow_tuples(fkeys, &new_tuples);
+
/* check is there flow director filter existed for this flow,
* if not, create a new filter for it;
* if filter exist with different queue id, modify the filter;
@@ -6368,6 +6364,7 @@ static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
#endif
}
+/* must be called with fd_rule_lock held */
static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
{
#ifdef CONFIG_RFS_ACCEL
@@ -6420,10 +6417,14 @@ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
hdev->fd_en = enable;
clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
- if (!enable)
+
+ if (!enable) {
+ spin_lock_bh(&hdev->fd_rule_lock);
hclge_del_all_fd_entries(handle, clear);
- else
+ spin_unlock_bh(&hdev->fd_rule_lock);
+ } else {
hclge_restore_fd_entries(handle);
+ }
}
static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
@@ -6886,8 +6887,9 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
int i;
set_bit(HCLGE_STATE_DOWN, &hdev->state);
-
+ spin_lock_bh(&hdev->fd_rule_lock);
hclge_clear_arfs_rules(handle);
+ spin_unlock_bh(&hdev->fd_rule_lock);
/* If it is not PF reset, the firmware will disable the MAC,
* so it only need to stop phy here.
@@ -9040,11 +9042,12 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
bool writen_to_tbl = false;
int ret = 0;
- /* When device is resetting, firmware is unable to handle
- * mailbox. Just record the vlan id, and remove it after
+ /* When device is resetting or reset failed, firmware is unable to
+ * handle mailbox. Just record the vlan id, and remove it after
* reset finished.
*/
- if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
+ if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
+ test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
set_bit(vlan_id, vport->vlan_del_fail_bmap);
return -EBUSY;
}
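
The hclge hunks above move fd_rule_lock acquisition out of hclge_del_all_fd_entries() and hclge_clear_arfs_rules() and into their callers, so hclge_add_fd_entry() and hclge_ae_stop() can hold the lock across the whole sequence without self-deadlocking. The patch documents the contract with comments; a lockdep assertion (shown here as optional hardening, not something this patch adds) makes it machine-checked:

#include <linux/lockdep.h>
#include <linux/spinlock.h>

struct demo_dev {
	spinlock_t fd_rule_lock;
};

/* must be called with fd_rule_lock held */
static void demo_clear_rules(struct demo_dev *hdev)
{
	lockdep_assert_held(&hdev->fd_rule_lock);
	/* ... walk and delete flow-director rules ... */
}

static void demo_caller(struct demo_dev *hdev)
{
	spin_lock_bh(&hdev->fd_rule_lock);
	demo_clear_rules(hdev);		/* no nested lock, no deadlock */
	spin_unlock_bh(&hdev->fd_rule_lock);
}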
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index a10b022d1951..9162856de1b1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -1592,11 +1592,12 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
if (proto != htons(ETH_P_8021Q))
return -EPROTONOSUPPORT;
- /* When device is resetting, firmware is unable to handle
- * mailbox. Just record the vlan id, and remove it after
+ /* When device is resetting or reset failed, firmware is unable to
+ * handle mailbox. Just record the vlan id, and remove it after
* reset finished.
*/
- if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) && is_kill) {
+ if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
+ test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) {
set_bit(vlan_id, hdev->vlan_del_fail_bmap);
return -EBUSY;
}
@@ -3439,23 +3440,36 @@ void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
{
struct hnae3_handle *nic = &hdev->nic;
struct hclge_vf_to_pf_msg send_msg;
+ int ret;
rtnl_lock();
- hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
- rtnl_unlock();
+
+ if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
+ test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) {
+ dev_warn(&hdev->pdev->dev,
+ "is resetting when updating port based vlan info\n");
+ rtnl_unlock();
+ return;
+ }
+
+ ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret) {
+ rtnl_unlock();
+ return;
+ }
/* send msg to PF and wait update port based vlan info */
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
HCLGE_MBX_PORT_BASE_VLAN_CFG);
memcpy(send_msg.data, port_base_vlan_info, data_size);
- hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
-
- if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
- nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
- else
- nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+ if (!ret) {
+ if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
+ nic->port_base_vlan_state = state;
+ else
+ nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
+ }
- rtnl_lock();
hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
rtnl_unlock();
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_dev.h
index befd925c03dc..0a1e20edf7cf 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_dev.h
@@ -98,10 +98,14 @@ struct hinic_dev {
int lb_pkt_len;
u8 *lb_test_rx_buf;
struct devlink *devlink;
+ bool cable_unplugged;
+ bool module_unrecognized;
};
struct hinic_devlink_priv {
struct hinic_hwdev *hwdev;
+ struct devlink_health_reporter *hw_fault_reporter;
+ struct devlink_health_reporter *fw_fault_reporter;
};
#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
index a40a10ac1ee9..c6adc776f3c8 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
@@ -16,9 +16,9 @@
#include <net/devlink.h>
#include <linux/firmware.h>
-#include "hinic_dev.h"
#include "hinic_port.h"
#include "hinic_devlink.h"
+#include "hinic_hw_dev.h"
static bool check_image_valid(struct hinic_devlink_priv *priv, const u8 *buf,
u32 image_size, struct host_image_st *host_image)
@@ -317,12 +317,292 @@ void hinic_devlink_free(struct devlink *devlink)
devlink_free(devlink);
}
-int hinic_devlink_register(struct devlink *devlink, struct device *dev)
+int hinic_devlink_register(struct hinic_devlink_priv *priv, struct device *dev)
{
+ struct devlink *devlink = priv_to_devlink(priv);
+
return devlink_register(devlink, dev);
}
-void hinic_devlink_unregister(struct devlink *devlink)
+void hinic_devlink_unregister(struct hinic_devlink_priv *priv)
{
+ struct devlink *devlink = priv_to_devlink(priv);
+
devlink_unregister(devlink);
}
+
+static int chip_fault_show(struct devlink_fmsg *fmsg,
+ struct hinic_fault_event *event)
+{
+ char fault_level[FAULT_TYPE_MAX][FAULT_SHOW_STR_LEN + 1] = {
+ "fatal", "reset", "flr", "general", "suggestion"};
+ char level_str[FAULT_SHOW_STR_LEN + 1] = {0};
+ u8 level;
+ int err;
+
+ level = event->event.chip.err_level;
+ if (level < FAULT_LEVEL_MAX)
+ strncpy(level_str, fault_level[level], strlen(fault_level[level]));
+ else
+ strncpy(level_str, "Unknown", strlen("Unknown"));
+
+ if (level == FAULT_LEVEL_SERIOUS_FLR) {
+ err = devlink_fmsg_u32_pair_put(fmsg, "Function level err func_id",
+ (u32)event->event.chip.func_id);
+ if (err)
+ return err;
+ }
+
+ err = devlink_fmsg_u8_pair_put(fmsg, "module_id", event->event.chip.node_id);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "err_type", (u32)event->event.chip.err_type);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_string_pair_put(fmsg, "err_level", level_str);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "err_csr_addr",
+ event->event.chip.err_csr_addr);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "err_csr_value",
+ event->event.chip.err_csr_value);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int fault_report_show(struct devlink_fmsg *fmsg,
+ struct hinic_fault_event *event)
+{
+ char fault_type[FAULT_TYPE_MAX][FAULT_SHOW_STR_LEN + 1] = {
+ "chip", "ucode", "mem rd timeout", "mem wr timeout",
+ "reg rd timeout", "reg wr timeout", "phy fault"};
+ char type_str[FAULT_SHOW_STR_LEN + 1] = {0};
+ int err;
+
+ if (event->type < FAULT_TYPE_MAX)
+ strncpy(type_str, fault_type[event->type], strlen(fault_type[event->type]));
+ else
+ strncpy(type_str, "Unknown", strlen("Unknown"));
+
+ err = devlink_fmsg_string_pair_put(fmsg, "Fault type", type_str);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_binary_pair_put(fmsg, "Fault raw data",
+ event->event.val, sizeof(event->event.val));
+ if (err)
+ return err;
+
+ switch (event->type) {
+ case FAULT_TYPE_CHIP:
+ err = chip_fault_show(fmsg, event);
+ if (err)
+ return err;
+ break;
+ case FAULT_TYPE_UCODE:
+ err = devlink_fmsg_u8_pair_put(fmsg, "Cause_id", event->event.ucode.cause_id);
+ if (err)
+ return err;
+ err = devlink_fmsg_u8_pair_put(fmsg, "core_id", event->event.ucode.core_id);
+ if (err)
+ return err;
+ err = devlink_fmsg_u8_pair_put(fmsg, "c_id", event->event.ucode.c_id);
+ if (err)
+ return err;
+ err = devlink_fmsg_u8_pair_put(fmsg, "epc", event->event.ucode.epc);
+ if (err)
+ return err;
+ break;
+ case FAULT_TYPE_MEM_RD_TIMEOUT:
+ case FAULT_TYPE_MEM_WR_TIMEOUT:
+ err = devlink_fmsg_u32_pair_put(fmsg, "Err_csr_ctrl",
+ event->event.mem_timeout.err_csr_ctrl);
+ if (err)
+ return err;
+ err = devlink_fmsg_u32_pair_put(fmsg, "err_csr_data",
+ event->event.mem_timeout.err_csr_data);
+ if (err)
+ return err;
+ err = devlink_fmsg_u32_pair_put(fmsg, "ctrl_tab",
+ event->event.mem_timeout.ctrl_tab);
+ if (err)
+ return err;
+ err = devlink_fmsg_u32_pair_put(fmsg, "mem_index",
+ event->event.mem_timeout.mem_index);
+ if (err)
+ return err;
+ break;
+ case FAULT_TYPE_REG_RD_TIMEOUT:
+ case FAULT_TYPE_REG_WR_TIMEOUT:
+ err = devlink_fmsg_u32_pair_put(fmsg, "Err_csr", event->event.reg_timeout.err_csr);
+ if (err)
+ return err;
+ break;
+ case FAULT_TYPE_PHY_FAULT:
+ err = devlink_fmsg_u8_pair_put(fmsg, "Op_type", event->event.phy_fault.op_type);
+ if (err)
+ return err;
+ err = devlink_fmsg_u8_pair_put(fmsg, "port_id", event->event.phy_fault.port_id);
+ if (err)
+ return err;
+ err = devlink_fmsg_u8_pair_put(fmsg, "dev_ad", event->event.phy_fault.dev_ad);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "csr_addr", event->event.phy_fault.csr_addr);
+ if (err)
+ return err;
+ err = devlink_fmsg_u32_pair_put(fmsg, "op_data", event->event.phy_fault.op_data);
+ if (err)
+ return err;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int hinic_hw_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ if (priv_ctx)
+ return fault_report_show(fmsg, priv_ctx);
+
+ return 0;
+}
+
+static int mgmt_watchdog_report_show(struct devlink_fmsg *fmsg,
+ struct hinic_mgmt_watchdog_info *watchdog_info)
+{
+ int err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "Mgmt deadloop time_h", watchdog_info->curr_time_h);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "time_l", watchdog_info->curr_time_l);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "task_id", watchdog_info->task_id);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "sp", watchdog_info->sp);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "stack_current_used", watchdog_info->curr_used);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "peak_used", watchdog_info->peak_used);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "\n Overflow_flag", watchdog_info->is_overflow);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "stack_top", watchdog_info->stack_top);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "stack_bottom", watchdog_info->stack_bottom);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "mgmt_pc", watchdog_info->pc);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "lr", watchdog_info->lr);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "cpsr", watchdog_info->cpsr);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_binary_pair_put(fmsg, "Mgmt register info",
+ watchdog_info->reg, sizeof(watchdog_info->reg));
+ if (err)
+ return err;
+
+ err = devlink_fmsg_binary_pair_put(fmsg, "Mgmt dump stack(start from sp)",
+ watchdog_info->data, sizeof(watchdog_info->data));
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int hinic_fw_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ if (priv_ctx)
+ return mgmt_watchdog_report_show(fmsg, priv_ctx);
+
+ return 0;
+}
+
+static const struct devlink_health_reporter_ops hinic_hw_fault_reporter_ops = {
+ .name = "hw",
+ .dump = hinic_hw_reporter_dump,
+};
+
+static const struct devlink_health_reporter_ops hinic_fw_fault_reporter_ops = {
+ .name = "fw",
+ .dump = hinic_fw_reporter_dump,
+};
+
+int hinic_health_reporters_create(struct hinic_devlink_priv *priv)
+{
+ struct devlink *devlink = priv_to_devlink(priv);
+
+ priv->hw_fault_reporter =
+ devlink_health_reporter_create(devlink, &hinic_hw_fault_reporter_ops,
+ 0, priv);
+ if (IS_ERR(priv->hw_fault_reporter)) {
+ dev_warn(&priv->hwdev->hwif->pdev->dev, "Failed to create hw fault reporter, err: %ld\n",
+ PTR_ERR(priv->hw_fault_reporter));
+ return PTR_ERR(priv->hw_fault_reporter);
+ }
+
+ priv->fw_fault_reporter =
+ devlink_health_reporter_create(devlink, &hinic_fw_fault_reporter_ops,
+ 0, priv);
+ if (IS_ERR(priv->fw_fault_reporter)) {
+ dev_warn(&priv->hwdev->hwif->pdev->dev, "Failed to create fw fault reporter, err: %ld\n",
+ PTR_ERR(priv->fw_fault_reporter));
+ devlink_health_reporter_destroy(priv->hw_fault_reporter);
+ priv->hw_fault_reporter = NULL;
+ return PTR_ERR(priv->fw_fault_reporter);
+ }
+
+ return 0;
+}
+
+void hinic_health_reporters_destroy(struct hinic_devlink_priv *priv)
+{
+ if (!IS_ERR_OR_NULL(priv->fw_fault_reporter)) {
+ devlink_health_reporter_destroy(priv->fw_fault_reporter);
+ priv->fw_fault_reporter = NULL;
+ }
+
+ if (!IS_ERR_OR_NULL(priv->hw_fault_reporter)) {
+ devlink_health_reporter_destroy(priv->hw_fault_reporter);
+ priv->hw_fault_reporter = NULL;
+ }
+}
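
hinic_health_reporters_create() above follows the usual devlink-health lifecycle: create each reporter, unwind the earlier one if a later create fails, and make destroy tolerant of partial setup via IS_ERR_OR_NULL(). The consumer side, sketched from pf_fault_event_handler() later in this patch, hands a context blob to devlink_health_report(), which in turn drives the reporter's .dump callback:

#include <net/devlink.h>

/* hinic_devlink_priv and its reporter fields are defined by this patch */
static void demo_report_hw_fault(struct hinic_devlink_priv *priv,
				 void *fault_ctx)
{
	if (IS_ERR_OR_NULL(priv->hw_fault_reporter))
		return;		/* reporter creation may have failed */

	devlink_health_report(priv->hw_fault_reporter,
			      "HW fatal error reported", fault_ctx);
}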
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.h b/drivers/net/ethernet/huawei/hinic/hinic_devlink.h
index 604e95a7c5ce..a090ebcfaabb 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_devlink.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_devlink.h
@@ -7,6 +7,7 @@
#define __HINIC_DEVLINK_H__
#include <net/devlink.h>
+#include "hinic_dev.h"
#define MAX_FW_TYPE_NUM 30
#define HINIC_MAGIC_NUM 0x18221100
@@ -109,7 +110,10 @@ struct host_image_st {
struct devlink *hinic_devlink_alloc(void);
void hinic_devlink_free(struct devlink *devlink);
-int hinic_devlink_register(struct devlink *devlink, struct device *dev);
-void hinic_devlink_unregister(struct devlink *devlink);
+int hinic_devlink_register(struct hinic_devlink_priv *priv, struct device *dev);
+void hinic_devlink_unregister(struct hinic_devlink_priv *priv);
+
+int hinic_health_reporters_create(struct hinic_devlink_priv *priv);
+void hinic_health_reporters_destroy(struct hinic_devlink_priv *priv);
#endif /* __HINIC_DEVLINK_H__ */
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
index cb5ebae54f73..6bb65ade1d77 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
@@ -1766,6 +1766,25 @@ static int hinic_get_module_eeprom(struct net_device *netdev,
return 0;
}
+static int
+hinic_get_link_ext_state(struct net_device *netdev,
+ struct ethtool_link_ext_state_info *link_ext_state_info)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+
+ if (netif_carrier_ok(netdev))
+ return -ENODATA;
+
+ if (nic_dev->cable_unplugged)
+ link_ext_state_info->link_ext_state =
+ ETHTOOL_LINK_EXT_STATE_NO_CABLE;
+ else if (nic_dev->module_unrecognized)
+ link_ext_state_info->link_ext_state =
+ ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH;
+
+ return 0;
+}
+
static const struct ethtool_ops hinic_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
ETHTOOL_COALESCE_RX_MAX_FRAMES |
@@ -1776,6 +1795,7 @@ static const struct ethtool_ops hinic_ethtool_ops = {
.set_link_ksettings = hinic_set_link_ksettings,
.get_drvinfo = hinic_get_drvinfo,
.get_link = ethtool_op_get_link,
+ .get_link_ext_state = hinic_get_link_ext_state,
.get_ringparam = hinic_get_ringparam,
.set_ringparam = hinic_set_ringparam,
.get_coalesce = hinic_get_coalesce,
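
hinic_get_link_ext_state() above follows the .get_link_ext_state contract: return -ENODATA while the carrier is up (there is nothing to explain), otherwise fill in the extended state that newer ethtool prints alongside "Link detected: no". Reduced to its skeleton, with names mine:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static int demo_get_link_ext_state(struct net_device *netdev,
		struct ethtool_link_ext_state_info *info)
{
	if (netif_carrier_ok(netdev))
		return -ENODATA;	/* link is up: nothing to report */

	info->link_ext_state = ETHTOOL_LINK_EXT_STATE_NO_CABLE;
	return 0;
}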
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
index 583fd24c29cf..29e88e25a4a4 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
@@ -112,6 +112,26 @@ static u32 get_hw_cons_idx(struct hinic_api_cmd_chain *chain)
return HINIC_API_CMD_STATUS_GET(val, CONS_IDX);
}
+static void dump_api_chain_reg(struct hinic_api_cmd_chain *chain)
+{
+ u32 addr, val;
+
+ addr = HINIC_CSR_API_CMD_STATUS_ADDR(chain->chain_type);
+ val = hinic_hwif_read_reg(chain->hwif, addr);
+
+ dev_err(&chain->hwif->pdev->dev, "Chain type: 0x%x, cpld error: 0x%x, check error: 0x%x, current fsm: 0x%x\n",
+ chain->chain_type, HINIC_API_CMD_STATUS_GET(val, CPLD_ERR),
+ HINIC_API_CMD_STATUS_GET(val, CHKSUM_ERR),
+ HINIC_API_CMD_STATUS_GET(val, FSM));
+
+ dev_err(&chain->hwif->pdev->dev, "Chain hw current ci: 0x%x\n",
+ HINIC_API_CMD_STATUS_GET(val, CONS_IDX));
+
+ addr = HINIC_CSR_API_CMD_CHAIN_PI_ADDR(chain->chain_type);
+ val = hinic_hwif_read_reg(chain->hwif, addr);
+ dev_err(&chain->hwif->pdev->dev, "Chain hw current pi: 0x%x\n", val);
+}
+
/**
* chain_busy - check if the chain is still processing last requests
* @chain: chain to check
@@ -131,8 +151,10 @@ static int chain_busy(struct hinic_api_cmd_chain *chain)
/* check for a space for a new command */
if (chain->cons_idx == MASKED_IDX(chain, prod_idx + 1)) {
- dev_err(&pdev->dev, "API CMD chain %d is busy\n",
- chain->chain_type);
+ dev_err(&pdev->dev, "API CMD chain %d is busy, cons_idx: %d, prod_idx: %d\n",
+ chain->chain_type, chain->cons_idx,
+ chain->prod_idx);
+ dump_api_chain_reg(chain);
return -EBUSY;
}
break;
@@ -332,6 +354,7 @@ static int wait_for_api_cmd_completion(struct hinic_api_cmd_chain *chain)
err = wait_for_status_poll(chain);
if (err) {
dev_err(&pdev->dev, "API CMD Poll status timeout\n");
+ dump_api_chain_reg(chain);
break;
}
break;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h
index 0ba00fd828df..6d1654b050ad 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h
@@ -103,10 +103,14 @@
HINIC_API_CMD_STATUS_HEADER_##member##_MASK)
#define HINIC_API_CMD_STATUS_CONS_IDX_SHIFT 0
+#define HINIC_API_CMD_STATUS_FSM_SHIFT 24
#define HINIC_API_CMD_STATUS_CHKSUM_ERR_SHIFT 28
+#define HINIC_API_CMD_STATUS_CPLD_ERR_SHIFT 30
#define HINIC_API_CMD_STATUS_CONS_IDX_MASK 0xFFFFFF
+#define HINIC_API_CMD_STATUS_FSM_MASK 0xFU
#define HINIC_API_CMD_STATUS_CHKSUM_ERR_MASK 0x3
+#define HINIC_API_CMD_STATUS_CPLD_ERR_MASK 0x1U
#define HINIC_API_CMD_STATUS_GET(val, member) \
(((val) >> HINIC_API_CMD_STATUS_##member##_SHIFT) & \
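
The new FSM and CPLD_ERR definitions above slot into the existing SHIFT/MASK/GET scheme, where token pasting turns a field name into its shift and mask. A self-contained userspace rendition of the idiom, with an invented register value:

#include <stdint.h>
#include <stdio.h>

#define STATUS_FSM_SHIFT	24
#define STATUS_FSM_MASK		0xFU
#define STATUS_GET(val, member) \
	(((val) >> STATUS_##member##_SHIFT) & STATUS_##member##_MASK)

int main(void)
{
	uint32_t val = 0x35001234;	/* pretend CSR readout */

	/* (0x35001234 >> 24) & 0xF == 0x5 */
	printf("fsm state: 0x%x\n", STATUS_GET(val, FSM));
	return 0;
}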
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
index cb5b6e5f787f..e0eb294779ec 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
@@ -401,6 +401,7 @@ static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq,
spin_unlock_bh(&cmdq->cmdq_lock);
+ hinic_dump_ceq_info(cmdq->hwdev);
return -ETIMEDOUT;
}
@@ -807,6 +808,7 @@ static int init_cmdqs_ctxt(struct hinic_hwdev *hwdev,
cmdq_type = HINIC_CMDQ_SYNC;
for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
+ cmdqs->cmdq[cmdq_type].hwdev = hwdev;
err = init_cmdq(&cmdqs->cmdq[cmdq_type],
&cmdqs->saved_wqs[cmdq_type], cmdq_type,
db_area[cmdq_type]);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
index 3e4b0aef9fe6..f40c31e1879f 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
@@ -130,6 +130,8 @@ struct hinic_cmdq_ctxt {
};
struct hinic_cmdq {
+ struct hinic_hwdev *hwdev;
+
struct hinic_wq *wq;
enum hinic_cmdq_type cmdq_type;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
index 9831c14324e6..0c737765d113 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
@@ -16,8 +16,11 @@
#include <linux/log2.h>
#include <linux/err.h>
#include <linux/netdevice.h>
+#include <net/devlink.h>
+#include "hinic_devlink.h"
#include "hinic_sriov.h"
+#include "hinic_dev.h"
#include "hinic_hw_if.h"
#include "hinic_hw_eqs.h"
#include "hinic_hw_mgmt.h"
@@ -255,9 +258,9 @@ static int init_fw_ctxt(struct hinic_hwdev *hwdev)
&fw_ctxt, sizeof(fw_ctxt),
&fw_ctxt, &out_size);
if (err || (out_size != sizeof(fw_ctxt)) || fw_ctxt.status) {
- dev_err(&pdev->dev, "Failed to init FW ctxt, ret = %d\n",
- fw_ctxt.status);
- return -EFAULT;
+ dev_err(&pdev->dev, "Failed to init FW ctxt, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, fw_ctxt.status, out_size);
+ return -EIO;
}
return 0;
@@ -422,9 +425,9 @@ static int get_base_qpn(struct hinic_hwdev *hwdev, u16 *base_qpn)
&cmd_base_qpn, sizeof(cmd_base_qpn),
&cmd_base_qpn, &out_size);
if (err || (out_size != sizeof(cmd_base_qpn)) || cmd_base_qpn.status) {
- dev_err(&pdev->dev, "Failed to get base qpn, status = %d\n",
- cmd_base_qpn.status);
- return -EFAULT;
+ dev_err(&pdev->dev, "Failed to get base qpn, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, cmd_base_qpn.status, out_size);
+ return -EIO;
}
*base_qpn = cmd_base_qpn.qpn;
@@ -621,6 +624,113 @@ static void nic_mgmt_msg_handler(void *handle, u8 cmd, void *buf_in,
nic_cb->cb_state &= ~HINIC_CB_RUNNING;
}
+static void hinic_comm_recv_mgmt_self_cmd_reg(struct hinic_pfhwdev *pfhwdev,
+ u8 cmd,
+ comm_mgmt_self_msg_proc proc)
+{
+ u8 cmd_idx;
+
+ cmd_idx = pfhwdev->proc.cmd_num;
+ if (cmd_idx >= HINIC_COMM_SELF_CMD_MAX) {
+ dev_err(&pfhwdev->hwdev.hwif->pdev->dev,
+ "Register recv mgmt process failed, cmd: 0x%x\n", cmd);
+ return;
+ }
+
+ pfhwdev->proc.info[cmd_idx].cmd = cmd;
+ pfhwdev->proc.info[cmd_idx].proc = proc;
+ pfhwdev->proc.cmd_num++;
+}
+
+static void hinic_comm_recv_mgmt_self_cmd_unreg(struct hinic_pfhwdev *pfhwdev,
+ u8 cmd)
+{
+ u8 cmd_idx;
+
+ cmd_idx = pfhwdev->proc.cmd_num;
+ if (cmd_idx >= HINIC_COMM_SELF_CMD_MAX) {
+ dev_err(&pfhwdev->hwdev.hwif->pdev->dev, "Unregister recv mgmt process failed, cmd: 0x%x\n",
+ cmd);
+ return;
+ }
+
+ for (cmd_idx = 0; cmd_idx < HINIC_COMM_SELF_CMD_MAX; cmd_idx++) {
+ if (cmd == pfhwdev->proc.info[cmd_idx].cmd) {
+ pfhwdev->proc.info[cmd_idx].cmd = 0;
+ pfhwdev->proc.info[cmd_idx].proc = NULL;
+ pfhwdev->proc.cmd_num--;
+ }
+ }
+}
+
+static void comm_mgmt_msg_handler(void *handle, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size)
+{
+ struct hinic_pfhwdev *pfhwdev = handle;
+ u8 cmd_idx;
+
+ for (cmd_idx = 0; cmd_idx < pfhwdev->proc.cmd_num; cmd_idx++) {
+ if (cmd == pfhwdev->proc.info[cmd_idx].cmd) {
+ if (!pfhwdev->proc.info[cmd_idx].proc) {
+ dev_warn(&pfhwdev->hwdev.hwif->pdev->dev,
+ "PF recv mgmt comm msg handle null, cmd: 0x%x\n",
+ cmd);
+ } else {
+ pfhwdev->proc.info[cmd_idx].proc
+ (&pfhwdev->hwdev, buf_in, in_size,
+ buf_out, out_size);
+ }
+
+ return;
+ }
+ }
+
+ dev_warn(&pfhwdev->hwdev.hwif->pdev->dev, "Received unknown mgmt cpu event: 0x%x\n",
+ cmd);
+
+ *out_size = 0;
+}
+
+/* pf fault report event */
+static void pf_fault_event_handler(void *dev, void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_cmd_fault_event *fault_event = buf_in;
+ struct hinic_hwdev *hwdev = dev;
+
+ if (in_size != sizeof(*fault_event)) {
+ dev_err(&hwdev->hwif->pdev->dev, "Invalid fault event report, length: %d, should be %zu\n",
+ in_size, sizeof(*fault_event));
+ return;
+ }
+
+ if (!hwdev->devlink_dev || IS_ERR_OR_NULL(hwdev->devlink_dev->hw_fault_reporter))
+ return;
+
+ devlink_health_report(hwdev->devlink_dev->hw_fault_reporter,
+ "HW fatal error reported", &fault_event->event);
+}
+
+static void mgmt_watchdog_timeout_event_handler(void *dev,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_mgmt_watchdog_info *watchdog_info = buf_in;
+ struct hinic_hwdev *hwdev = dev;
+
+ if (in_size != sizeof(*watchdog_info)) {
+ dev_err(&hwdev->hwif->pdev->dev, "Invalid mgmt watchdog report, length: %d, should be %zu\n",
+ in_size, sizeof(*watchdog_info));
+ return;
+ }
+
+ if (!hwdev->devlink_dev || IS_ERR_OR_NULL(hwdev->devlink_dev->fw_fault_reporter))
+ return;
+
+ devlink_health_report(hwdev->devlink_dev->fw_fault_reporter,
+ "FW fatal error reported", watchdog_info);
+}
+
/**
* init_pfhwdev - Initialize the extended components of PF
* @pfhwdev: the HW device for PF
@@ -640,20 +750,37 @@ static int init_pfhwdev(struct hinic_pfhwdev *pfhwdev)
return err;
}
+ err = hinic_devlink_register(hwdev->devlink_dev, &pdev->dev);
+ if (err) {
+ dev_err(&hwif->pdev->dev, "Failed to register devlink\n");
+ hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt);
+ return err;
+ }
+
err = hinic_func_to_func_init(hwdev);
if (err) {
dev_err(&hwif->pdev->dev, "Failed to init mailbox\n");
+ hinic_devlink_unregister(hwdev->devlink_dev);
hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt);
return err;
}
- if (!HINIC_IS_VF(hwif))
+ if (!HINIC_IS_VF(hwif)) {
hinic_register_mgmt_msg_cb(&pfhwdev->pf_to_mgmt,
HINIC_MOD_L2NIC, pfhwdev,
nic_mgmt_msg_handler);
- else
+ hinic_register_mgmt_msg_cb(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
+ pfhwdev, comm_mgmt_msg_handler);
+ hinic_comm_recv_mgmt_self_cmd_reg(pfhwdev,
+ HINIC_COMM_CMD_FAULT_REPORT,
+ pf_fault_event_handler);
+ hinic_comm_recv_mgmt_self_cmd_reg
+ (pfhwdev, HINIC_COMM_CMD_WATCHDOG_INFO,
+ mgmt_watchdog_timeout_event_handler);
+ } else {
hinic_register_vf_mbox_cb(hwdev, HINIC_MOD_L2NIC,
nic_mgmt_msg_handler);
+ }
hinic_set_pf_action(hwif, HINIC_PF_MGMT_ACTIVE);
@@ -670,14 +797,23 @@ static void free_pfhwdev(struct hinic_pfhwdev *pfhwdev)
hinic_set_pf_action(hwdev->hwif, HINIC_PF_MGMT_INIT);
- if (!HINIC_IS_VF(hwdev->hwif))
+ if (!HINIC_IS_VF(hwdev->hwif)) {
+ hinic_comm_recv_mgmt_self_cmd_unreg(pfhwdev,
+ HINIC_COMM_CMD_WATCHDOG_INFO);
+ hinic_comm_recv_mgmt_self_cmd_unreg(pfhwdev,
+ HINIC_COMM_CMD_FAULT_REPORT);
+ hinic_unregister_mgmt_msg_cb(&pfhwdev->pf_to_mgmt,
+ HINIC_MOD_COMM);
hinic_unregister_mgmt_msg_cb(&pfhwdev->pf_to_mgmt,
HINIC_MOD_L2NIC);
- else
+ } else {
hinic_unregister_vf_mbox_cb(hwdev, HINIC_MOD_L2NIC);
+ }
hinic_func_to_func_free(hwdev);
+ hinic_devlink_unregister(hwdev->devlink_dev);
+
hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt);
}
@@ -777,7 +913,7 @@ int hinic_set_interrupt_cfg(struct hinic_hwdev *hwdev,
*
* Initialize the NIC HW device and return a pointer to it
**/
-struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev)
+struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev, struct devlink *devlink)
{
struct hinic_pfhwdev *pfhwdev;
struct hinic_hwdev *hwdev;
@@ -802,6 +938,8 @@ struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev)
hwdev = &pfhwdev->hwdev;
hwdev->hwif = hwif;
+ hwdev->devlink_dev = devlink_priv(devlink);
+ hwdev->devlink_dev->hwdev = hwdev;
err = init_msix(hwdev);
if (err) {
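
The handler registration and comm_mgmt_msg_handler() dispatch above implement a small fixed-capacity {cmd, proc} table scanned linearly. A self-contained userspace rendition of the same pattern (kernel types replaced with stdint ones; the command value 0x37 is borrowed from HINIC_COMM_CMD_FAULT_REPORT):

#include <stdint.h>
#include <stdio.h>

#define SELF_CMD_MAX 4

typedef void (*msg_proc)(void *buf, uint16_t len);

struct self_msg_info {
	uint8_t cmd;
	msg_proc proc;
};

static struct self_msg_info table[SELF_CMD_MAX];
static uint8_t cmd_num;

static void reg_handler(uint8_t cmd, msg_proc proc)
{
	if (cmd_num >= SELF_CMD_MAX)
		return;				/* table full, as in the driver */
	table[cmd_num].cmd = cmd;
	table[cmd_num].proc = proc;
	cmd_num++;
}

static void dispatch(uint8_t cmd, void *buf, uint16_t len)
{
	for (uint8_t i = 0; i < cmd_num; i++) {
		if (table[i].cmd == cmd && table[i].proc) {
			table[i].proc(buf, len);
			return;
		}
	}
	printf("unknown mgmt cpu event: 0x%x\n", cmd);
}

static void fault_handler(void *buf, uint16_t len)
{
	printf("fault report, %u bytes\n", (unsigned int)len);
}

int main(void)
{
	reg_handler(0x37, fault_handler);	/* FAULT_REPORT */
	dispatch(0x37, NULL, 16);		/* hits the handler */
	dispatch(0x99, NULL, 0);		/* falls through to the warning */
	return 0;
}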
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
index 94593a8ad667..2fb5f784f116 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
@@ -10,6 +10,7 @@
#include <linux/pci.h>
#include <linux/types.h>
#include <linux/bitops.h>
+#include <net/devlink.h>
#include "hinic_hw_if.h"
#include "hinic_hw_eqs.h"
@@ -164,9 +165,12 @@ enum hinic_ucode_cmd {
#define NIC_RSS_CMD_TEMP_FREE 0x02
enum hinic_mgmt_msg_cmd {
- HINIC_MGMT_MSG_CMD_BASE = 160,
+ HINIC_MGMT_MSG_CMD_BASE = 0xA0,
- HINIC_MGMT_MSG_CMD_LINK_STATUS = 160,
+ HINIC_MGMT_MSG_CMD_LINK_STATUS = 0xA0,
+
+ HINIC_MGMT_MSG_CMD_CABLE_PLUG_EVENT = 0xE5,
+ HINIC_MGMT_MSG_CMD_LINK_ERR_EVENT = 0xE6,
HINIC_MGMT_MSG_CMD_MAX,
};
@@ -348,6 +352,7 @@ struct hinic_hwdev {
struct hinic_cap nic_cap;
u8 port_id;
+ struct hinic_devlink_priv *devlink_dev;
};
struct hinic_nic_cb {
@@ -359,12 +364,29 @@ struct hinic_nic_cb {
unsigned long cb_state;
};
+#define HINIC_COMM_SELF_CMD_MAX 4
+
+typedef void (*comm_mgmt_self_msg_proc)(void *handle, void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size);
+
+struct comm_mgmt_self_msg_sub_info {
+ u8 cmd;
+ comm_mgmt_self_msg_proc proc;
+};
+
+struct comm_mgmt_self_msg_info {
+ u8 cmd_num;
+ struct comm_mgmt_self_msg_sub_info info[HINIC_COMM_SELF_CMD_MAX];
+};
+
struct hinic_pfhwdev {
struct hinic_hwdev hwdev;
struct hinic_pf_to_mgmt pf_to_mgmt;
struct hinic_nic_cb nic_cb[HINIC_MGMT_NUM_MSG_CMD];
+
+ struct comm_mgmt_self_msg_info proc;
};
struct hinic_dev_cap {
@@ -386,6 +408,126 @@ struct hinic_dev_cap {
u8 rsvd3[204];
};
+union hinic_fault_hw_mgmt {
+ u32 val[4];
+ /* valid only type == FAULT_TYPE_CHIP */
+ struct {
+ u8 node_id;
+ u8 err_level;
+ u16 err_type;
+ u32 err_csr_addr;
+ u32 err_csr_value;
+ /* func_id valid only if err_level == FAULT_LEVEL_SERIOUS_FLR */
+ u16 func_id;
+ u16 rsvd2;
+ } chip;
+
+ /* valid only if type == FAULT_TYPE_UCODE */
+ struct {
+ u8 cause_id;
+ u8 core_id;
+ u8 c_id;
+ u8 rsvd3;
+ u32 epc;
+ u32 rsvd4;
+ u32 rsvd5;
+ } ucode;
+
+ /* valid only if type == FAULT_TYPE_MEM_RD_TIMEOUT ||
+ * FAULT_TYPE_MEM_WR_TIMEOUT
+ */
+ struct {
+ u32 err_csr_ctrl;
+ u32 err_csr_data;
+ u32 ctrl_tab;
+ u32 mem_index;
+ } mem_timeout;
+
+ /* valid only if type == FAULT_TYPE_REG_RD_TIMEOUT ||
+ * FAULT_TYPE_REG_WR_TIMEOUT
+ */
+ struct {
+ u32 err_csr;
+ u32 rsvd6;
+ u32 rsvd7;
+ u32 rsvd8;
+ } reg_timeout;
+
+ struct {
+ /* 0: read; 1: write */
+ u8 op_type;
+ u8 port_id;
+ u8 dev_ad;
+ u8 rsvd9;
+ u32 csr_addr;
+ u32 op_data;
+ u32 rsvd10;
+ } phy_fault;
+};
+
+struct hinic_fault_event {
+ u8 type;
+ u8 fault_level;
+ u8 rsvd0[2];
+ union hinic_fault_hw_mgmt event;
+};
+
+struct hinic_cmd_fault_event {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ struct hinic_fault_event event;
+};
+
+enum hinic_fault_type {
+ FAULT_TYPE_CHIP,
+ FAULT_TYPE_UCODE,
+ FAULT_TYPE_MEM_RD_TIMEOUT,
+ FAULT_TYPE_MEM_WR_TIMEOUT,
+ FAULT_TYPE_REG_RD_TIMEOUT,
+ FAULT_TYPE_REG_WR_TIMEOUT,
+ FAULT_TYPE_PHY_FAULT,
+ FAULT_TYPE_MAX,
+};
+
+#define FAULT_SHOW_STR_LEN 16
+
+enum hinic_fault_err_level {
+ FAULT_LEVEL_FATAL,
+ FAULT_LEVEL_SERIOUS_RESET,
+ FAULT_LEVEL_SERIOUS_FLR,
+ FAULT_LEVEL_GENERAL,
+ FAULT_LEVEL_SUGGESTION,
+ FAULT_LEVEL_MAX
+};
+
+struct hinic_mgmt_watchdog_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u32 curr_time_h;
+ u32 curr_time_l;
+ u32 task_id;
+ u32 rsv;
+
+ u32 reg[13];
+ u32 pc;
+ u32 lr;
+ u32 cpsr;
+
+ u32 stack_top;
+ u32 stack_bottom;
+ u32 sp;
+ u32 curr_used;
+ u32 peak_used;
+ u32 is_overflow;
+
+ u32 stack_actlen;
+ u8 data[1024];
+};
+
void hinic_hwdev_cb_register(struct hinic_hwdev *hwdev,
enum hinic_mgmt_msg_cmd cmd, void *handle,
void (*handler)(void *handle, void *buf_in,
@@ -407,7 +549,7 @@ int hinic_hwdev_ifup(struct hinic_hwdev *hwdev, u16 sq_depth, u16 rq_depth);
void hinic_hwdev_ifdown(struct hinic_hwdev *hwdev);
-struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev);
+struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev, struct devlink *devlink);
void hinic_free_hwdev(struct hinic_hwdev *hwdev);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
index 397936cac304..ca8cb68a8d20 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
@@ -953,3 +953,42 @@ void hinic_ceqs_free(struct hinic_ceqs *ceqs)
for (q_id = 0; q_id < ceqs->num_ceqs; q_id++)
remove_eq(&ceqs->ceq[q_id]);
}
+
+void hinic_dump_ceq_info(struct hinic_hwdev *hwdev)
+{
+ struct hinic_eq *eq = NULL;
+ u32 addr, ci, pi;
+ int q_id;
+
+ for (q_id = 0; q_id < hwdev->func_to_io.ceqs.num_ceqs; q_id++) {
+ eq = &hwdev->func_to_io.ceqs.ceq[q_id];
+ addr = EQ_CONS_IDX_REG_ADDR(eq);
+ ci = hinic_hwif_read_reg(hwdev->hwif, addr);
+ addr = EQ_PROD_IDX_REG_ADDR(eq);
+ pi = hinic_hwif_read_reg(hwdev->hwif, addr);
+ dev_err(&hwdev->hwif->pdev->dev, "Ceq id: %d, ci: 0x%08x, sw_ci: 0x%08x, pi: 0x%x, tasklet_state: 0x%lx, wrap: %d, ceqe: 0x%x\n",
+ q_id, ci, eq->cons_idx, pi,
+ eq->ceq_tasklet.state,
+ eq->wrapped, be32_to_cpu(*(__be32 *)(GET_CURR_CEQ_ELEM(eq))));
+ }
+}
+
+void hinic_dump_aeq_info(struct hinic_hwdev *hwdev)
+{
+ struct hinic_aeq_elem *aeqe_pos = NULL;
+ struct hinic_eq *eq = NULL;
+ u32 addr, ci, pi;
+ int q_id;
+
+ for (q_id = 0; q_id < hwdev->aeqs.num_aeqs; q_id++) {
+ eq = &hwdev->aeqs.aeq[q_id];
+ addr = EQ_CONS_IDX_REG_ADDR(eq);
+ ci = hinic_hwif_read_reg(hwdev->hwif, addr);
+ addr = EQ_PROD_IDX_REG_ADDR(eq);
+ pi = hinic_hwif_read_reg(hwdev->hwif, addr);
+ aeqe_pos = GET_CURR_AEQ_ELEM(eq);
+ dev_err(&hwdev->hwif->pdev->dev, "Aeq id: %d, ci: 0x%08x, pi: 0x%x, work_state: 0x%x, wrap: %d, desc: 0x%x\n",
+ q_id, ci, pi, work_busy(&eq->aeq_work.work),
+ eq->wrapped, be32_to_cpu(aeqe_pos->desc));
+ }
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h
index 74b9ff90640c..43065fc70869 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h
@@ -162,7 +162,7 @@ enum hinic_eqe_state {
struct hinic_aeq_elem {
u8 data[HINIC_AEQE_DATA_SIZE];
- u32 desc;
+ __be32 desc;
};
struct hinic_eq_work {
@@ -254,4 +254,8 @@ int hinic_ceqs_init(struct hinic_ceqs *ceqs, struct hinic_hwif *hwif,
void hinic_ceqs_free(struct hinic_ceqs *ceqs);
+void hinic_dump_ceq_info(struct hinic_hwdev *hwdev);
+
+void hinic_dump_aeq_info(struct hinic_hwdev *hwdev);
+
#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
index cf127d896ba6..bc8925c0c982 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
@@ -21,6 +21,8 @@
#define WAIT_HWIF_READY_TIMEOUT 10000
+#define HINIC_SELFTEST_RESULT 0x883C
+
/**
* hinic_msix_attr_set - set message attribute for msix entry
* @hwif: the HW interface of a pci function device
@@ -369,6 +371,26 @@ u16 hinic_pf_id_of_vf_hw(struct hinic_hwif *hwif)
return HINIC_FA0_GET(attr0, PF_IDX);
}
+static void __print_selftest_reg(struct hinic_hwif *hwif)
+{
+ u32 addr, attr0, attr1;
+
+ addr = HINIC_CSR_FUNC_ATTR1_ADDR;
+ attr1 = hinic_hwif_read_reg(hwif, addr);
+
+ if (attr1 == HINIC_PCIE_LINK_DOWN) {
+ dev_err(&hwif->pdev->dev, "PCIE is link down\n");
+ return;
+ }
+
+ addr = HINIC_CSR_FUNC_ATTR0_ADDR;
+ attr0 = hinic_hwif_read_reg(hwif, addr);
+ if (HINIC_FA0_GET(attr0, FUNC_TYPE) != HINIC_VF &&
+ !HINIC_FA0_GET(attr0, PCI_INTF_IDX))
+ dev_err(&hwif->pdev->dev, "Selftest reg: 0x%08x\n",
+ hinic_hwif_read_reg(hwif, HINIC_SELFTEST_RESULT));
+}
+
/**
* hinic_init_hwif - initialize the hw interface
* @hwif: the HW interface of a pci function device
@@ -398,6 +420,7 @@ int hinic_init_hwif(struct hinic_hwif *hwif, struct pci_dev *pdev)
err = wait_hwif_ready(hwif);
if (err) {
dev_err(&pdev->dev, "HW interface is not ready\n");
+ __print_selftest_reg(hwif);
goto err_hwif_ready;
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
index 0872e035faa1..c06f2253151e 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
@@ -12,6 +12,8 @@
#include <linux/types.h>
#include <asm/byteorder.h>
+#define HINIC_PCIE_LINK_DOWN 0xFFFFFFFF
+
#define HINIC_DMA_ATTR_ST_SHIFT 0
#define HINIC_DMA_ATTR_AT_SHIFT 8
#define HINIC_DMA_ATTR_PH_SHIFT 10
@@ -249,13 +251,17 @@ struct hinic_hwif {
static inline u32 hinic_hwif_read_reg(struct hinic_hwif *hwif, u32 reg)
{
- return be32_to_cpu(readl(hwif->cfg_regs_bar + reg));
+ u32 out = readl(hwif->cfg_regs_bar + reg);
+
+ return be32_to_cpu(*(__be32 *)&out);
}
static inline void hinic_hwif_write_reg(struct hinic_hwif *hwif, u32 reg,
u32 val)
{
- writel(cpu_to_be32(val), hwif->cfg_regs_bar + reg);
+ __be32 in = cpu_to_be32(val);
+
+ writel(*(u32 *)&in, hwif->cfg_regs_bar + reg);
}
int hinic_msix_attr_set(struct hinic_hwif *hwif, u16 msix_index,
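
The hunk above replaces be32_to_cpu(readl(...)) with an explicit reinterpretation of the 32-bit payload as __be32. readl()/writel() already return and accept CPU-order u32 values, so feeding their result straight to be32_to_cpu() is a sparse type mismatch; routing the raw bytes through a __be32 makes the single endianness conversion explicit. As far as the hunk shows, behaviour is unchanged and this is a type-correctness cleanup; that reading is mine, not the commit's own wording. The resulting accessor pair, sketched:

#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/types.h>

static inline u32 demo_read_reg(void __iomem *base, u32 reg)
{
	u32 out = readl(base + reg);

	/* reinterpret the raw bytes as big-endian, then convert once */
	return be32_to_cpu(*(__be32 *)&out);
}

static inline void demo_write_reg(void __iomem *base, u32 reg, u32 val)
{
	__be32 in = cpu_to_be32(val);

	writel(*(u32 *)&in, base + reg);
}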
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
index bc2f87e6cb5d..47c93f946b94 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
@@ -650,6 +650,7 @@ wait_for_mbox_seg_completion(struct hinic_mbox_func_to_func *func_to_func,
if (!wait_for_completion_timeout(done, jif)) {
dev_err(&hwdev->hwif->pdev->dev, "Send mailbox segment timeout\n");
dump_mox_reg(hwdev);
+ hinic_dump_aeq_info(hwdev);
return -ETIMEDOUT;
}
@@ -897,6 +898,7 @@ int hinic_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
set_mbox_to_func_event(func_to_func, EVENT_TIMEOUT);
dev_err(&func_to_func->hwif->pdev->dev,
"Send mbox msg timeout, msg_id: %d\n", msg_info.msg_id);
+ hinic_dump_aeq_info(func_to_func->hwdev);
err = -ETIMEDOUT;
goto err_send_mbox;
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
index e0f5a81d8620..c6ce5966284c 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
@@ -12,8 +12,10 @@
#include <linux/semaphore.h>
#include <linux/completion.h>
#include <linux/slab.h>
+#include <net/devlink.h>
#include <asm/barrier.h>
+#include "hinic_devlink.h"
#include "hinic_hw_if.h"
#include "hinic_hw_eqs.h"
#include "hinic_hw_api_cmd.h"
@@ -274,6 +276,7 @@ static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt,
if (!wait_for_completion_timeout(recv_done, timeo)) {
dev_err(&pdev->dev, "MGMT timeout, MSG id = %d\n", msg_id);
+ hinic_dump_aeq_info(pf_to_mgmt->hwdev);
err = -ETIMEDOUT;
goto unlock_sync_msg;
}
@@ -617,10 +620,15 @@ int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt,
if (HINIC_IS_VF(hwif))
return 0;
+ err = hinic_health_reporters_create(hwdev->devlink_dev);
+ if (err)
+ return err;
+
sema_init(&pf_to_mgmt->sync_msg_lock, 1);
pf_to_mgmt->workq = create_singlethread_workqueue("hinic_mgmt");
if (!pf_to_mgmt->workq) {
dev_err(&pdev->dev, "Failed to initialize MGMT workqueue\n");
+ hinic_health_reporters_destroy(hwdev->devlink_dev);
return -ENOMEM;
}
pf_to_mgmt->sync_msg_id = 0;
@@ -628,12 +636,14 @@ int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt,
err = alloc_msg_buf(pf_to_mgmt);
if (err) {
dev_err(&pdev->dev, "Failed to allocate msg buffers\n");
+ hinic_health_reporters_destroy(hwdev->devlink_dev);
return err;
}
err = hinic_api_cmd_init(pf_to_mgmt->cmd_chain, hwif);
if (err) {
dev_err(&pdev->dev, "Failed to initialize cmd chains\n");
+ hinic_health_reporters_destroy(hwdev->devlink_dev);
return err;
}
@@ -658,4 +668,5 @@ void hinic_pf_to_mgmt_free(struct hinic_pf_to_mgmt *pf_to_mgmt)
hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MSG_FROM_MGMT_CPU);
hinic_api_cmd_free(pf_to_mgmt->cmd_chain);
destroy_workqueue(pf_to_mgmt->workq);
+ hinic_health_reporters_destroy(hwdev->devlink_dev);
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h
index 21b93b654d6b..f626100b85c1 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h
@@ -81,6 +81,8 @@ enum hinic_comm_cmd {
HINIC_COMM_CMD_MSI_CTRL_REG_WR_BY_UP,
HINIC_COMM_CMD_MSI_CTRL_REG_RD_BY_UP,
+ HINIC_COMM_CMD_FAULT_REPORT = 0x37,
+
HINIC_COMM_CMD_SET_LED_STATUS = 0x4a,
HINIC_COMM_CMD_L2NIC_RESET = 0x4b,
@@ -89,6 +91,8 @@ enum hinic_comm_cmd {
HINIC_COMM_CMD_GET_BOARD_INFO = 0x52,
+ HINIC_COMM_CMD_WATCHDOG_INFO = 0x56,
+
HINIC_COMM_CMD_MAX,
};
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index c4c6f9c29f0e..501056fd32ee 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -444,8 +444,11 @@ int hinic_open(struct net_device *netdev)
if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
hinic_notify_all_vfs_link_changed(nic_dev->hwdev, link_state);
- if (link_state == HINIC_LINK_STATE_UP)
+ if (link_state == HINIC_LINK_STATE_UP) {
nic_dev->flags |= HINIC_LINK_UP;
+ nic_dev->cable_unplugged = false;
+ nic_dev->module_unrecognized = false;
+ }
nic_dev->flags |= HINIC_INTF_UP;
@@ -935,6 +938,8 @@ static void link_status_event_handler(void *handle, void *buf_in, u16 in_size,
down(&nic_dev->mgmt_lock);
nic_dev->flags |= HINIC_LINK_UP;
+ nic_dev->cable_unplugged = false;
+ nic_dev->module_unrecognized = false;
if ((nic_dev->flags & (HINIC_LINK_UP | HINIC_INTF_UP)) ==
(HINIC_LINK_UP | HINIC_INTF_UP)) {
@@ -971,6 +976,39 @@ static void link_status_event_handler(void *handle, void *buf_in, u16 in_size,
*out_size = sizeof(*ret_link_status);
}
+static void cable_plug_event(void *handle,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_cable_plug_event *plug_event = buf_in;
+ struct hinic_dev *nic_dev = handle;
+
+ nic_dev->cable_unplugged = plug_event->plugged ? false : true;
+
+ *out_size = sizeof(*plug_event);
+ plug_event = buf_out;
+ plug_event->status = 0;
+}
+
+static void link_err_event(void *handle,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_link_err_event *link_err = buf_in;
+ struct hinic_dev *nic_dev = handle;
+
+ if (link_err->err_type >= LINK_ERR_NUM)
+ netif_info(nic_dev, link, nic_dev->netdev,
+ "Link failed, Unknown error type: 0x%x\n",
+ link_err->err_type);
+ else
+ nic_dev->module_unrecognized = true;
+
+ *out_size = sizeof(*link_err);
+ link_err = buf_out;
+ link_err->status = 0;
+}
+
static int set_features(struct hinic_dev *nic_dev,
netdev_features_t pre_features,
netdev_features_t features, bool force_change)
@@ -1077,28 +1115,24 @@ static int nic_dev_init(struct pci_dev *pdev)
struct hinic_rx_mode_work *rx_mode_work;
struct hinic_txq_stats *tx_stats;
struct hinic_rxq_stats *rx_stats;
- struct hinic_devlink_priv *priv;
struct hinic_dev *nic_dev;
struct net_device *netdev;
struct hinic_hwdev *hwdev;
struct devlink *devlink;
int err, num_qps;
- hwdev = hinic_init_hwdev(pdev);
- if (IS_ERR(hwdev)) {
- dev_err(&pdev->dev, "Failed to initialize HW device\n");
- return PTR_ERR(hwdev);
- }
-
devlink = hinic_devlink_alloc();
if (!devlink) {
dev_err(&pdev->dev, "Hinic devlink alloc failed\n");
- err = -ENOMEM;
- goto err_devlink_alloc;
+ return -ENOMEM;
}
- priv = devlink_priv(devlink);
- priv->hwdev = hwdev;
+ hwdev = hinic_init_hwdev(pdev, devlink);
+ if (IS_ERR(hwdev)) {
+ dev_err(&pdev->dev, "Failed to initialize HW device\n");
+ hinic_devlink_free(devlink);
+ return PTR_ERR(hwdev);
+ }
num_qps = hinic_hwdev_num_qps(hwdev);
if (num_qps <= 0) {
@@ -1161,10 +1195,6 @@ static int nic_dev_init(struct pci_dev *pdev)
goto err_workq;
}
- err = hinic_devlink_register(devlink, &pdev->dev);
- if (err)
- goto err_devlink_reg;
-
pci_set_drvdata(pdev, netdev);
err = hinic_port_get_mac(nic_dev, netdev->dev_addr);
@@ -1206,6 +1236,12 @@ static int nic_dev_init(struct pci_dev *pdev)
hinic_hwdev_cb_register(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_STATUS,
nic_dev, link_status_event_handler);
+ hinic_hwdev_cb_register(nic_dev->hwdev,
+ HINIC_MGMT_MSG_CMD_CABLE_PLUG_EVENT,
+ nic_dev, cable_plug_event);
+ hinic_hwdev_cb_register(nic_dev->hwdev,
+ HINIC_MGMT_MSG_CMD_LINK_ERR_EVENT,
+ nic_dev, link_err_event);
err = set_features(nic_dev, 0, nic_dev->netdev->features, true);
if (err)
@@ -1238,6 +1274,10 @@ err_init_intr:
err_set_pfc:
err_set_features:
hinic_hwdev_cb_unregister(nic_dev->hwdev,
+ HINIC_MGMT_MSG_CMD_LINK_ERR_EVENT);
+ hinic_hwdev_cb_unregister(nic_dev->hwdev,
+ HINIC_MGMT_MSG_CMD_CABLE_PLUG_EVENT);
+ hinic_hwdev_cb_unregister(nic_dev->hwdev,
HINIC_MGMT_MSG_CMD_LINK_STATUS);
cancel_work_sync(&rx_mode_work->work);
@@ -1246,17 +1286,15 @@ err_set_mtu:
err_add_mac:
err_get_mac:
pci_set_drvdata(pdev, NULL);
-err_devlink_reg:
destroy_workqueue(nic_dev->workq);
-
err_workq:
err_vlan_bitmap:
free_netdev(netdev);
err_alloc_etherdev:
err_num_qps:
-err_devlink_alloc:
hinic_free_hwdev(hwdev);
+ hinic_devlink_free(devlink);
return err;
}
@@ -1343,6 +1381,7 @@ static void hinic_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct devlink *devlink = nic_dev->devlink;
struct hinic_rx_mode_work *rx_mode_work;
if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) {
@@ -1357,6 +1396,10 @@ static void hinic_remove(struct pci_dev *pdev)
hinic_port_del_mac(nic_dev, netdev->dev_addr, 0);
hinic_hwdev_cb_unregister(nic_dev->hwdev,
+ HINIC_MGMT_MSG_CMD_LINK_ERR_EVENT);
+ hinic_hwdev_cb_unregister(nic_dev->hwdev,
+ HINIC_MGMT_MSG_CMD_CABLE_PLUG_EVENT);
+ hinic_hwdev_cb_unregister(nic_dev->hwdev,
HINIC_MGMT_MSG_CMD_LINK_STATUS);
rx_mode_work = &nic_dev->rx_mode_work;
@@ -1364,16 +1407,14 @@ static void hinic_remove(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
- hinic_devlink_unregister(nic_dev->devlink);
-
destroy_workqueue(nic_dev->workq);
- hinic_devlink_free(nic_dev->devlink);
-
hinic_free_hwdev(nic_dev->hwdev);
free_netdev(netdev);
+ hinic_devlink_free(devlink);
+
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c
index ba358bbb74a2..02cd635d6914 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.c
@@ -61,8 +61,8 @@ static int change_mac(struct hinic_dev *nic_dev, const u8 *addr,
(port_mac_cmd.status &&
port_mac_cmd.status != HINIC_PF_SET_VF_ALREADY &&
port_mac_cmd.status != HINIC_MGMT_STATUS_EXIST)) {
- dev_err(&pdev->dev, "Failed to change MAC, ret = %d\n",
- port_mac_cmd.status);
+ dev_err(&pdev->dev, "Failed to change MAC, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, port_mac_cmd.status, out_size);
return -EFAULT;
}
@@ -129,8 +129,8 @@ int hinic_port_get_mac(struct hinic_dev *nic_dev, u8 *addr)
&port_mac_cmd, sizeof(port_mac_cmd),
&port_mac_cmd, &out_size);
if (err || (out_size != sizeof(port_mac_cmd)) || port_mac_cmd.status) {
- dev_err(&pdev->dev, "Failed to get mac, ret = %d\n",
- port_mac_cmd.status);
+ dev_err(&pdev->dev, "Failed to get mac, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, port_mac_cmd.status, out_size);
return -EFAULT;
}
@@ -172,9 +172,9 @@ int hinic_port_set_mtu(struct hinic_dev *nic_dev, int new_mtu)
err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_CHANGE_MTU,
&port_mtu_cmd, sizeof(port_mtu_cmd),
&port_mtu_cmd, &out_size);
- if (err || (out_size != sizeof(port_mtu_cmd)) || port_mtu_cmd.status) {
- dev_err(&pdev->dev, "Failed to set mtu, ret = %d\n",
- port_mtu_cmd.status);
+ if (err || out_size != sizeof(port_mtu_cmd) || port_mtu_cmd.status) {
+ dev_err(&pdev->dev, "Failed to set mtu, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, port_mtu_cmd.status, out_size);
return -EFAULT;
}
@@ -264,8 +264,8 @@ int hinic_port_link_state(struct hinic_dev *nic_dev,
&link_cmd, sizeof(link_cmd),
&link_cmd, &out_size);
if (err || (out_size != sizeof(link_cmd)) || link_cmd.status) {
- dev_err(&pdev->dev, "Failed to get link state, ret = %d\n",
- link_cmd.status);
+ dev_err(&pdev->dev, "Failed to get link state, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, link_cmd.status, out_size);
return -EINVAL;
}
@@ -298,8 +298,8 @@ int hinic_port_set_state(struct hinic_dev *nic_dev, enum hinic_port_state state)
&port_state, sizeof(port_state),
&port_state, &out_size);
if (err || (out_size != sizeof(port_state)) || port_state.status) {
- dev_err(&pdev->dev, "Failed to set port state, ret = %d\n",
- port_state.status);
+ dev_err(&pdev->dev, "Failed to set port state, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, port_state.status, out_size);
return -EFAULT;
}
@@ -330,8 +330,8 @@ int hinic_port_set_func_state(struct hinic_dev *nic_dev,
&func_state, sizeof(func_state),
&func_state, &out_size);
if (err || (out_size != sizeof(func_state)) || func_state.status) {
- dev_err(&pdev->dev, "Failed to set port func state, ret = %d\n",
- func_state.status);
+ dev_err(&pdev->dev, "Failed to set port func state, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, func_state.status, out_size);
return -EFAULT;
}
@@ -361,9 +361,9 @@ int hinic_port_get_cap(struct hinic_dev *nic_dev,
port_cap, &out_size);
if (err || (out_size != sizeof(*port_cap)) || port_cap->status) {
dev_err(&pdev->dev,
- "Failed to get port capabilities, ret = %d\n",
- port_cap->status);
- return -EINVAL;
+ "Failed to get port capabilities, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, port_cap->status, out_size);
+ return -EIO;
}
return 0;
@@ -393,9 +393,9 @@ int hinic_port_set_tso(struct hinic_dev *nic_dev, enum hinic_tso_state state)
&tso_cfg, &out_size);
if (err || out_size != sizeof(tso_cfg) || tso_cfg.status) {
dev_err(&pdev->dev,
- "Failed to set port tso, ret = %d\n",
- tso_cfg.status);
- return -EINVAL;
+ "Failed to set port tso, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, tso_cfg.status, out_size);
+ return -EIO;
}
return 0;
@@ -423,9 +423,9 @@ int hinic_set_rx_csum_offload(struct hinic_dev *nic_dev, u32 en)
&rx_csum_cfg, &out_size);
if (err || !out_size || rx_csum_cfg.status) {
dev_err(&pdev->dev,
- "Failed to set rx csum offload, ret = %d\n",
- rx_csum_cfg.status);
- return -EINVAL;
+ "Failed to set rx csum offload, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, rx_csum_cfg.status, out_size);
+ return -EIO;
}
return 0;
@@ -480,9 +480,9 @@ int hinic_set_max_qnum(struct hinic_dev *nic_dev, u8 num_rqs)
&rq_num, &out_size);
if (err || !out_size || rq_num.status) {
dev_err(&pdev->dev,
- "Failed to rxq number, ret = %d\n",
- rq_num.status);
- return -EINVAL;
+ "Failed to set rxq number, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, rq_num.status, out_size);
+ return -EIO;
}
return 0;
@@ -508,9 +508,9 @@ static int hinic_set_rx_lro(struct hinic_dev *nic_dev, u8 ipv4_en, u8 ipv6_en,
&lro_cfg, &out_size);
if (err || !out_size || lro_cfg.status) {
dev_err(&pdev->dev,
- "Failed to set lro offload, ret = %d\n",
- lro_cfg.status);
- return -EINVAL;
+ "Failed to set lro offload, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, lro_cfg.status, out_size);
+ return -EIO;
}
return 0;
@@ -542,10 +542,10 @@ static int hinic_set_rx_lro_timer(struct hinic_dev *nic_dev, u32 timer_value)
if (err || !out_size || lro_timer.status) {
dev_err(&pdev->dev,
- "Failed to set lro timer, ret = %d\n",
- lro_timer.status);
+ "Failed to set lro timer, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, lro_timer.status, out_size);
- return -EINVAL;
+ return -EIO;
}
return 0;
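Every hinic_port.c hunk above makes the same change: the old messages logged only the firmware status word, while the new ones report all three failure dimensions (the transport-level err, the firmware status, and the returned out_size), and several paths also switch from -EINVAL to -EIO for firmware-reported failures. A hedged sketch of the shared shape, as a hypothetical helper that is not part of the driver:

/* Hypothetical helper condensing the check pattern above; not in the driver. */
static int hinic_check_port_resp(struct pci_dev *pdev, const char *what,
				 int err, u8 status, u16 out_size, u16 expect)
{
	if (err || out_size != expect || status) {
		dev_err(&pdev->dev,
			"Failed to %s, err: %d, status: 0x%x, out size: 0x%x\n",
			what, err, status, out_size);
		return -EIO;
	}
	return 0;
}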
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.h b/drivers/net/ethernet/huawei/hinic/hinic_port.h
index 14931adaffb8..9c3cbe45c9ec 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.h
@@ -189,6 +189,31 @@ struct hinic_port_link_status {
u8 port_id;
};
+struct hinic_cable_plug_event {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 plugged; /* 0: unplugged, 1: plugged */
+ u8 port_id;
+};
+
+enum link_err_type {
+ LINK_ERR_MODULE_UNRECOGENIZED,
+ LINK_ERR_NUM,
+};
+
+struct hinic_link_err_event {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 err_type;
+ u8 port_id;
+};
+
struct hinic_port_func_state_cmd {
u8 status;
u8 version;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
index caf7e81e3f62..141206917e4d 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
@@ -40,9 +40,9 @@ static int hinic_set_mac(struct hinic_hwdev *hwdev, const u8 *mac_addr,
if (err || out_size != sizeof(mac_info) ||
(mac_info.status && mac_info.status != HINIC_PF_SET_VF_ALREADY &&
mac_info.status != HINIC_MGMT_STATUS_EXIST)) {
- dev_err(&hwdev->func_to_io.hwif->pdev->dev, "Failed to change MAC, ret = %d\n",
- mac_info.status);
- return -EFAULT;
+ dev_err(&hwdev->func_to_io.hwif->pdev->dev, "Failed to set MAC, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, mac_info.status, out_size);
+ return -EIO;
}
return 0;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 0fd7eae25fe9..5afb3c9c52d2 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -3206,7 +3206,7 @@ req_rx_irq_failed:
req_tx_irq_failed:
for (j = 0; j < i; j++) {
free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
- irq_dispose_mapping(adapter->rx_scrq[j]->irq);
+ irq_dispose_mapping(adapter->tx_scrq[j]->irq);
}
release_sub_crqs(adapter, 1);
return rc;
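The one-character ibmvnic fix above corrects a copy-paste slip: the unwind path for the TX sub-CRQ IRQs was disposing the RX mappings instead of the TX ones it had just freed. The idiom being restored pairs each acquired resource with its own teardown when a later acquisition fails; a minimal sketch with hypothetical names:

/* Sketch of the partial-unwind idiom (hypothetical irqs/ctx arrays). */
static int setup_irqs(unsigned int *irqs, void **ctx, int n,
		      irq_handler_t handler)
{
	int i, rc;

	for (i = 0; i < n; i++) {
		rc = request_irq(irqs[i], handler, 0, "example", ctx[i]);
		if (rc)
			goto unwind;
	}
	return 0;

unwind:
	while (i--) {
		free_irq(irqs[i], ctx[i]);
		irq_dispose_mapping(irqs[i]);	/* same mapping we set up */
	}
	return rc;
}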
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 3cd13fd55011..5aa86318ed3e 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -295,6 +295,7 @@ config ICE
default n
depends on PCI_MSI
select NET_DEVLINK
+ select PLDMFW
help
This driver supports Intel(R) Ethernet Connection E800 Series of
devices. For more information on how to identify your adapter, go
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 91c64f91a835..36da059388dc 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2993,8 +2993,6 @@ static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
e100_down(nic);
netif_device_detach(netdev);
- pci_save_state(pdev);
-
if ((nic->flags & wol_magic) | e100_asf(nic)) {
/* enable reverse auto-negotiation */
if (nic->phy == phy_82552_v) {
@@ -3024,24 +3022,22 @@ static int __e100_power_off(struct pci_dev *pdev, bool wake)
return 0;
}
-#ifdef CONFIG_PM
-static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused e100_suspend(struct device *dev_d)
{
bool wake;
- __e100_shutdown(pdev, &wake);
- return __e100_power_off(pdev, wake);
+
+ __e100_shutdown(to_pci_dev(dev_d), &wake);
+
+ device_wakeup_disable(dev_d);
+
+ return 0;
}
-static int e100_resume(struct pci_dev *pdev)
+static int __maybe_unused e100_resume(struct device *dev_d)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev_d);
struct nic *nic = netdev_priv(netdev);
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- /* ack any pending wake events, disable PME */
- pci_enable_wake(pdev, PCI_D0, 0);
-
/* disable reverse auto-negotiation */
if (nic->phy == phy_82552_v) {
u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
@@ -3058,7 +3054,6 @@ static int e100_resume(struct pci_dev *pdev)
return 0;
}
-#endif /* CONFIG_PM */
static void e100_shutdown(struct pci_dev *pdev)
{
@@ -3146,16 +3141,17 @@ static const struct pci_error_handlers e100_err_handler = {
.resume = e100_io_resume,
};
+static SIMPLE_DEV_PM_OPS(e100_pm_ops, e100_suspend, e100_resume);
+
static struct pci_driver e100_driver = {
.name = DRV_NAME,
.id_table = e100_id_table,
.probe = e100_probe,
.remove = e100_remove,
-#ifdef CONFIG_PM
+
/* Power Management hooks */
- .suspend = e100_suspend,
- .resume = e100_resume,
-#endif
+ .driver.pm = &e100_pm_ops,
+
.shutdown = e100_shutdown,
.err_handler = &e100_err_handler,
};
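The e100 conversion above is an instance of a tree-wide pattern: drop the legacy .suspend/.resume PCI hooks and their CONFIG_PM guards in favour of a dev_pm_ops table, letting the PCI core handle pci_save_state(), pci_set_power_state() and device enable/disable. A stripped-down sketch of the resulting driver shape (example_* names are placeholders):

static int __maybe_unused example_suspend(struct device *dev)
{
	/* quiesce the device only; PCI state save is done by the core */
	return 0;
}

static int __maybe_unused example_resume(struct device *dev)
{
	/* re-init the device only; power state restore is done by the core */
	return 0;
}

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

static struct pci_driver example_driver = {
	.name		= "example",
	.id_table	= example_id_table,
	.probe		= example_probe,
	.remove		= example_remove,
	.driver.pm	= &example_pm_ops,	/* replaces .suspend/.resume */
};

The __maybe_unused annotation replaces the old #ifdef CONFIG_PM blocks: when power management is compiled out, SIMPLE_DEV_PM_OPS leaves the callbacks unreferenced and the attribute silences the resulting warnings.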
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 0b4196d2cdd4..f976e9daa3d8 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1356,8 +1356,8 @@ static void e1000_create_lbtest_frame(struct sk_buff *skb,
memset(skb->data, 0xFF, frame_size);
frame_size &= ~1;
memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
- memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
- memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
+ skb->data[frame_size / 2 + 10] = 0xBE;
+ skb->data[frame_size / 2 + 12] = 0xAF;
}
static int e1000_check_lbtest_frame(const unsigned char *data,
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 64f684dc6c7a..a8fc9208382c 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -1608,8 +1608,8 @@ static void e1000_create_lbtest_frame(struct sk_buff *skb,
memset(skb->data, 0xFF, frame_size);
frame_size &= ~1;
memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
- memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
- memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
+ skb->data[frame_size / 2 + 10] = 0xBE;
+ skb->data[frame_size / 2 + 12] = 0xAF;
}
static int e1000_check_lbtest_frame(struct sk_buff *skb,
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index ae0a6332fd30..b2f2fcfdf732 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -301,10 +301,8 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
*/
hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
ret_val = e1000_disable_ulp_lpt_lp(hw, true);
- if (ret_val) {
+ if (ret_val)
e_warn("Failed to disable ULP\n");
- goto out;
- }
ret_val = hw->phy.ops.acquire(hw);
if (ret_val) {
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 48c956d90b90..d870343cf689 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -3766,7 +3766,6 @@ err_dma:
return err;
}
-#ifdef CONFIG_PM
/**
* iavf_suspend - Power management suspend routine
* @pdev: PCI device information struct
@@ -3774,11 +3773,10 @@ err_dma:
*
* Called when the system (VM) is entering sleep/suspend.
**/
-static int iavf_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused iavf_suspend(struct device *dev_d)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev_d);
struct iavf_adapter *adapter = netdev_priv(netdev);
- int retval = 0;
netif_device_detach(netdev);
@@ -3796,12 +3794,6 @@ static int iavf_suspend(struct pci_dev *pdev, pm_message_t state)
clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
- retval = pci_save_state(pdev);
- if (retval)
- return retval;
-
- pci_disable_device(pdev);
-
return 0;
}
@@ -3811,24 +3803,13 @@ static int iavf_suspend(struct pci_dev *pdev, pm_message_t state)
*
* Called when the system (VM) is resumed from sleep/suspend.
**/
-static int iavf_resume(struct pci_dev *pdev)
+static int __maybe_unused iavf_resume(struct device *dev_d)
{
+ struct pci_dev *pdev = to_pci_dev(dev_d);
struct iavf_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
u32 err;
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- /* pci_restore_state clears dev->state_saved so call
- * pci_save_state to restore it.
- */
- pci_save_state(pdev);
-
- err = pci_enable_device_mem(pdev);
- if (err) {
- dev_err(&pdev->dev, "Cannot enable PCI device from suspend.\n");
- return err;
- }
pci_set_master(pdev);
rtnl_lock();
@@ -3852,7 +3833,6 @@ static int iavf_resume(struct pci_dev *pdev)
return err;
}
-#endif /* CONFIG_PM */
/**
* iavf_remove - Device Removal Routine
* @pdev: PCI device information struct
@@ -3954,16 +3934,15 @@ static void iavf_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
+static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);
+
static struct pci_driver iavf_driver = {
- .name = iavf_driver_name,
- .id_table = iavf_pci_tbl,
- .probe = iavf_probe,
- .remove = iavf_remove,
-#ifdef CONFIG_PM
- .suspend = iavf_suspend,
- .resume = iavf_resume,
-#endif
- .shutdown = iavf_shutdown,
+ .name = iavf_driver_name,
+ .id_table = iavf_pci_tbl,
+ .probe = iavf_probe,
+ .remove = iavf_remove,
+ .driver.pm = &iavf_pm_ops,
+ .shutdown = iavf_shutdown,
};
/**
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 980bbcc64b4b..6da4f43f2348 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -23,6 +23,7 @@ ice-y := ice_main.o \
ice_flex_pipe.o \
ice_flow.o \
ice_devlink.o \
+ ice_fw_update.o \
ice_ethtool.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index c665220bb637..fe140ff38f74 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -19,6 +19,7 @@
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/workqueue.h>
+#include <linux/wait.h>
#include <linux/aer.h>
#include <linux/interrupt.h>
#include <linux/ethtool.h>
@@ -255,6 +256,7 @@ struct ice_vsi {
u32 tx_busy;
u32 rx_buf_failed;
u32 rx_page_failed;
+ u32 rx_gro_dropped;
u16 num_q_vectors;
u16 base_vector; /* IRQ base for OS reserved vectors */
enum ice_vsi_type type;
@@ -412,6 +414,12 @@ struct ice_pf {
struct mutex sw_mutex; /* lock for protecting VSI alloc flow */
struct mutex tc_mutex; /* lock to protect TC changes */
u32 msg_enable;
+
+ /* spinlock to protect the AdminQ wait list */
+ spinlock_t aq_wait_lock;
+ struct hlist_head aq_wait_list;
+ wait_queue_head_t aq_wait_queue;
+
u32 hw_csum_rx_error;
u16 oicr_idx; /* Other interrupt cause MSIX vector index */
u16 num_avail_sw_msix; /* remaining MSIX SW vectors left unclaimed */
@@ -593,6 +601,8 @@ void ice_fdir_release_flows(struct ice_hw *hw);
void ice_fdir_replay_flows(struct ice_hw *hw);
void ice_fdir_replay_fltrs(struct ice_pf *pf);
int ice_fdir_create_dflt_rules(struct ice_pf *pf);
+int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
+ struct ice_rq_event_info *event);
int ice_open(struct net_device *netdev);
int ice_stop(struct net_device *netdev);
void ice_service_task_schedule(struct ice_pf *pf);
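The new aq_wait_lock/aq_wait_list/aq_wait_queue members and the ice_aq_wait_for_event() declaration introduce a simple completion mechanism: a caller registers interest in an AdminQ opcode on the spinlock-protected list, sleeps on the wait queue, and the receive path copies the matching descriptor into the caller's event and wakes it. A hedged sketch of that shape; the real implementation lives elsewhere in the driver, and names beyond those declared above are assumptions:

/* Illustrative only; the task struct and locking details are assumptions. */
struct aq_wait_task {
	struct hlist_node entry;
	u16 opcode;
	bool complete;
	struct ice_rq_event_info *event;
};

int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
			  struct ice_rq_event_info *event)
{
	struct aq_wait_task task = { .opcode = opcode, .event = event };
	long ret;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_add_head(&task.entry, &pf->aq_wait_list);
	spin_unlock_bh(&pf->aq_wait_lock);

	/* the AdminQ receive path sets task.complete and wakes the queue */
	ret = wait_event_interruptible_timeout(pf->aq_wait_queue,
					       task.complete, timeout);

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_del(&task.entry);
	spin_unlock_bh(&pf->aq_wait_lock);

	return ret < 0 ? ret : ret ? 0 : -ETIMEDOUT;
}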
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index b363e0223670..ba9375218fef 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -109,6 +109,13 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_MSIX 0x0043
#define ICE_AQC_CAPS_FD 0x0045
#define ICE_AQC_CAPS_MAX_MTU 0x0047
+#define ICE_AQC_CAPS_NVM_VER 0x0048
+#define ICE_AQC_CAPS_PENDING_NVM_VER 0x0049
+#define ICE_AQC_CAPS_OROM_VER 0x004A
+#define ICE_AQC_CAPS_PENDING_OROM_VER 0x004B
+#define ICE_AQC_CAPS_NET_VER 0x004C
+#define ICE_AQC_CAPS_PENDING_NET_VER 0x004D
+#define ICE_AQC_CAPS_NVM_MGMT 0x0080
u8 major_ver;
u8 minor_ver;
@@ -1298,7 +1305,14 @@ struct ice_aqc_nvm {
#define ICE_AQC_NVM_PRESERVATION_M (3 << ICE_AQC_NVM_PRESERVATION_S)
#define ICE_AQC_NVM_NO_PRESERVATION (0 << ICE_AQC_NVM_PRESERVATION_S)
#define ICE_AQC_NVM_PRESERVE_ALL BIT(1)
+#define ICE_AQC_NVM_FACTORY_DEFAULT (2 << ICE_AQC_NVM_PRESERVATION_S)
#define ICE_AQC_NVM_PRESERVE_SELECTED (3 << ICE_AQC_NVM_PRESERVATION_S)
+#define ICE_AQC_NVM_ACTIV_SEL_NVM BIT(3) /* Write Activate/SR Dump only */
+#define ICE_AQC_NVM_ACTIV_SEL_OROM BIT(4)
+#define ICE_AQC_NVM_ACTIV_SEL_NETLIST BIT(5)
+#define ICE_AQC_NVM_SPECIAL_UPDATE BIT(6)
+#define ICE_AQC_NVM_REVERT_LAST_ACTIV BIT(6) /* Write Activate only */
+#define ICE_AQC_NVM_ACTIV_SEL_MASK ICE_M(0x7, 3)
#define ICE_AQC_NVM_FLASH_ONLY BIT(7)
__le16 module_typeid;
__le16 length;
@@ -1347,6 +1361,67 @@ struct ice_aqc_nvm_checksum {
#define ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH 0xA
#define ICE_AQC_NVM_NETLIST_ID_BLK_CUST_VER 0x2F
+/* Used for NVM Set Package Data command - 0x070A */
+struct ice_aqc_nvm_pkg_data {
+ u8 reserved[3];
+ u8 cmd_flags;
+#define ICE_AQC_NVM_PKG_DELETE BIT(0) /* used for command call */
+#define ICE_AQC_NVM_PKG_SKIPPED BIT(0) /* used for command response */
+
+ u32 reserved1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Used for Pass Component Table command - 0x070B */
+struct ice_aqc_nvm_pass_comp_tbl {
+ u8 component_response; /* Response only */
+#define ICE_AQ_NVM_PASS_COMP_CAN_BE_UPDATED 0x0
+#define ICE_AQ_NVM_PASS_COMP_CAN_MAY_BE_UPDATEABLE 0x1
+#define ICE_AQ_NVM_PASS_COMP_CAN_NOT_BE_UPDATED 0x2
+ u8 component_response_code; /* Response only */
+#define ICE_AQ_NVM_PASS_COMP_CAN_BE_UPDATED_CODE 0x0
+#define ICE_AQ_NVM_PASS_COMP_STAMP_IDENTICAL_CODE 0x1
+#define ICE_AQ_NVM_PASS_COMP_STAMP_LOWER 0x2
+#define ICE_AQ_NVM_PASS_COMP_INVALID_STAMP_CODE 0x3
+#define ICE_AQ_NVM_PASS_COMP_CONFLICT_CODE 0x4
+#define ICE_AQ_NVM_PASS_COMP_PRE_REQ_NOT_MET_CODE 0x5
+#define ICE_AQ_NVM_PASS_COMP_NOT_SUPPORTED_CODE 0x6
+#define ICE_AQ_NVM_PASS_COMP_CANNOT_DOWNGRADE_CODE 0x7
+#define ICE_AQ_NVM_PASS_COMP_INCOMPLETE_IMAGE_CODE 0x8
+#define ICE_AQ_NVM_PASS_COMP_VER_STR_IDENTICAL_CODE 0xA
+#define ICE_AQ_NVM_PASS_COMP_VER_STR_LOWER_CODE 0xB
+ u8 reserved;
+ u8 transfer_flag;
+#define ICE_AQ_NVM_PASS_COMP_TBL_START 0x1
+#define ICE_AQ_NVM_PASS_COMP_TBL_MIDDLE 0x2
+#define ICE_AQ_NVM_PASS_COMP_TBL_END 0x4
+#define ICE_AQ_NVM_PASS_COMP_TBL_START_AND_END 0x5
+ __le32 reserved1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_nvm_comp_tbl {
+ __le16 comp_class;
+#define NVM_COMP_CLASS_ALL_FW 0x000A
+
+ __le16 comp_id;
+#define NVM_COMP_ID_OROM 0x5
+#define NVM_COMP_ID_NVM 0x6
+#define NVM_COMP_ID_NETLIST 0x8
+
+ u8 comp_class_idx;
+#define FWU_COMP_CLASS_IDX_NOT_USE 0x0
+
+ __le32 comp_cmp_stamp;
+ u8 cvs_type;
+#define NVM_CVS_TYPE_ASCII 0x1
+
+ u8 cvs_len;
+ u8 cvs[]; /* Component Version String */
+} __packed;
+
/**
* Send to PF command (indirect 0x0801) ID is only used by PF
*
@@ -1506,7 +1581,7 @@ struct ice_aqc_get_set_rss_keys {
struct ice_aqc_get_set_rss_lut {
#define ICE_AQC_GSET_RSS_LUT_VSI_VALID BIT(15)
#define ICE_AQC_GSET_RSS_LUT_VSI_ID_S 0
-#define ICE_AQC_GSET_RSS_LUT_VSI_ID_M (0x1FF << ICE_AQC_GSET_RSS_LUT_VSI_ID_S)
+#define ICE_AQC_GSET_RSS_LUT_VSI_ID_M (0x3FF << ICE_AQC_GSET_RSS_LUT_VSI_ID_S)
__le16 vsi_id;
#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S 0
#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M \
@@ -1794,6 +1869,8 @@ struct ice_aq_desc {
struct ice_aqc_rl_profile rl_profile;
struct ice_aqc_nvm nvm;
struct ice_aqc_nvm_checksum nvm_checksum;
+ struct ice_aqc_nvm_pkg_data pkg_data;
+ struct ice_aqc_nvm_pass_comp_tbl pass_comp_tbl;
struct ice_aqc_pf_vf_msg virt;
struct ice_aqc_lldp_get_mib lldp_get_mib;
struct ice_aqc_lldp_set_mib_change lldp_set_event;
@@ -1922,7 +1999,13 @@ enum ice_adminq_opc {
/* NVM commands */
ice_aqc_opc_nvm_read = 0x0701,
+ ice_aqc_opc_nvm_erase = 0x0702,
+ ice_aqc_opc_nvm_write = 0x0703,
ice_aqc_opc_nvm_checksum = 0x0706,
+ ice_aqc_opc_nvm_write_activate = 0x0707,
+ ice_aqc_opc_nvm_update_empr = 0x0709,
+ ice_aqc_opc_nvm_pkg_data = 0x070A,
+ ice_aqc_opc_nvm_pass_component_tbl = 0x070B,
/* PF/VF mailbox commands */
ice_mbx_opc_send_msg_to_pf = 0x0801,
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index c72cc77b8d67..34abfcea9858 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -611,8 +611,6 @@ static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
-
status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
if (!status) {
u16 i;
@@ -1029,23 +1027,23 @@ void ice_deinit_hw(struct ice_hw *hw)
*/
enum ice_status ice_check_reset(struct ice_hw *hw)
{
- u32 cnt, reg = 0, grst_delay, uld_mask;
+ u32 cnt, reg = 0, grst_timeout, uld_mask;
/* Poll for Device Active state in case a recent CORER, GLOBR,
* or EMPR has occurred. The grst delay value is in 100ms units.
* Add 1sec for outstanding AQ commands that can take a long time.
*/
- grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
- GLGEN_RSTCTL_GRSTDEL_S) + 10;
+ grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
+ GLGEN_RSTCTL_GRSTDEL_S) + 10;
- for (cnt = 0; cnt < grst_delay; cnt++) {
+ for (cnt = 0; cnt < grst_timeout; cnt++) {
mdelay(100);
reg = rd32(hw, GLGEN_RSTAT);
if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
break;
}
- if (cnt == grst_delay) {
+ if (cnt == grst_timeout) {
ice_debug(hw, ICE_DBG_INIT,
"Global reset polling failed to complete.\n");
return ICE_ERR_RESET_FAILED;
@@ -1720,8 +1718,7 @@ ice_alloc_res_exit:
* @num: number of resources
* @res: pointer to array that contains the resources to free
*/
-enum ice_status
-ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
+enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
{
struct ice_aqc_alloc_free_res_elem *buf;
enum ice_status status;
@@ -1857,6 +1854,25 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
"%s: msix_vector_first_id = %d\n", prefix,
caps->msix_vector_first_id);
break;
+ case ICE_AQC_CAPS_PENDING_NVM_VER:
+ caps->nvm_update_pending_nvm = true;
+ ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix);
+ break;
+ case ICE_AQC_CAPS_PENDING_OROM_VER:
+ caps->nvm_update_pending_orom = true;
+ ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix);
+ break;
+ case ICE_AQC_CAPS_PENDING_NET_VER:
+ caps->nvm_update_pending_netlist = true;
+ ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix);
+ break;
+ case ICE_AQC_CAPS_NVM_MGMT:
+ caps->nvm_unified_update =
+ (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
+ true : false;
+ ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
+ caps->nvm_unified_update);
+ break;
case ICE_AQC_CAPS_MAX_MTU:
caps->max_mtu = number;
ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
@@ -2104,7 +2120,7 @@ ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
* @cap_count: the number of capabilities
*
* Helper device to parse device (0x000B) capabilities list. For
- * capabilities shared between device and device, this relies on
+ * capabilities shared between device and function, this relies on
* ice_parse_common_caps.
*
* Loop through the list of provided capabilities and extract the relevant
@@ -2204,7 +2220,7 @@ ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
* Read the device capabilities and extract them into the dev_caps structure
* for later use.
*/
-static enum ice_status
+enum ice_status
ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
{
enum ice_status status;
@@ -3888,7 +3904,18 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
* Without setting the generic section as valid in valid_sections, the
* Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
*/
- buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;
+ buf->txqs[0].info.valid_sections =
+ ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
+ ICE_AQC_ELEM_VALID_EIR;
+ buf->txqs[0].info.generic = 0;
+ buf->txqs[0].info.cir_bw.bw_profile_idx =
+ cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
+ buf->txqs[0].info.cir_bw.bw_alloc =
+ cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
+ buf->txqs[0].info.eir_bw.bw_profile_idx =
+ cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
+ buf->txqs[0].info.eir_bw.bw_alloc =
+ cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
/* add the LAN queue */
status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
@@ -4338,3 +4365,36 @@ bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
return false;
}
+
+/**
+ * ice_aq_set_lldp_mib - Set the LLDP MIB
+ * @hw: pointer to the HW struct
+ * @mib_type: Local, Remote or both Local and Remote MIBs
+ * @buf: pointer to the caller-supplied buffer to store the MIB block
+ * @buf_size: size of the buffer (in bytes)
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set the LLDP MIB. (0x0A08)
+ */
+enum ice_status
+ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_lldp_set_local_mib *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.lldp_set_mib;
+
+ if (buf_size == 0 || !buf)
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
+
+ desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
+ desc.datalen = cpu_to_le16(buf_size);
+
+ cmd->type = mib_type;
+ cmd->length = cpu_to_le16(buf_size);
+
+ return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 33a681a75439..3ebb973878c7 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -11,8 +11,6 @@
#include "ice_switch.h"
#include <linux/avf/virtchnl.h>
-enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw);
-
enum ice_status ice_init_hw(struct ice_hw *hw);
void ice_deinit_hw(struct ice_hw *hw);
enum ice_status ice_check_reset(struct ice_hw *hw);
@@ -90,6 +88,8 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
enum ice_status
ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
enum ice_adminq_opc opc, struct ice_sq_cd *cd);
+enum ice_status
+ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps);
void
ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
u16 link_speeds_bitmap);
@@ -172,4 +172,7 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf);
+enum ice_status
+ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
+ struct ice_sq_cd *cd);
#endif /* _ICE_COMMON_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index 1e18021aa073..1f46a7828be8 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -312,9 +312,10 @@ ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
#define ICE_FREE_CQ_BUFS(hw, qi, ring) \
do { \
- int i; \
/* free descriptors */ \
- if ((qi)->ring.r.ring##_bi) \
+ if ((qi)->ring.r.ring##_bi) { \
+ int i; \
+ \
for (i = 0; i < (qi)->num_##ring##_entries; i++) \
if ((qi)->ring.r.ring##_bi[i].pa) { \
dmam_free_coherent(ice_hw_to_dev(hw), \
@@ -325,6 +326,7 @@ do { \
(qi)->ring.r.ring##_bi[i].pa = 0;\
(qi)->ring.r.ring##_bi[i].size = 0;\
} \
+ } \
/* free the buffer info list */ \
if ((qi)->ring.cmd_buf) \
devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index 2cecc9d08005..2a3147ee0bbb 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -135,39 +135,6 @@ ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd)
}
/**
- * ice_aq_set_lldp_mib - Set the LLDP MIB
- * @hw: pointer to the HW struct
- * @mib_type: Local, Remote or both Local and Remote MIBs
- * @buf: pointer to the caller-supplied buffer to store the MIB block
- * @buf_size: size of the buffer (in bytes)
- * @cd: pointer to command details structure or NULL
- *
- * Set the LLDP MIB. (0x0A08)
- */
-static enum ice_status
-ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
- struct ice_sq_cd *cd)
-{
- struct ice_aqc_lldp_set_local_mib *cmd;
- struct ice_aq_desc desc;
-
- cmd = &desc.params.lldp_set_mib;
-
- if (buf_size == 0 || !buf)
- return ICE_ERR_PARAM;
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
-
- desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
- desc.datalen = cpu_to_le16(buf_size);
-
- cmd->type = mib_type;
- cmd->length = cpu_to_le16(buf_size);
-
- return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
-}
-
-/**
* ice_get_dcbx_status
* @hw: pointer to the HW struct
*
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index 979af197f8a3..36abd6b7280c 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -444,10 +444,6 @@ void ice_dcb_rebuild(struct ice_pf *pf)
goto dcb_error;
}
- /* If DCB was not enabled previously, we are done */
- if (!test_bit(ICE_FLAG_DCB_ENA, pf->flags))
- return;
-
mutex_lock(&pf->tc_mutex);
if (!pf->hw.port_info->is_sw_lldp)
@@ -467,7 +463,7 @@ void ice_dcb_rebuild(struct ice_pf *pf)
}
}
- dev_info(dev, "DCB restored after reset\n");
+ dev_info(dev, "DCB info restored\n");
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
if (ret) {
dev_err(dev, "Query Port ETS failed\n");
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
index 323238669572..35c21d9ae009 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
@@ -53,6 +53,12 @@ ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring)
{
tlan_ctx->cgd_num = ring->dcb_tc;
}
+
+static inline bool ice_is_dcb_active(struct ice_pf *pf)
+{
+ return (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags) ||
+ test_bit(ICE_FLAG_DCB_ENA, pf->flags));
+}
#else
#define ice_dcb_rebuild(pf) do {} while (0)
@@ -95,6 +101,11 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_ring __always_unused *tx_ring,
return 0;
}
+static inline bool ice_is_dcb_active(struct ice_pf __always_unused *pf)
+{
+ return false;
+}
+
static inline bool
ice_is_pfc_causing_hung_q(struct ice_pf __always_unused *pf,
unsigned int __always_unused txqueue)
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index 43da2dcb0cbc..111d6bfe4222 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -4,6 +4,7 @@
#include "ice.h"
#include "ice_lib.h"
#include "ice_devlink.h"
+#include "ice_fw_update.h"
static int ice_info_get_dsn(struct ice_pf *pf, char *buf, size_t len)
{
@@ -229,8 +230,61 @@ static int ice_devlink_info_get(struct devlink *devlink,
return 0;
}
+/**
+ * ice_devlink_flash_update - Update firmware stored in flash on the device
+ * @devlink: pointer to devlink associated with device to update
+ * @path: the path of the firmware file to use via request_firmware
+ * @component: name of the component to update, or NULL
+ * @extack: netlink extended ACK structure
+ *
+ * Perform a device flash update. The bulk of the update logic is contained
+ * within the ice_flash_pldm_image function.
+ *
+ * Returns: zero on success, or an error code on failure.
+ */
+static int
+ice_devlink_flash_update(struct devlink *devlink, const char *path,
+ const char *component, struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+ struct device *dev = &pf->pdev->dev;
+ struct ice_hw *hw = &pf->hw;
+ const struct firmware *fw;
+ int err;
+
+ /* individual component update is not yet supported */
+ if (component)
+ return -EOPNOTSUPP;
+
+ if (!hw->dev_caps.common_cap.nvm_unified_update) {
+ NL_SET_ERR_MSG_MOD(extack, "Current firmware does not support unified update");
+ return -EOPNOTSUPP;
+ }
+
+ err = ice_check_for_pending_update(pf, component, extack);
+ if (err)
+ return err;
+
+ err = request_firmware(&fw, path, dev);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to read file from disk");
+ return err;
+ }
+
+ devlink_flash_update_begin_notify(devlink);
+ devlink_flash_update_status_notify(devlink, "Preparing to flash",
+ component, 0, 0);
+ err = ice_flash_pldm_image(pf, fw, extack);
+ devlink_flash_update_end_notify(devlink);
+
+ release_firmware(fw);
+
+ return err;
+}
+
static const struct devlink_ops ice_devlink_ops = {
.info_get = ice_devlink_info_get,
+ .flash_update = ice_devlink_flash_update,
};
static void ice_devlink_free(void *devlink_ptr)
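With .flash_update wired into ice_devlink_ops, the whole flow is driven from userspace: a command along the lines of `devlink dev flash pci/0000:af:00.0 file firmware.bin` (device address illustrative) hands the file name to ice_devlink_flash_update(), which resolves it through request_firmware() and streams the begin/status/end notifications shown above back over devlink.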
@@ -303,7 +357,7 @@ void ice_devlink_unregister(struct ice_pf *pf)
*
* Create and register a devlink_port for this PF. Note that although each
* physical function is connected to a separate devlink instance, the port
- * will still be numbered according to the physical function id.
+ * will still be numbered according to the physical function ID.
*
* Return: zero on success or an error code on failure.
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 06b93e97892d..9e8e9531cd87 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -59,8 +59,11 @@ static const struct ice_stats ice_gstrings_vsi_stats[] = {
ICE_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
ICE_VSI_STAT("rx_alloc_fail", rx_buf_failed),
ICE_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
+ ICE_VSI_STAT("rx_gro_dropped", rx_gro_dropped),
ICE_VSI_STAT("tx_errors", eth_stats.tx_errors),
ICE_VSI_STAT("tx_linearize", tx_linearize),
+ ICE_VSI_STAT("tx_busy", tx_busy),
+ ICE_VSI_STAT("tx_restart", tx_restart),
};
enum ice_ethtool_test_id {
@@ -100,6 +103,7 @@ static const struct ice_stats ice_gstrings_pf_stats[] = {
ICE_PF_STAT("rx_broadcast.nic", stats.eth.rx_broadcast),
ICE_PF_STAT("tx_broadcast.nic", stats.eth.tx_broadcast),
ICE_PF_STAT("tx_errors.nic", stats.eth.tx_errors),
+ ICE_PF_STAT("tx_timeout.nic", tx_timeout_count),
ICE_PF_STAT("rx_size_64.nic", stats.rx_size_64),
ICE_PF_STAT("tx_size_64.nic", stats.tx_size_64),
ICE_PF_STAT("rx_size_127.nic", stats.rx_size_127),
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index 3c217e51b27e..b17ae3e20157 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -644,7 +644,7 @@ static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
* This function generates a key from a value, a don't care mask and a never
* match mask.
* upd, dc, and nm are optional parameters, and can be NULL:
- * upd == NULL --> udp mask is all 1's (update all bits)
+ * upd == NULL --> upd mask is all 1's (update all bits)
* dc == NULL --> dc mask is all 0's (no don't care bits)
* nm == NULL --> nm mask is all 0's (no never match bits)
*/
@@ -2921,6 +2921,8 @@ static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx)
ICE_FLOW_ENTRY_HNDL(e));
list_del(&p->l_entry);
+
+ mutex_destroy(&p->entries_lock);
devm_kfree(ice_hw_to_dev(hw), p);
}
mutex_unlock(&hw->fl_profs_locks[blk_idx]);
@@ -3038,7 +3040,7 @@ void ice_clear_hw_tbls(struct ice_hw *hw)
memset(prof_redir->t, 0,
prof_redir->count * sizeof(*prof_redir->t));
- memset(es->t, 0, es->count * sizeof(*es->t));
+ memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw);
memset(es->ref_count, 0, es->count * sizeof(*es->ref_count));
memset(es->written, 0, es->count * sizeof(*es->written));
}
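The es->t fix above matters because the extract-sequence table is allocated flat, as count rows of fvw entries each; clearing only count entries left most of the table stale across resets. The sizing rule for such flat row-major tables, as a small sketch with hypothetical names:

/* Hypothetical flat table: one allocation holds rows * cols entries, so a
 * clear must cover the full rows * cols extent, exactly as the fix above
 * does with es->count * es->fvw.
 */
static void clear_flat_table(u16 *tbl, size_t rows, size_t cols)
{
	memset(tbl, 0, rows * cols * sizeof(*tbl));
}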
@@ -3149,10 +3151,12 @@ enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
es->ref_count = devm_kcalloc(ice_hw_to_dev(hw), es->count,
sizeof(*es->ref_count),
GFP_KERNEL);
+ if (!es->ref_count)
+ goto err;
es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count,
sizeof(*es->written), GFP_KERNEL);
- if (!es->ref_count)
+ if (!es->written)
goto err;
}
return 0;
@@ -3874,16 +3878,16 @@ err_ice_add_prof:
}
/**
- * ice_search_prof_id_low - Search for a profile tracking ID low level
+ * ice_search_prof_id - Search for a profile tracking ID
* @hw: pointer to the HW struct
* @blk: hardware block
* @id: profile tracking ID
*
- * This will search for a profile tracking ID which was previously added. This
- * version assumes that the caller has already acquired the prof map lock.
+ * This will search for a profile tracking ID which was previously added.
+ * The profile map lock should be held before calling this function.
*/
static struct ice_prof_map *
-ice_search_prof_id_low(struct ice_hw *hw, enum ice_block blk, u64 id)
+ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
{
struct ice_prof_map *entry = NULL;
struct ice_prof_map *map;
@@ -3898,26 +3902,6 @@ ice_search_prof_id_low(struct ice_hw *hw, enum ice_block blk, u64 id)
}
/**
- * ice_search_prof_id - Search for a profile tracking ID
- * @hw: pointer to the HW struct
- * @blk: hardware block
- * @id: profile tracking ID
- *
- * This will search for a profile tracking ID which was previously added.
- */
-static struct ice_prof_map *
-ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
-{
- struct ice_prof_map *entry;
-
- mutex_lock(&hw->blk[blk].es.prof_map_lock);
- entry = ice_search_prof_id_low(hw, blk, id);
- mutex_unlock(&hw->blk[blk].es.prof_map_lock);
-
- return entry;
-}
-
-/**
* ice_vsig_prof_id_count - count profiles in a VSIG
* @hw: pointer to the HW struct
* @blk: hardware block
@@ -4133,7 +4117,7 @@ enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
mutex_lock(&hw->blk[blk].es.prof_map_lock);
- pmap = ice_search_prof_id_low(hw, blk, id);
+ pmap = ice_search_prof_id(hw, blk, id);
if (!pmap) {
status = ICE_ERR_DOES_NOT_EXIST;
goto err_ice_rem_prof;
@@ -4166,22 +4150,28 @@ static enum ice_status
ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
struct list_head *chg)
{
+ enum ice_status status = 0;
struct ice_prof_map *map;
struct ice_chs_chg *p;
u16 i;
+ mutex_lock(&hw->blk[blk].es.prof_map_lock);
/* Get the details on the profile specified by the handle ID */
map = ice_search_prof_id(hw, blk, hdl);
- if (!map)
- return ICE_ERR_DOES_NOT_EXIST;
+ if (!map) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto err_ice_get_prof;
+ }
for (i = 0; i < map->ptg_cnt; i++)
if (!hw->blk[blk].es.written[map->prof_id]) {
/* add ES to change list */
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
GFP_KERNEL);
- if (!p)
+ if (!p) {
+ status = ICE_ERR_NO_MEMORY;
goto err_ice_get_prof;
+ }
p->type = ICE_PTG_ES_ADD;
p->ptype = 0;
@@ -4196,11 +4186,10 @@ ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
list_add(&p->list_entry, chg);
}
- return 0;
-
err_ice_get_prof:
+ mutex_unlock(&hw->blk[blk].es.prof_map_lock);
/* let caller clean up the change list */
- return ICE_ERR_NO_MEMORY;
+ return status;
}
/**
@@ -4254,17 +4243,23 @@ static enum ice_status
ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
struct list_head *lst, u64 hdl)
{
+ enum ice_status status = 0;
struct ice_prof_map *map;
struct ice_vsig_prof *p;
u16 i;
+ mutex_lock(&hw->blk[blk].es.prof_map_lock);
map = ice_search_prof_id(hw, blk, hdl);
- if (!map)
- return ICE_ERR_DOES_NOT_EXIST;
+ if (!map) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto err_ice_add_prof_to_lst;
+ }
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
- if (!p)
- return ICE_ERR_NO_MEMORY;
+ if (!p) {
+ status = ICE_ERR_NO_MEMORY;
+ goto err_ice_add_prof_to_lst;
+ }
p->profile_cookie = map->profile_cookie;
p->prof_id = map->prof_id;
@@ -4278,7 +4273,9 @@ ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
list_add(&p->list, lst);
- return 0;
+err_ice_add_prof_to_lst:
+ mutex_unlock(&hw->blk[blk].es.prof_map_lock);
+ return status;
}
/**
@@ -4496,16 +4493,12 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
+ enum ice_status status = 0;
struct ice_prof_map *map;
struct ice_vsig_prof *t;
struct ice_chs_chg *p;
u16 vsig_idx, i;
- /* Get the details on the profile specified by the handle ID */
- map = ice_search_prof_id(hw, blk, hdl);
- if (!map)
- return ICE_ERR_DOES_NOT_EXIST;
-
/* Error, if this VSIG already has this profile */
if (ice_has_prof_vsig(hw, blk, vsig, hdl))
return ICE_ERR_ALREADY_EXISTS;
@@ -4515,19 +4508,28 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
if (!t)
return ICE_ERR_NO_MEMORY;
+ mutex_lock(&hw->blk[blk].es.prof_map_lock);
+ /* Get the details on the profile specified by the handle ID */
+ map = ice_search_prof_id(hw, blk, hdl);
+ if (!map) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto err_ice_add_prof_id_vsig;
+ }
+
t->profile_cookie = map->profile_cookie;
t->prof_id = map->prof_id;
t->tcam_count = map->ptg_cnt;
/* create TCAM entries */
for (i = 0; i < map->ptg_cnt; i++) {
- enum ice_status status;
u16 tcam_idx;
/* add TCAM to change list */
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
- if (!p)
+ if (!p) {
+ status = ICE_ERR_NO_MEMORY;
goto err_ice_add_prof_id_vsig;
+ }
/* allocate the TCAM entry index */
status = ice_alloc_tcam_ent(hw, blk, &tcam_idx);
@@ -4571,12 +4573,14 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
list_add(&t->list,
&hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
- return 0;
+ mutex_unlock(&hw->blk[blk].es.prof_map_lock);
+ return status;
err_ice_add_prof_id_vsig:
+ mutex_unlock(&hw->blk[blk].es.prof_map_lock);
/* let caller clean up the change list */
devm_kfree(ice_hw_to_dev(hw), t);
- return ICE_ERR_NO_MEMORY;
+ return status;
}
/**
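The recurring shape of the flex_pipe changes above is a lock-ownership refactor: ice_search_prof_id() no longer takes prof_map_lock itself, and each caller now holds the lock across both the search and its use of the returned entry, closing the window in which the map could change underneath. The general pattern, sketched with hypothetical types:

/* Hedged sketch: the lookup helper asserts rather than takes the lock, so
 * the caller can keep using the returned entry race-free.
 */
struct entry { struct list_head node; u64 id; };
struct table { struct mutex lock; struct list_head head; };

static struct entry *lookup_locked(struct table *t, u64 id)
{
	struct entry *e;

	lockdep_assert_held(&t->lock);	/* caller must hold t->lock */
	list_for_each_entry(e, &t->head, node)
		if (e->id == id)
			return e;
	return NULL;
}

static int use_entry(struct table *t, u64 id)
{
	struct entry *e;
	int ret = 0;

	mutex_lock(&t->lock);
	e = lookup_locked(t, id);
	if (e)
		e->id = id;		/* stand-in for real work, under lock */
	else
		ret = -ENOENT;
	mutex_unlock(&t->lock);
	return ret;
}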
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
index d74e5290677f..fe677621dd51 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.c
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -1187,7 +1187,7 @@ enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
if (list_empty(&hw->fl_profs[blk]))
return 0;
- mutex_lock(&hw->fl_profs_locks[blk]);
+ mutex_lock(&hw->rss_locks);
list_for_each_entry_safe(p, t, &hw->fl_profs[blk], l_entry)
if (test_bit(vsi_handle, p->vsis)) {
status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
@@ -1195,12 +1195,12 @@ enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
break;
if (bitmap_empty(p->vsis, ICE_MAX_VSI)) {
- status = ice_flow_rem_prof_sync(hw, blk, p);
+ status = ice_flow_rem_prof(hw, blk, p->id);
if (status)
break;
}
}
- mutex_unlock(&hw->fl_profs_locks[blk]);
+ mutex_unlock(&hw->rss_locks);
return status;
}
@@ -1597,7 +1597,8 @@ enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
*/
u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
{
- struct ice_rss_cfg *r, *rss_cfg = NULL;
+ u64 rss_hash = ICE_HASH_INVALID;
+ struct ice_rss_cfg *r;
/* verify if the protocol header is non zero and VSI is valid */
if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
@@ -1607,10 +1608,10 @@ u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
list_for_each_entry(r, &hw->rss_list_head, l_entry)
if (test_bit(vsi_handle, r->vsis) &&
r->packet_hdr == hdrs) {
- rss_cfg = r;
+ rss_hash = r->hashed_flds;
break;
}
mutex_unlock(&hw->rss_locks);
- return rss_cfg ? rss_cfg->hashed_flds : ICE_HASH_INVALID;
+ return rss_hash;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c
new file mode 100644
index 000000000000..deaefe00c9c0
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c
@@ -0,0 +1,773 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2019, Intel Corporation. */
+
+#include <asm/unaligned.h>
+#include <linux/uuid.h>
+#include <linux/crc32.h>
+#include <linux/pldmfw.h>
+#include "ice.h"
+#include "ice_fw_update.h"
+
+struct ice_fwu_priv {
+ struct pldmfw context;
+
+ struct ice_pf *pf;
+ struct netlink_ext_ack *extack;
+
+ /* Track which NVM banks to activate at the end of the update */
+ u8 activate_flags;
+};
+
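Embedding struct pldmfw inside ice_fwu_priv lets every pldmfw callback below recover the driver-private wrapper with container_of(), without the library knowing anything about the driver. The idiom in isolation (the wrapper struct and op are hypothetical; the pldmfw library supplies the real callback signatures):

#include <linux/kernel.h>	/* container_of() */
#include <linux/pldmfw.h>

struct wrapper {
	struct pldmfw context;	/* the part the library sees */
	int private_state;	/* the part only the driver sees */
};

static int example_op(struct pldmfw *context)
{
	/* recover the enclosing object from the member pointer; this works
	 * regardless of where 'context' sits inside the wrapper
	 */
	struct wrapper *w = container_of(context, struct wrapper, context);

	return w->private_state;
}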
+/**
+ * ice_send_package_data - Send record package data to firmware
+ * @context: PLDM fw update structure
+ * @data: pointer to the package data
+ * @length: length of the package data
+ *
+ * Send a copy of the package data associated with the PLDM record matching
+ * this device to the firmware.
+ *
+ * Note that this function sends an AdminQ command that will fail unless the
+ * NVM resource has been acquired.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int
+ice_send_package_data(struct pldmfw *context, const u8 *data, u16 length)
+{
+ struct ice_fwu_priv *priv = container_of(context, struct ice_fwu_priv, context);
+ struct netlink_ext_ack *extack = priv->extack;
+ struct device *dev = context->dev;
+ struct ice_pf *pf = priv->pf;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ u8 *package_data;
+
+ package_data = kmemdup(data, length, GFP_KERNEL);
+ if (!package_data)
+ return -ENOMEM;
+
+ status = ice_nvm_set_pkg_data(hw, false, package_data, length, NULL);
+
+ kfree(package_data);
+
+ if (status) {
+ dev_err(dev, "Failed to send record package data to firmware, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ NL_SET_ERR_MSG_MOD(extack, "Failed to record package data to firmware");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_check_component_response - Report firmware response to a component
+ * @pf: device private data structure
+ * @id: component id being checked
+ * @response: indicates whether this component can be updated
+ * @code: code indicating reason for response
+ * @extack: netlink extended ACK structure
+ *
+ * Check whether firmware indicates if this component can be updated. Report
+ * a suitable error message over the netlink extended ACK if the component
+ * cannot be updated.
+ *
+ * Returns: zero if the component can be updated, or -ECANCELED if the
+ * firmware indicates the component cannot be updated.
+ */
+static int
+ice_check_component_response(struct ice_pf *pf, u16 id, u8 response, u8 code,
+ struct netlink_ext_ack *extack)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ const char *component;
+
+ switch (id) {
+ case NVM_COMP_ID_OROM:
+ component = "fw.undi";
+ break;
+ case NVM_COMP_ID_NVM:
+ component = "fw.mgmt";
+ break;
+ case NVM_COMP_ID_NETLIST:
+ component = "fw.netlist";
+ break;
+ default:
+ WARN(1, "Unexpected unknown component identifier 0x%02x", id);
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "%s: firmware response 0x%x, code 0x%x\n",
+ component, response, code);
+
+ switch (response) {
+ case ICE_AQ_NVM_PASS_COMP_CAN_BE_UPDATED:
+ /* firmware indicated this update is good to proceed */
+ return 0;
+ case ICE_AQ_NVM_PASS_COMP_CAN_MAY_BE_UPDATEABLE:
+ dev_warn(dev, "firmware recommends not updating %s, as it may result in a downgrade. continuing anyways\n", component);
+ return 0;
+ case ICE_AQ_NVM_PASS_COMP_CAN_NOT_BE_UPDATED:
+ dev_info(dev, "firmware has rejected updating %s\n", component);
+ break;
+ }
+
+ switch (code) {
+ case ICE_AQ_NVM_PASS_COMP_STAMP_IDENTICAL_CODE:
+ dev_err(dev, "Component comparison stamp for %s is identical to the running image\n",
+ component);
+ NL_SET_ERR_MSG_MOD(extack, "Component comparison stamp is identical to running image");
+ break;
+ case ICE_AQ_NVM_PASS_COMP_STAMP_LOWER:
+ dev_err(dev, "Component comparison stamp for %s is lower than the running image\n",
+ component);
+ NL_SET_ERR_MSG_MOD(extack, "Component comparison stamp is lower than running image");
+ break;
+ case ICE_AQ_NVM_PASS_COMP_INVALID_STAMP_CODE:
+ dev_err(dev, "Component comparison stamp for %s is invalid\n",
+ component);
+ NL_SET_ERR_MSG_MOD(extack, "Component comparison stamp is invalid");
+ break;
+ case ICE_AQ_NVM_PASS_COMP_CONFLICT_CODE:
+ dev_err(dev, "%s conflicts with a previous component table\n",
+ component);
+ NL_SET_ERR_MSG_MOD(extack, "Component table conflict occurred");
+ break;
+ case ICE_AQ_NVM_PASS_COMP_PRE_REQ_NOT_MET_CODE:
+ dev_err(dev, "Pre-requisites for component %s have not been met\n",
+ component);
+ NL_SET_ERR_MSG_MOD(extack, "Component pre-requisites not met");
+ break;
+ case ICE_AQ_NVM_PASS_COMP_NOT_SUPPORTED_CODE:
+ dev_err(dev, "%s is not a supported component\n",
+ component);
+ NL_SET_ERR_MSG_MOD(extack, "Component not supported");
+ break;
+ case ICE_AQ_NVM_PASS_COMP_CANNOT_DOWNGRADE_CODE:
+ dev_err(dev, "Security restrictions prevent %s from being downgraded\n",
+ component);
+ NL_SET_ERR_MSG_MOD(extack, "Component cannot be downgraded");
+ break;
+ case ICE_AQ_NVM_PASS_COMP_INCOMPLETE_IMAGE_CODE:
+ dev_err(dev, "Received an incomplete component image for %s\n",
+ component);
+ NL_SET_ERR_MSG_MOD(extack, "Incomplete component image");
+ break;
+ case ICE_AQ_NVM_PASS_COMP_VER_STR_IDENTICAL_CODE:
+ dev_err(dev, "Component version for %s is identical to the running image\n",
+ component);
+ NL_SET_ERR_MSG_MOD(extack, "Component version is identical to running image");
+ break;
+ case ICE_AQ_NVM_PASS_COMP_VER_STR_LOWER_CODE:
+ dev_err(dev, "Component version for %s is lower than the running image\n",
+ component);
+ NL_SET_ERR_MSG_MOD(extack, "Component version is lower than the running image");
+ break;
+ default:
+ dev_err(dev, "Unexpected response code 0x02%x for %s\n",
+ code, component);
+ NL_SET_ERR_MSG_MOD(extack, "Received unexpected response code from firmware");
+ break;
+ }
+
+ return -ECANCELED;
+}
+
+/**
+ * ice_send_component_table - Send PLDM component table to firmware
+ * @context: PLDM fw update structure
+ * @component: the component to process
+ * @transfer_flag: relative transfer order of this component
+ *
+ * Read relevant data from the component and forward it to the device
+ * firmware. Check the response to determine if the firmware indicates that
+ * the update can proceed.
+ *
+ * This function sends AdminQ commands related to the NVM, and assumes that
+ * the NVM resource has been acquired.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int
+ice_send_component_table(struct pldmfw *context, struct pldmfw_component *component,
+ u8 transfer_flag)
+{
+ struct ice_fwu_priv *priv = container_of(context, struct ice_fwu_priv, context);
+ struct netlink_ext_ack *extack = priv->extack;
+ struct ice_aqc_nvm_comp_tbl *comp_tbl;
+ u8 comp_response, comp_response_code;
+ struct device *dev = context->dev;
+ struct ice_pf *pf = priv->pf;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ size_t length;
+
+ switch (component->identifier) {
+ case NVM_COMP_ID_OROM:
+ case NVM_COMP_ID_NVM:
+ case NVM_COMP_ID_NETLIST:
+ break;
+ default:
+ dev_err(dev, "Unable to update due to a firmware component with unknown ID %u\n",
+ component->identifier);
+ NL_SET_ERR_MSG_MOD(extack, "Unable to update due to unknown firmware component");
+ return -EOPNOTSUPP;
+ }
+
+ length = struct_size(comp_tbl, cvs, component->version_len);
+ comp_tbl = kzalloc(length, GFP_KERNEL);
+ if (!comp_tbl)
+ return -ENOMEM;
+
+ comp_tbl->comp_class = cpu_to_le16(component->classification);
+ comp_tbl->comp_id = cpu_to_le16(component->identifier);
+ comp_tbl->comp_class_idx = FWU_COMP_CLASS_IDX_NOT_USE;
+ comp_tbl->comp_cmp_stamp = cpu_to_le32(component->comparison_stamp);
+ comp_tbl->cvs_type = component->version_type;
+ comp_tbl->cvs_len = component->version_len;
+ memcpy(comp_tbl->cvs, component->version_string, component->version_len);
+
+ status = ice_nvm_pass_component_tbl(hw, (u8 *)comp_tbl, length,
+ transfer_flag, &comp_response,
+ &comp_response_code, NULL);
+
+ kfree(comp_tbl);
+
+ if (status) {
+ dev_err(dev, "Failed to transfer component table to firmware, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ NL_SET_ERR_MSG_MOD(extack, "Failed to transfer component table to firmware");
+ return -EIO;
+ }
+
+ return ice_check_component_response(pf, component->identifier, comp_response,
+ comp_response_code, extack);
+}
+
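ice_send_component_table() above sizes its buffer with struct_size(), the overflow-safe way to allocate a struct that ends in a flexible array member such as cvs[]. The idiom in isolation, with a hypothetical message struct:

#include <linux/overflow.h>	/* struct_size() */
#include <linux/slab.h>
#include <linux/string.h>

struct msg {
	u16 len;
	u8 payload[];		/* flexible array member */
};

static struct msg *msg_alloc(const u8 *data, u16 n)
{
	/* header plus n payload bytes, with the multiply overflow-checked */
	struct msg *m = kzalloc(struct_size(m, payload, n), GFP_KERNEL);

	if (!m)
		return NULL;
	m->len = n;
	memcpy(m->payload, data, n);
	return m;
}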
+/**
+ * ice_write_one_nvm_block - Write an NVM block and await completion response
+ * @pf: the PF data structure
+ * @module: the module to write to
+ * @offset: offset in bytes
+ * @block_size: size of the block to write, up to 4k
+ * @block: pointer to block of data to write
+ * @last_cmd: whether this is the last command
+ * @extack: netlink extended ACK structure
+ *
+ * Write a block of data to a flash module, and wait for the completion
+ * response message from firmware.
+ *
+ * Note this function assumes the caller has acquired the NVM resource.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int
+ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
+ u16 block_size, u8 *block, bool last_cmd,
+ struct netlink_ext_ack *extack)
+{
+ u16 completion_module, completion_retval;
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_rq_event_info event;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ u32 completion_offset;
+ int err;
+
+ memset(&event, 0, sizeof(event));
+
+ status = ice_aq_update_nvm(hw, module, offset, block_size, block,
+ last_cmd, 0, NULL);
+ if (status) {
+ dev_err(dev, "Failed to program flash module 0x%02x at offset %u, err %s aq_err %s\n",
+ module, offset, ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ NL_SET_ERR_MSG_MOD(extack, "Failed to program flash module");
+ return -EIO;
+ }
+
+ err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write, HZ, &event);
+ if (err) {
+ dev_err(dev, "Timed out waiting for firmware write completion for module 0x%02x, err %d\n",
+ module, err);
+ NL_SET_ERR_MSG_MOD(extack, "Timed out waiting for firmware");
+ return -EIO;
+ }
+
+ completion_module = le16_to_cpu(event.desc.params.nvm.module_typeid);
+ completion_retval = le16_to_cpu(event.desc.retval);
+
+ completion_offset = le16_to_cpu(event.desc.params.nvm.offset_low);
+ completion_offset |= event.desc.params.nvm.offset_high << 16;
+
+ if (completion_module != module) {
+ dev_err(dev, "Unexpected module_typeid in write completion: got 0x%x, expected 0x%x\n",
+ completion_module, module);
+ NL_SET_ERR_MSG_MOD(extack, "Unexpected firmware response");
+ return -EIO;
+ }
+
+ if (completion_offset != offset) {
+ dev_err(dev, "Unexpected offset in write completion: got %u, expected %u\n",
+ completion_offset, offset);
+ NL_SET_ERR_MSG_MOD(extack, "Unexpected firmware response");
+ return -EIO;
+ }
+
+ if (completion_retval) {
+ dev_err(dev, "Firmware failed to program flash module 0x%02x at offset %u, completion err %s\n",
+ module, offset,
+ ice_aq_str((enum ice_aq_err)completion_retval));
+ NL_SET_ERR_MSG_MOD(extack, "Firmware failed to program flash module");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_write_nvm_module - Write data to an NVM module
+ * @pf: the PF driver structure
+ * @module: the module id to program
+ * @component: the name of the component being updated
+ * @image: buffer of image data to write to the NVM
+ * @length: length of the buffer
+ * @extack: netlink extended ACK structure
+ *
+ * Loop over the data for a given NVM module and program it in 4 KB
+ * blocks, notifying devlink core of progress after each block is programmed.
+ *
+ * Note this function assumes the caller has acquired the NVM resource.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int
+ice_write_nvm_module(struct ice_pf *pf, u16 module, const char *component,
+ const u8 *image, u32 length,
+ struct netlink_ext_ack *extack)
+{
+ struct devlink *devlink;
+ u32 offset = 0;
+ bool last_cmd;
+ u8 *block;
+ int err;
+
+ devlink = priv_to_devlink(pf);
+
+ devlink_flash_update_status_notify(devlink, "Flashing",
+ component, 0, length);
+
+ block = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!block)
+ return -ENOMEM;
+
+ do {
+ u32 block_size;
+
+ block_size = min_t(u32, ICE_AQ_MAX_BUF_LEN, length - offset);
+ last_cmd = !(offset + block_size < length);
+
+ /* ice_aq_update_nvm may copy the firmware response into the
+ * buffer, so we must make a copy since the source data is
+ * constant.
+ */
+ memcpy(block, image + offset, block_size);
+
+ err = ice_write_one_nvm_block(pf, module, offset, block_size,
+ block, last_cmd, extack);
+ if (err)
+ break;
+
+ offset += block_size;
+
+ devlink_flash_update_status_notify(devlink, "Flashing",
+ component, offset, length);
+ } while (!last_cmd);
+
+ if (err)
+ devlink_flash_update_status_notify(devlink, "Flashing failed",
+ component, length, length);
+ else
+ devlink_flash_update_status_notify(devlink, "Flashing done",
+ component, length, length);
+
+ kfree(block);
+ return err;
+}
+
+/**
+ * ice_erase_nvm_module - Erase an NVM module and await firmware completion
+ * @pf: the PF data structure
+ * @module: the module to erase
+ * @component: name of the component being updated
+ * @extack: netlink extended ACK structure
+ *
+ * Erase the inactive NVM bank associated with this module, and wait for
+ * a completion response message from firmware.
+ *
+ * Note this function assumes the caller has acquired the NVM resource.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int
+ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
+ struct netlink_ext_ack *extack)
+{
+ u16 completion_module, completion_retval;
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_rq_event_info event;
+ struct ice_hw *hw = &pf->hw;
+ struct devlink *devlink;
+ enum ice_status status;
+ int err;
+
+ memset(&event, 0, sizeof(event));
+
+ devlink = priv_to_devlink(pf);
+
+ devlink_flash_update_status_notify(devlink, "Erasing", component, 0, 0);
+
+ status = ice_aq_erase_nvm(hw, module, NULL);
+ if (status) {
+ dev_err(dev, "Failed to erase %s (module 0x%02x), err %s aq_err %s\n",
+ component, module, ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ NL_SET_ERR_MSG_MOD(extack, "Failed to erase flash module");
+ err = -EIO;
+ goto out_notify_devlink;
+ }
+
+ /* Yes, this really can take minutes to complete */
+ err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_erase, 300 * HZ, &event);
+ if (err) {
+ dev_err(dev, "Timed out waiting for firmware to respond with erase completion for %s (module 0x%02x), err %d\n",
+ component, module, err);
+ NL_SET_ERR_MSG_MOD(extack, "Timed out waiting for firmware");
+ goto out_notify_devlink;
+ }
+
+ completion_module = le16_to_cpu(event.desc.params.nvm.module_typeid);
+ completion_retval = le16_to_cpu(event.desc.retval);
+
+ if (completion_module != module) {
+ dev_err(dev, "Unexpected module_typeid in erase completion for %s: got 0x%x, expected 0x%x\n",
+ component, completion_module, module);
+ NL_SET_ERR_MSG_MOD(extack, "Unexpected firmware response");
+ err = -EIO;
+ goto out_notify_devlink;
+ }
+
+ if (completion_retval) {
+ dev_err(dev, "Firmware failed to erase %s (module 0x02%x), aq_err %s\n",
+ component, module,
+ ice_aq_str((enum ice_aq_err)completion_retval));
+ NL_SET_ERR_MSG_MOD(extack, "Firmware failed to erase flash");
+ err = -EIO;
+ goto out_notify_devlink;
+ }
+
+out_notify_devlink:
+ if (err)
+ devlink_flash_update_status_notify(devlink, "Erasing failed",
+ component, 0, 0);
+ else
+ devlink_flash_update_status_notify(devlink, "Erasing done",
+ component, 0, 0);
+
+ return err;
+}
+
+/**
+ * ice_switch_flash_banks - Tell firmware to switch NVM banks
+ * @pf: Pointer to the PF data structure
+ * @activate_flags: flags used for the activation command
+ * @extack: netlink extended ACK structure
+ *
+ * Notify firmware to activate the newly written flash banks, and wait for the
+ * firmware response.
+ *
+ * Returns: zero on success or an error code on failure.
+ */
+static int ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags,
+ struct netlink_ext_ack *extack)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_rq_event_info event;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ u16 completion_retval;
+ int err;
+
+ memset(&event, 0, sizeof(event));
+
+ status = ice_nvm_write_activate(hw, activate_flags);
+ if (status) {
+ dev_err(dev, "Failed to switch active flash banks, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ NL_SET_ERR_MSG_MOD(extack, "Failed to switch active flash banks");
+ return -EIO;
+ }
+
+ err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write_activate, HZ,
+ &event);
+ if (err) {
+ dev_err(dev, "Timed out waiting for firmware to switch active flash banks, err %d\n",
+ err);
+ NL_SET_ERR_MSG_MOD(extack, "Timed out waiting for firmware");
+ return err;
+ }
+
+ completion_retval = le16_to_cpu(event.desc.retval);
+ if (completion_retval) {
+ dev_err(dev, "Firmware failed to switch active flash banks aq_err %s\n",
+ ice_aq_str((enum ice_aq_err)completion_retval));
+ NL_SET_ERR_MSG_MOD(extack, "Firmware failed to switch active flash banks");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_flash_component - Flash a component of the NVM
+ * @context: PLDM fw update structure
+ * @component: the component table to program
+ *
+ * Program the flash contents for a given component. First, determine the
+ * module id. Then, erase the secondary bank for this module. Finally, write
+ * the contents of the component to the NVM.
+ *
+ * Note this function assumes the caller has acquired the NVM resource.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int
+ice_flash_component(struct pldmfw *context, struct pldmfw_component *component)
+{
+ struct ice_fwu_priv *priv = container_of(context, struct ice_fwu_priv, context);
+ struct netlink_ext_ack *extack = priv->extack;
+ struct ice_pf *pf = priv->pf;
+ const char *name;
+ u16 module;
+ u8 flag;
+ int err;
+
+ switch (component->identifier) {
+ case NVM_COMP_ID_OROM:
+ module = ICE_SR_1ST_OROM_BANK_PTR;
+ flag = ICE_AQC_NVM_ACTIV_SEL_OROM;
+ name = "fw.undi";
+ break;
+ case NVM_COMP_ID_NVM:
+ module = ICE_SR_1ST_NVM_BANK_PTR;
+ flag = ICE_AQC_NVM_ACTIV_SEL_NVM;
+ name = "fw.mgmt";
+ break;
+ case NVM_COMP_ID_NETLIST:
+ module = ICE_SR_NETLIST_BANK_PTR;
+ flag = ICE_AQC_NVM_ACTIV_SEL_NETLIST;
+ name = "fw.netlist";
+ break;
+ default:
+ /* This should not trigger, since we check the id before
+ * sending the component table to firmware.
+ */
+ WARN(1, "Unexpected unknown component identifier 0x%02x",
+ component->identifier);
+ return -EINVAL;
+ }
+
+ /* Mark this component for activating at the end */
+ priv->activate_flags |= flag;
+
+ err = ice_erase_nvm_module(pf, module, name, extack);
+ if (err)
+ return err;
+
+ return ice_write_nvm_module(pf, module, name, component->component_data,
+ component->component_size, extack);
+}
+
+/**
+ * ice_finalize_update - Perform last steps to complete device update
+ * @context: PLDM fw update structure
+ *
+ * Called as the last step of the update process. Complete the update by
+ * telling the firmware to switch the active flash banks.
+ *
+ * Returns: 0 on success, or an error code on failure.
+ */
+static int ice_finalize_update(struct pldmfw *context)
+{
+ struct ice_fwu_priv *priv = container_of(context, struct ice_fwu_priv, context);
+ struct netlink_ext_ack *extack = priv->extack;
+ struct ice_pf *pf = priv->pf;
+ int err;
+
+ /* Finally, notify firmware to activate the written NVM banks */
+ err = ice_switch_flash_banks(pf, priv->activate_flags, extack);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static const struct pldmfw_ops ice_fwu_ops = {
+ .match_record = &pldmfw_op_pci_match_record,
+ .send_package_data = &ice_send_package_data,
+ .send_component_table = &ice_send_component_table,
+ .flash_component = &ice_flash_component,
+ .finalize_update = &ice_finalize_update,
+};
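+
+/* For reference: the pldmfw library invokes these callbacks in order:
+ * match_record to check that the package matches this device,
+ * send_package_data once, send_component_table once per component,
+ * flash_component once per matching component, and finalize_update last
+ * (see the ice_flash_pldm_image comments below).
+ */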
+
+/**
+ * ice_flash_pldm_image - Write a PLDM-formatted firmware image to the device
+ * @pf: private device driver structure
+ * @fw: firmware object pointing to the relevant firmware file
+ * @extack: netlink extended ACK structure
+ *
+ * Parse the data for a given firmware file, verifying that it is a valid PLDM
+ * formatted image that matches this device.
+ *
+ * Extract the device record Package Data and Component Tables and send them
+ * to the firmware. Extract and write the flash data for each of the three
+ * main flash components, "fw.mgmt", "fw.undi", and "fw.netlist". Notify
+ * firmware once the data is written to the inactive banks.
+ *
+ * Returns: zero on success or a negative error code on failure.
+ */
+int ice_flash_pldm_image(struct ice_pf *pf, const struct firmware *fw,
+ struct netlink_ext_ack *extack)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ struct ice_fwu_priv priv;
+ enum ice_status status;
+ int err;
+
+ memset(&priv, 0, sizeof(priv));
+
+ priv.context.ops = &ice_fwu_ops;
+ priv.context.dev = dev;
+ priv.extack = extack;
+ priv.pf = pf;
+ priv.activate_flags = ICE_AQC_NVM_PRESERVE_ALL;
+
+ status = ice_acquire_nvm(hw, ICE_RES_WRITE);
+ if (status) {
+ dev_err(dev, "Failed to acquire device flash lock, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ NL_SET_ERR_MSG_MOD(extack, "Failed to acquire device flash lock");
+ return -EIO;
+ }
+
+ err = pldmfw_flash_image(&priv.context, fw);
+
+ ice_release_nvm(hw);
+
+ return err;
+}
+
+/**
+ * ice_check_for_pending_update - Check for a pending flash update
+ * @pf: the PF driver structure
+ * @component: if not NULL, the name of the component being updated
+ * @extack: Netlink extended ACK structure
+ *
+ * Check whether the device already has a pending flash update. If such an
+ * update is found, cancel it so that the requested update may proceed.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+int ice_check_for_pending_update(struct ice_pf *pf, const char *component,
+ struct netlink_ext_ack *extack)
+{
+ struct devlink *devlink = priv_to_devlink(pf);
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw_dev_caps *dev_caps;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ u8 pending = 0;
+ int err;
+
+ dev_caps = kzalloc(sizeof(*dev_caps), GFP_KERNEL);
+ if (!dev_caps)
+ return -ENOMEM;
+
+ /* Read the most recent device capabilities from firmware. Do not use
+ * the cached values in hw->dev_caps, because the pending update flag
+ * may have changed, e.g. if an update was previously completed and
+ * the system has not yet rebooted.
+ */
+ status = ice_discover_dev_caps(hw, dev_caps);
+ if (status) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to read device capabilities");
+ kfree(dev_caps);
+ return -EIO;
+ }
+
+ if (dev_caps->common_cap.nvm_update_pending_nvm) {
+ dev_info(dev, "The fw.mgmt flash component has a pending update\n");
+ pending |= ICE_AQC_NVM_ACTIV_SEL_NVM;
+ }
+
+ if (dev_caps->common_cap.nvm_update_pending_orom) {
+ dev_info(dev, "The fw.undi flash component has a pending update\n");
+ pending |= ICE_AQC_NVM_ACTIV_SEL_OROM;
+ }
+
+ if (dev_caps->common_cap.nvm_update_pending_netlist) {
+ dev_info(dev, "The fw.netlist flash component has a pending update\n");
+ pending |= ICE_AQC_NVM_ACTIV_SEL_NETLIST;
+ }
+
+ kfree(dev_caps);
+
+ /* If the flash_update request is for a specific component, ignore all
+ * of the other components.
+ */
+ if (component) {
+ if (strcmp(component, "fw.mgmt") == 0)
+ pending &= ICE_AQC_NVM_ACTIV_SEL_NVM;
+ else if (strcmp(component, "fw.undi") == 0)
+ pending &= ICE_AQC_NVM_ACTIV_SEL_OROM;
+ else if (strcmp(component, "fw.netlist") == 0)
+ pending &= ICE_AQC_NVM_ACTIV_SEL_NETLIST;
+ else
+ WARN(1, "Unexpected flash component %s", component);
+ }
+
+ /* There is no previous pending update, so this request may continue */
+ if (!pending)
+ return 0;
+
+ /* In order to allow overwriting a previous pending update, notify
+ * firmware to cancel that update by issuing the appropriate command.
+ */
+ devlink_flash_update_status_notify(devlink,
+ "Canceling previous pending update",
+ component, 0, 0);
+
+ status = ice_acquire_nvm(hw, ICE_RES_WRITE);
+ if (status) {
+ dev_err(dev, "Failed to acquire device flash lock, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ NL_SET_ERR_MSG_MOD(extack, "Failed to acquire device flash lock");
+ return -EIO;
+ }
+
+ pending |= ICE_AQC_NVM_REVERT_LAST_ACTIV;
+ err = ice_switch_flash_banks(pf, pending, extack);
+
+ ice_release_nvm(hw);
+
+ return err;
+}
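+
+/* Illustrative caller flow (a sketch, not part of this patch; the real
+ * devlink flash handler may differ):
+ *
+ *	err = ice_check_for_pending_update(pf, NULL, extack);
+ *	if (err)
+ *		return err;
+ *
+ *	return ice_flash_pldm_image(pf, fw, extack);
+ */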
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.h b/drivers/net/ethernet/intel/ice/ice_fw_update.h
new file mode 100644
index 000000000000..79472cc618b4
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2019, Intel Corporation. */
+
+#ifndef _ICE_FW_UPDATE_H_
+#define _ICE_FW_UPDATE_H_
+
+int ice_flash_pldm_image(struct ice_pf *pf, const struct firmware *fw,
+ struct netlink_ext_ack *extack);
+int ice_check_for_pending_update(struct ice_pf *pf, const char *component,
+ struct netlink_ext_ack *extack);
+
+#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 92e4abca62a4..90abc8612a6a 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -57,7 +57,7 @@
#define PRTDCB_GENS 0x00083020
#define PRTDCB_GENS_DCBX_STATUS_S 0
#define PRTDCB_GENS_DCBX_STATUS_M ICE_M(0x7, 0)
-#define PRTDCB_TUP2TC 0x001D26C0 /* Reset Source: CORER */
+#define PRTDCB_TUP2TC 0x001D26C0
#define GL_PREEXT_L2_PMASK0(_i) (0x0020F0FC + ((_i) * 4))
#define GL_PREEXT_L2_PMASK1(_i) (0x0020F108 + ((_i) * 4))
#define GLFLXP_RXDID_FLX_WRD_0(_i) (0x0045c800 + ((_i) * 4))
@@ -362,6 +362,7 @@
#define GLV_TEPC(_VSI) (0x00312000 + ((_VSI) * 4))
#define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8))
#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8))
+#define PRTRPB_RDPC 0x000AC260
#define VSIQF_FD_CNT(_VSI) (0x00464000 + ((_VSI) * 4))
#define VSIQF_FD_CNT_FD_GCNT_S 0
#define VSIQF_FD_CNT_FD_GCNT_M ICE_M(0x3FFF, 0)
@@ -378,6 +379,5 @@
#define PFPM_WUS_FW_RST_WK_M BIT(31)
#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4))
#define VFINT_DYN_CTLN_CLEARPBA_M BIT(1)
-#define PRTRPB_RDPC 0x000AC260
#endif /* _ICE_HW_AUTOGEN_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 14dfbbc1b2cf..4ec24c3e813f 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -601,6 +601,7 @@ struct ice_tlan_ctx {
/* shorter macros makes the table fit but are terse */
#define ICE_RX_PTYPE_NOF ICE_RX_PTYPE_NOT_FRAG
+#define ICE_RX_PTYPE_FRG ICE_RX_PTYPE_FRAG
/* Lookup table mapping the HW PTYPE to the bit field for decoding */
static const struct ice_rx_ptype_decoded ice_ptype_lkup[] = {
@@ -608,6 +609,319 @@ static const struct ice_rx_ptype_decoded ice_ptype_lkup[] = {
ICE_PTT_UNUSED_ENTRY(0),
ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
ICE_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ ICE_PTT_UNUSED_ENTRY(3),
+ ICE_PTT_UNUSED_ENTRY(4),
+ ICE_PTT_UNUSED_ENTRY(5),
+ ICE_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ ICE_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ ICE_PTT_UNUSED_ENTRY(8),
+ ICE_PTT_UNUSED_ENTRY(9),
+ ICE_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ ICE_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ ICE_PTT_UNUSED_ENTRY(12),
+ ICE_PTT_UNUSED_ENTRY(13),
+ ICE_PTT_UNUSED_ENTRY(14),
+ ICE_PTT_UNUSED_ENTRY(15),
+ ICE_PTT_UNUSED_ENTRY(16),
+ ICE_PTT_UNUSED_ENTRY(17),
+ ICE_PTT_UNUSED_ENTRY(18),
+ ICE_PTT_UNUSED_ENTRY(19),
+ ICE_PTT_UNUSED_ENTRY(20),
+ ICE_PTT_UNUSED_ENTRY(21),
+
+ /* Non Tunneled IPv4 */
+ ICE_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
+ ICE_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
+ ICE_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(25),
+ ICE_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
+ ICE_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ ICE_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv4 */
+ ICE_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ ICE_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ ICE_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(32),
+ ICE_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ ICE_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ ICE_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv6 */
+ ICE_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ ICE_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ ICE_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(39),
+ ICE_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ ICE_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ ICE_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT */
+ ICE_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> IPv4 */
+ ICE_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ ICE_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ ICE_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(47),
+ ICE_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ ICE_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ ICE_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> IPv6 */
+ ICE_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ ICE_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ ICE_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(54),
+ ICE_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ ICE_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ ICE_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC */
+ ICE_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
+ ICE_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ ICE_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ ICE_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(62),
+ ICE_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ ICE_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ ICE_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
+ ICE_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ ICE_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ ICE_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(69),
+ ICE_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ ICE_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ ICE_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC/VLAN */
+ ICE_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
+ ICE_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ ICE_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ ICE_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(77),
+ ICE_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ ICE_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ ICE_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
+ ICE_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ ICE_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ ICE_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(84),
+ ICE_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ ICE_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ ICE_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* Non Tunneled IPv6 */
+ ICE_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
+ ICE_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
+ ICE_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3),
+ ICE_PTT_UNUSED_ENTRY(91),
+ ICE_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
+ ICE_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ ICE_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv4 */
+ ICE_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ ICE_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ ICE_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(98),
+ ICE_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ ICE_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ ICE_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv6 */
+ ICE_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ ICE_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ ICE_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(105),
+ ICE_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ ICE_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ ICE_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT */
+ ICE_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> IPv4 */
+ ICE_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ ICE_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ ICE_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(113),
+ ICE_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ ICE_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ ICE_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> IPv6 */
+ ICE_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ ICE_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ ICE_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(120),
+ ICE_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ ICE_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ ICE_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC */
+ ICE_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
+ ICE_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ ICE_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ ICE_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(128),
+ ICE_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ ICE_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ ICE_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
+ ICE_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ ICE_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ ICE_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(135),
+ ICE_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ ICE_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ ICE_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN */
+ ICE_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
+ ICE_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ ICE_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ ICE_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(143),
+ ICE_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ ICE_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ ICE_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
+ ICE_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ ICE_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ ICE_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(150),
+ ICE_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ ICE_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ ICE_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* unused entries */
+ ICE_PTT_UNUSED_ENTRY(154),
+ ICE_PTT_UNUSED_ENTRY(155),
+ ICE_PTT_UNUSED_ENTRY(156),
+ ICE_PTT_UNUSED_ENTRY(157),
+ ICE_PTT_UNUSED_ENTRY(158),
+ ICE_PTT_UNUSED_ENTRY(159),
+
+ ICE_PTT_UNUSED_ENTRY(160),
+ ICE_PTT_UNUSED_ENTRY(161),
+ ICE_PTT_UNUSED_ENTRY(162),
+ ICE_PTT_UNUSED_ENTRY(163),
+ ICE_PTT_UNUSED_ENTRY(164),
+ ICE_PTT_UNUSED_ENTRY(165),
+ ICE_PTT_UNUSED_ENTRY(166),
+ ICE_PTT_UNUSED_ENTRY(167),
+ ICE_PTT_UNUSED_ENTRY(168),
+ ICE_PTT_UNUSED_ENTRY(169),
+
+ ICE_PTT_UNUSED_ENTRY(170),
+ ICE_PTT_UNUSED_ENTRY(171),
+ ICE_PTT_UNUSED_ENTRY(172),
+ ICE_PTT_UNUSED_ENTRY(173),
+ ICE_PTT_UNUSED_ENTRY(174),
+ ICE_PTT_UNUSED_ENTRY(175),
+ ICE_PTT_UNUSED_ENTRY(176),
+ ICE_PTT_UNUSED_ENTRY(177),
+ ICE_PTT_UNUSED_ENTRY(178),
+ ICE_PTT_UNUSED_ENTRY(179),
+
+ ICE_PTT_UNUSED_ENTRY(180),
+ ICE_PTT_UNUSED_ENTRY(181),
+ ICE_PTT_UNUSED_ENTRY(182),
+ ICE_PTT_UNUSED_ENTRY(183),
+ ICE_PTT_UNUSED_ENTRY(184),
+ ICE_PTT_UNUSED_ENTRY(185),
+ ICE_PTT_UNUSED_ENTRY(186),
+ ICE_PTT_UNUSED_ENTRY(187),
+ ICE_PTT_UNUSED_ENTRY(188),
+ ICE_PTT_UNUSED_ENTRY(189),
+
+ ICE_PTT_UNUSED_ENTRY(190),
+ ICE_PTT_UNUSED_ENTRY(191),
+ ICE_PTT_UNUSED_ENTRY(192),
+ ICE_PTT_UNUSED_ENTRY(193),
+ ICE_PTT_UNUSED_ENTRY(194),
+ ICE_PTT_UNUSED_ENTRY(195),
+ ICE_PTT_UNUSED_ENTRY(196),
+ ICE_PTT_UNUSED_ENTRY(197),
+ ICE_PTT_UNUSED_ENTRY(198),
+ ICE_PTT_UNUSED_ENTRY(199),
+
+ ICE_PTT_UNUSED_ENTRY(200),
+ ICE_PTT_UNUSED_ENTRY(201),
+ ICE_PTT_UNUSED_ENTRY(202),
+ ICE_PTT_UNUSED_ENTRY(203),
+ ICE_PTT_UNUSED_ENTRY(204),
+ ICE_PTT_UNUSED_ENTRY(205),
+ ICE_PTT_UNUSED_ENTRY(206),
+ ICE_PTT_UNUSED_ENTRY(207),
+ ICE_PTT_UNUSED_ENTRY(208),
+ ICE_PTT_UNUSED_ENTRY(209),
+
+ ICE_PTT_UNUSED_ENTRY(210),
+ ICE_PTT_UNUSED_ENTRY(211),
+ ICE_PTT_UNUSED_ENTRY(212),
+ ICE_PTT_UNUSED_ENTRY(213),
+ ICE_PTT_UNUSED_ENTRY(214),
+ ICE_PTT_UNUSED_ENTRY(215),
+ ICE_PTT_UNUSED_ENTRY(216),
+ ICE_PTT_UNUSED_ENTRY(217),
+ ICE_PTT_UNUSED_ENTRY(218),
+ ICE_PTT_UNUSED_ENTRY(219),
+
+ ICE_PTT_UNUSED_ENTRY(220),
+ ICE_PTT_UNUSED_ENTRY(221),
+ ICE_PTT_UNUSED_ENTRY(222),
+ ICE_PTT_UNUSED_ENTRY(223),
+ ICE_PTT_UNUSED_ENTRY(224),
+ ICE_PTT_UNUSED_ENTRY(225),
+ ICE_PTT_UNUSED_ENTRY(226),
+ ICE_PTT_UNUSED_ENTRY(227),
+ ICE_PTT_UNUSED_ENTRY(228),
+ ICE_PTT_UNUSED_ENTRY(229),
+
+ ICE_PTT_UNUSED_ENTRY(230),
+ ICE_PTT_UNUSED_ENTRY(231),
+ ICE_PTT_UNUSED_ENTRY(232),
+ ICE_PTT_UNUSED_ENTRY(233),
+ ICE_PTT_UNUSED_ENTRY(234),
+ ICE_PTT_UNUSED_ENTRY(235),
+ ICE_PTT_UNUSED_ENTRY(236),
+ ICE_PTT_UNUSED_ENTRY(237),
+ ICE_PTT_UNUSED_ENTRY(238),
+ ICE_PTT_UNUSED_ENTRY(239),
+
+ ICE_PTT_UNUSED_ENTRY(240),
+ ICE_PTT_UNUSED_ENTRY(241),
+ ICE_PTT_UNUSED_ENTRY(242),
+ ICE_PTT_UNUSED_ENTRY(243),
+ ICE_PTT_UNUSED_ENTRY(244),
+ ICE_PTT_UNUSED_ENTRY(245),
+ ICE_PTT_UNUSED_ENTRY(246),
+ ICE_PTT_UNUSED_ENTRY(247),
+ ICE_PTT_UNUSED_ENTRY(248),
+ ICE_PTT_UNUSED_ENTRY(249),
+
+ ICE_PTT_UNUSED_ENTRY(250),
+ ICE_PTT_UNUSED_ENTRY(251),
+ ICE_PTT_UNUSED_ENTRY(252),
+ ICE_PTT_UNUSED_ENTRY(253),
+ ICE_PTT_UNUSED_ENTRY(254),
+ ICE_PTT_UNUSED_ENTRY(255),
};
static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 8f6a191839f1..f2682776f8c8 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -127,8 +127,14 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
case ICE_VSI_PF:
case ICE_VSI_CTRL:
case ICE_VSI_LB:
- vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
- vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
+ /* a user could change the values of num_[tr]x_desc using
+ * ethtool -G, so we should keep those values instead of
+ * overwriting them with the defaults.
+ */
+ if (!vsi->num_rx_desc)
+ vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
+ if (!vsi->num_tx_desc)
+ vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
break;
default:
dev_dbg(ice_pf_to_dev(vsi->back), "Not setting number of Tx/Rx descriptors for VSI type %d\n",
@@ -2011,6 +2017,13 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
if (!vsi)
return -EINVAL;
+ /* Don't enable VLAN pruning if the netdev is currently in promiscuous
+ * mode. VLAN pruning will be enabled when the interface exits
+ * promiscuous mode if any VLAN filters are active.
+ */
+ if (vsi->netdev && vsi->netdev->flags & IFF_PROMISC && ena)
+ return 0;
+
pf = vsi->back;
ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 231f4b6e93d0..8437d72795b0 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -369,6 +369,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
~IFF_PROMISC;
goto out_promisc;
}
+ ice_cfg_vlan_pruning(vsi, false, false);
}
} else {
/* Clear Rx filter to remove traffic from wire */
@@ -381,6 +382,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
IFF_PROMISC;
goto out_promisc;
}
+ if (vsi->num_vlan > 1)
+ ice_cfg_vlan_pruning(vsi, true, false);
}
}
}
@@ -767,6 +770,100 @@ static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
}
/**
+ * ice_set_dflt_mib - send a default config MIB to the FW
+ * @pf: private PF struct
+ *
+ * This function sends a default configuration MIB to the FW.
+ *
+ * If this function errors out at any point, the driver is still able to
+ * function. The main impact is that link flow control (LFC) may not operate
+ * as expected. Therefore, errors here should only be logged with a debug
+ * message, and the driver should continue with the rebuild/re-enable flow.
+ */
+static void ice_set_dflt_mib(struct ice_pf *pf)
+{
+ u8 mib_type, *buf, *lldpmib = NULL;
+ u16 len, typelen, offset = 0;
+ struct ice_lldp_org_tlv *tlv;
+ struct device *dev;
+ struct ice_hw *hw;
+ u32 ouisubtype;
+
+ /* do not call ice_pf_to_dev(pf) before validating pf */
+ if (!pf)
+ return;
+
+ dev = ice_pf_to_dev(pf);
+ hw = &pf->hw;
+ mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
+ lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
+ if (!lldpmib) {
+ dev_dbg(dev, "%s Failed to allocate MIB memory\n",
+ __func__);
+ return;
+ }
+
+ /* Add ETS CFG TLV */
+ tlv = (struct ice_lldp_org_tlv *)lldpmib;
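+ /* An LLDP TLV header packs a 7-bit type and a 9-bit length into one
+ * 16-bit word (IEEE 802.1AB), so the type is shifted past the length
+ * bits here; ICE_LLDP_TLV_TYPE_S is assumed to be 9.
+ */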
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
+ ICE_IEEE_ETS_TLV_LEN);
+ tlv->typelen = htons(typelen);
+ ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_IEEE_SUBTYPE_ETS_CFG);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ buf = tlv->tlvinfo;
+ buf[0] = 0;
+
+ /* ETS CFG: all UPs map to TC 0, so the next 4 octets (1 - 4) are 0.
+ * Octets 5 - 12 are BW values; set octet 5 (TC 0) to 100% BW.
+ * Octets 13 - 20 are TSA values - leave as zeros
+ */
+ buf[5] = 0x64;
+ len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
+ offset += len + 2;
+ tlv = (struct ice_lldp_org_tlv *)
+ ((char *)tlv + sizeof(tlv->typelen) + len);
+
+ /* Add ETS REC TLV */
+ buf = tlv->tlvinfo;
+ tlv->typelen = htons(typelen);
+
+ ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_IEEE_SUBTYPE_ETS_REC);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ /* First octet of buf is reserved
+ * Octets 1 - 4 map UP to TC - all UPs map to zero
+ * Octets 5 - 12 are BW values - set TC 0 to 100%.
+ * Octets 13 - 20 are TSA values - leave as zeros
+ */
+ buf[5] = 0x64;
+ offset += len + 2;
+ tlv = (struct ice_lldp_org_tlv *)
+ ((char *)tlv + sizeof(tlv->typelen) + len);
+
+ /* Add PFC CFG TLV */
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
+ ICE_IEEE_PFC_TLV_LEN);
+ tlv->typelen = htons(typelen);
+
+ ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_IEEE_SUBTYPE_PFC_CFG);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ /* Octet 0 advertises a PFC capability of 8 traffic classes; octet 1 is
+ * left as all zeros - PFC disabled
+ */
+ buf[0] = 0x08;
+ len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
+ offset += len + 2;
+
+ if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
+ dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);
+
+ kfree(lldpmib);
+}
+
+/**
* ice_link_event - process the link event
* @pf: PF that the link event is associated with
* @pi: port_info for the port that the link event is associated with
@@ -800,6 +897,12 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
dev_dbg(dev, "Failed to update link status and re-enable link events for port %d\n",
pi->lport);
+ /* Check if the link state is up after updating link info, and treat
+ * this event as an UP event since the link is actually UP now.
+ */
+ if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
+ link_up = true;
+
vsi = ice_get_main_vsi(pf);
if (!vsi || !vsi->port_info)
return -EINVAL;
@@ -821,7 +924,13 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
if (link_up == old_link && link_speed == old_link_speed)
return result;
- ice_dcb_rebuild(pf);
+ if (ice_is_dcb_active(pf)) {
+ if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
+ ice_dcb_rebuild(pf);
+ } else {
+ if (link_up)
+ ice_set_dflt_mib(pf);
+ }
ice_vsi_link_event(vsi, link_up);
ice_print_link_msg(vsi, link_up);
@@ -914,6 +1023,151 @@ ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
return status;
}
+enum ice_aq_task_state {
+ ICE_AQ_TASK_WAITING = 0,
+ ICE_AQ_TASK_COMPLETE,
+ ICE_AQ_TASK_CANCELED,
+};
+
+struct ice_aq_task {
+ struct hlist_node entry;
+
+ u16 opcode;
+ struct ice_rq_event_info *event;
+ enum ice_aq_task_state state;
+};
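+
+/* Lifecycle: ice_aq_wait_for_event() queues a task on pf->aq_wait_list and
+ * sleeps on pf->aq_wait_queue; the AdminQ cleanup path completes matching
+ * tasks via ice_aq_check_events(), and ice_aq_cancel_waiting_tasks() cancels
+ * any remaining waiters on driver removal.
+ */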
+
+/**
+ * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
+ * @pf: pointer to the PF private structure
+ * @opcode: the opcode to wait for
+ * @timeout: how long to wait, in jiffies
+ * @event: storage for the event info
+ *
+ * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
+ * current thread will be put to sleep until the specified event occurs or
+ * until the given timeout is reached.
+ *
+ * To obtain only the descriptor contents, pass an event without an allocated
+ * msg_buf. If the complete data buffer is desired, allocate the
+ * event->msg_buf with enough space ahead of time.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
+ struct ice_rq_event_info *event)
+{
+ struct ice_aq_task *task;
+ long ret;
+ int err;
+
+ task = kzalloc(sizeof(*task), GFP_KERNEL);
+ if (!task)
+ return -ENOMEM;
+
+ INIT_HLIST_NODE(&task->entry);
+ task->opcode = opcode;
+ task->event = event;
+ task->state = ICE_AQ_TASK_WAITING;
+
+ spin_lock_bh(&pf->aq_wait_lock);
+ hlist_add_head(&task->entry, &pf->aq_wait_list);
+ spin_unlock_bh(&pf->aq_wait_lock);
+
+ ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
+ timeout);
+ switch (task->state) {
+ case ICE_AQ_TASK_WAITING:
+ err = ret < 0 ? ret : -ETIMEDOUT;
+ break;
+ case ICE_AQ_TASK_CANCELED:
+ err = ret < 0 ? ret : -ECANCELED;
+ break;
+ case ICE_AQ_TASK_COMPLETE:
+ err = ret < 0 ? ret : 0;
+ break;
+ default:
+ WARN(1, "Unexpected AdminQ wait task state %u", task->state);
+ err = -EINVAL;
+ break;
+ }
+
+ spin_lock_bh(&pf->aq_wait_lock);
+ hlist_del(&task->entry);
+ spin_unlock_bh(&pf->aq_wait_lock);
+ kfree(task);
+
+ return err;
+}
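+
+/* Example usage (a sketch; callers in this patch pass real opcodes such as
+ * ice_aqc_opc_nvm_erase): wait up to 30 seconds for a completion and read
+ * the descriptor return value, with no msg_buf allocated:
+ *
+ *	struct ice_rq_event_info event;
+ *	u16 retval;
+ *	int err;
+ *
+ *	memset(&event, 0, sizeof(event));
+ *	err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_erase, 30 * HZ,
+ *				    &event);
+ *	if (!err)
+ *		retval = le16_to_cpu(event.desc.retval);
+ */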
+
+/**
+ * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
+ * @pf: pointer to the PF private structure
+ * @opcode: the opcode of the event
+ * @event: the event to check
+ *
+ * Loops over the current list of pending threads waiting for an AdminQ event.
+ * For each matching task, copy the contents of the event into the task
+ * structure and wake up the thread.
+ *
+ * If multiple threads wait for the same opcode, they will all be woken up.
+ *
+ * Note that event->msg_buf will only be duplicated if the event has a buffer
+ * with enough space already allocated. Otherwise, only the descriptor and
+ * message length will be copied.
+ */
+static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
+ struct ice_rq_event_info *event)
+{
+ struct ice_aq_task *task;
+ bool found = false;
+
+ spin_lock_bh(&pf->aq_wait_lock);
+ hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
+ if (task->state || task->opcode != opcode)
+ continue;
+
+ memcpy(&task->event->desc, &event->desc, sizeof(event->desc));
+ task->event->msg_len = event->msg_len;
+
+ /* Only copy the data buffer if a destination was set */
+ if (task->event->msg_buf &&
+ task->event->buf_len > event->buf_len) {
+ memcpy(task->event->msg_buf, event->msg_buf,
+ event->buf_len);
+ task->event->buf_len = event->buf_len;
+ }
+
+ task->state = ICE_AQ_TASK_COMPLETE;
+ found = true;
+ }
+ spin_unlock_bh(&pf->aq_wait_lock);
+
+ if (found)
+ wake_up(&pf->aq_wait_queue);
+}
+
+/**
+ * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
+ * @pf: the PF private structure
+ *
+ * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
+ * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
+ */
+static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
+{
+ struct ice_aq_task *task;
+
+ spin_lock_bh(&pf->aq_wait_lock);
+ hlist_for_each_entry(task, &pf->aq_wait_list, entry)
+ task->state = ICE_AQ_TASK_CANCELED;
+ spin_unlock_bh(&pf->aq_wait_lock);
+
+ wake_up(&pf->aq_wait_queue);
+}
+
/**
* __ice_clean_ctrlq - helper function to clean controlq rings
* @pf: ptr to struct ice_pf
@@ -1010,6 +1264,9 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
opcode = le16_to_cpu(event.desc.opcode);
+ /* Notify any thread that might be waiting for this event */
+ ice_aq_check_events(pf, opcode, &event);
+
switch (opcode) {
case ice_aqc_opc_get_link_status:
if (ice_handle_link_event(pf, &event))
@@ -3086,6 +3343,10 @@ static int ice_init_pf(struct ice_pf *pf)
mutex_init(&pf->sw_mutex);
mutex_init(&pf->tc_mutex);
+ INIT_HLIST_HEAD(&pf->aq_wait_list);
+ spin_lock_init(&pf->aq_wait_lock);
+ init_waitqueue_head(&pf->aq_wait_queue);
+
/* setup service timer and periodic service task */
timer_setup(&pf->serv_tmr, ice_service_timer, 0);
pf->serv_tmr_period = HZ;
@@ -3323,6 +3584,60 @@ done:
}
/**
+ * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
+ * @pf: PF to configure
+ *
+ * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
+ * VSI can still Tx/Rx VLAN tagged packets.
+ */
+static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
+{
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
+ struct ice_vsi_ctx *ctxt;
+ enum ice_status status;
+ struct ice_hw *hw;
+
+ if (!vsi)
+ return;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return;
+
+ hw = &pf->hw;
+ ctxt->info = vsi->info;
+
+ ctxt->info.valid_sections =
+ cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
+ ICE_AQ_VSI_PROP_SECURITY_VALID |
+ ICE_AQ_VSI_PROP_SW_VALID);
+
+ /* disable VLAN anti-spoof */
+ ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+ ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
+
+ /* disable VLAN pruning and keep all other settings */
+ ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+
+ /* allow all VLANs on Tx and don't strip on Rx */
+ ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL |
+ ICE_AQ_VSI_VLAN_EMOD_NOTHING;
+
+ status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (status) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ } else {
+ vsi->info.sec_flags = ctxt->info.sec_flags;
+ vsi->info.sw_flags2 = ctxt->info.sw_flags2;
+ vsi->info.vlan_flags = ctxt->info.vlan_flags;
+ }
+
+ kfree(ctxt);
+}
+
+/**
* ice_log_pkg_init - log result of DDP package load
* @hw: pointer to hardware info
* @status: status of package load
@@ -3819,7 +4134,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
if (err) {
dev_err(dev, "probe failed sending driver version %s. error: %d\n",
UTS_RELEASE, err);
- goto err_alloc_sw_unroll;
+ goto err_send_version_unroll;
}
/* since everything is good, start the service timer */
@@ -3828,19 +4143,19 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
err = ice_init_link_events(pf->hw.port_info);
if (err) {
dev_err(dev, "ice_init_link_events failed: %d\n", err);
- goto err_alloc_sw_unroll;
+ goto err_send_version_unroll;
}
err = ice_init_nvm_phy_type(pf->hw.port_info);
if (err) {
dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
- goto err_alloc_sw_unroll;
+ goto err_send_version_unroll;
}
err = ice_update_link_info(pf->hw.port_info);
if (err) {
dev_err(dev, "ice_update_link_info failed: %d\n", err);
- goto err_alloc_sw_unroll;
+ goto err_send_version_unroll;
}
ice_init_link_dflt_override(pf->hw.port_info);
@@ -3851,7 +4166,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
err = ice_init_phy_user_cfg(pf->hw.port_info);
if (err) {
dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
- goto err_alloc_sw_unroll;
+ goto err_send_version_unroll;
}
if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
@@ -3878,9 +4193,10 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
/* Disable WoL at init, wait for user to enable */
device_set_wakeup_enable(dev, false);
- /* If no DDP driven features have to be setup, we are done with probe */
- if (ice_is_safe_mode(pf))
+ if (ice_is_safe_mode(pf)) {
+ ice_set_safe_mode_vlan_cfg(pf);
goto probe_done;
+ }
/* initialize DDP driven features */
@@ -3904,6 +4220,8 @@ probe_done:
clear_bit(__ICE_DOWN, pf->state);
return 0;
+err_send_version_unroll:
+ ice_vsi_release_all(pf);
err_alloc_sw_unroll:
ice_devlink_destroy_port(pf);
set_bit(__ICE_SERVICE_DIS, pf->state);
@@ -4014,6 +4332,8 @@ static void ice_remove(struct pci_dev *pdev)
set_bit(__ICE_DOWN, pf->state);
ice_service_task_stop(pf);
+ ice_aq_cancel_waiting_tasks(pf);
+
mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
if (!ice_is_safe_mode(pf))
ice_remove_arfs(pf);
@@ -4147,7 +4467,7 @@ err_reinit:
* Power Management callback to quiesce the device and prepare
* for D3 transition.
*/
-static int ice_suspend(struct device *dev)
+static int __maybe_unused ice_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct ice_pf *pf;
@@ -4211,7 +4531,7 @@ static int ice_suspend(struct device *dev)
* ice_resume - PM callback for waking up from D3
* @dev: generic device information structure
*/
-static int ice_resume(struct device *dev)
+static int __maybe_unused ice_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
enum ice_reset_req reset_type;
@@ -4360,6 +4680,8 @@ static void ice_pci_err_resume(struct pci_dev *pdev)
return;
}
+ ice_restore_all_vfs_msi_state(pdev);
+
ice_do_reset(pf, ICE_RESET_PFR);
ice_service_task_restart(pf);
mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
@@ -4968,6 +5290,7 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
vsi->tx_linearize = 0;
vsi->rx_buf_failed = 0;
vsi->rx_page_failed = 0;
+ vsi->rx_gro_dropped = 0;
rcu_read_lock();
@@ -4982,6 +5305,7 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
vsi_stats->rx_bytes += bytes;
vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
+ vsi->rx_gro_dropped += ring->rx_stats.gro_dropped;
}
/* update XDP Tx rings counters */
@@ -5013,7 +5337,7 @@ void ice_update_vsi_stats(struct ice_vsi *vsi)
ice_update_eth_stats(vsi);
cur_ns->tx_errors = cur_es->tx_errors;
- cur_ns->rx_dropped = cur_es->rx_discards;
+ cur_ns->rx_dropped = cur_es->rx_discards + vsi->rx_gro_dropped;
cur_ns->tx_dropped = cur_es->tx_discards;
cur_ns->multicast = cur_es->rx_multicast;
@@ -5656,10 +5980,6 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
if (err)
goto err_sched_init_port;
- err = ice_update_link_info(hw->port_info);
- if (err)
- dev_err(dev, "Get link status error %d\n", err);
-
/* start misc vector */
err = ice_req_irq_msix_misc(pf);
if (err) {
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index 7c2a06892bbb..5903a36763de 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -108,6 +108,76 @@ ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
}
/**
+ * ice_aq_update_nvm
+ * @hw: pointer to the HW struct
+ * @module_typeid: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be written (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @command_flags: command parameters
+ * @cd: pointer to command details structure or NULL
+ *
+ * Update the NVM using the admin queue commands (0x0703)
+ */
+enum ice_status
+ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
+ u16 length, void *data, bool last_command, u8 command_flags,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc;
+ struct ice_aqc_nvm *cmd;
+
+ cmd = &desc.params.nvm;
+
+ /* The highest byte of the offset must be zero. */
+ if (offset & 0xFF000000)
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write);
+
+ cmd->cmd_flags |= command_flags;
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD;
+ cmd->module_typeid = cpu_to_le16(module_typeid);
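+ /* e.g. offset 0x123456 is split into offset_low 0x3456 and
+ * offset_high 0x12; offsets above 24 bits were rejected above.
+ */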
+ cmd->offset_low = cpu_to_le16(offset & 0xFFFF);
+ cmd->offset_high = (offset >> 16) & 0xFF;
+ cmd->length = cpu_to_le16(length);
+
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ return ice_aq_send_cmd(hw, &desc, data, length, cd);
+}
+
+/**
+ * ice_aq_erase_nvm
+ * @hw: pointer to the HW struct
+ * @module_typeid: module pointer location in words from the NVM beginning
+ * @cd: pointer to command details structure or NULL
+ *
+ * Erase the NVM sector using the admin queue commands (0x0702)
+ */
+enum ice_status
+ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc;
+ struct ice_aqc_nvm *cmd;
+
+ cmd = &desc.params.nvm;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_erase);
+
+ cmd->module_typeid = cpu_to_le16(module_typeid);
+ cmd->length = cpu_to_le16(ICE_AQC_NVM_ERASE_LEN);
+ cmd->offset_low = 0;
+ cmd->offset_high = 0;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
* ice_read_sr_word_aq - Reads Shadow RAM via AQ
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
@@ -634,3 +704,119 @@ enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw)
return status;
}
+
+/**
+ * ice_nvm_write_activate
+ * @hw: pointer to the HW struct
+ * @cmd_flags: NVM activate admin command bits (banks to be validated)
+ *
+ * Update the control word with the required banks' validity bits
+ * and dumps the Shadow RAM to flash (0x0707)
+ */
+enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags)
+{
+ struct ice_aqc_nvm *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.nvm;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write_activate);
+
+ cmd->cmd_flags = cmd_flags;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
+/**
+ * ice_aq_nvm_update_empr
+ * @hw: pointer to the HW struct
+ *
+ * Update EMPR (0x0709). This command allows SW to
+ * request an EMPR to activate new FW.
+ */
+enum ice_status ice_aq_nvm_update_empr(struct ice_hw *hw)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_update_empr);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
+/**
+ * ice_nvm_set_pkg_data
+ * @hw: pointer to the HW struct
+ * @del_pkg_data_flag: if set, the pkg_data currently stored by FW is
+ * deleted; in that case the buffer must have size 0
+ * @data: pointer to buffer
+ * @length: length of the buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set package data (0x070A). This command is equivalent to the reception
+ * of a PLDM FW Update GetPackageData cmd. This command should be sent
+ * as part of the NVM update as the first cmd in the flow.
+ */
+enum ice_status
+ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data,
+ u16 length, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_nvm_pkg_data *cmd;
+ struct ice_aq_desc desc;
+
+ if (length != 0 && !data)
+ return ICE_ERR_PARAM;
+
+ cmd = &desc.params.pkg_data;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_pkg_data);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ if (del_pkg_data_flag)
+ cmd->cmd_flags |= ICE_AQC_NVM_PKG_DELETE;
+
+ return ice_aq_send_cmd(hw, &desc, data, length, cd);
+}
+
+/**
+ * ice_nvm_pass_component_tbl
+ * @hw: pointer to the HW struct
+ * @data: pointer to buffer
+ * @length: length of the buffer
+ * @transfer_flag: parameter for determining stage of the update
+ * @comp_response: a pointer to the response from the 0x070B AQC.
+ * @comp_response_code: a pointer to the response code from the 0x070B AQC.
+ * @cd: pointer to command details structure or NULL
+ *
+ * Pass component table (0x070B). This command is equivalent to the reception
+ * of a PLDM FW Update PassComponentTable cmd. This command should be sent once
+ * per component. It can be only sent after Set Package Data cmd and before
+ * actual update. FW will assume these commands are going to be sent until
+ * the TransferFlag is set to End or StartAndEnd.
+ */
+enum ice_status
+ice_nvm_pass_component_tbl(struct ice_hw *hw, u8 *data, u16 length,
+ u8 transfer_flag, u8 *comp_response,
+ u8 *comp_response_code, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_nvm_pass_comp_tbl *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ if (!data || !comp_response || !comp_response_code)
+ return ICE_ERR_PARAM;
+
+ cmd = &desc.params.pass_comp_tbl;
+
+ ice_fill_dflt_direct_cmd_desc(&desc,
+ ice_aqc_opc_nvm_pass_component_tbl);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ cmd->transfer_flag = transfer_flag;
+ status = ice_aq_send_cmd(hw, &desc, data, length, cd);
+
+ if (!status) {
+ *comp_response = cmd->component_response;
+ *comp_response_code = cmd->component_response_code;
+ }
+ return status;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.h b/drivers/net/ethernet/intel/ice/ice_nvm.h
index 999f273ba6ad..8d430909f846 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.h
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.h
@@ -17,4 +17,20 @@ enum ice_status
ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size);
enum ice_status ice_init_nvm(struct ice_hw *hw);
enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data);
+enum ice_status
+ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
+ u16 length, void *data, bool last_command, u8 command_flags,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd);
+enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw);
+enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags);
+enum ice_status ice_aq_nvm_update_empr(struct ice_hw *hw);
+enum ice_status
+ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data,
+ u16 length, struct ice_sq_cd *cd);
+enum ice_status
+ice_nvm_pass_component_tbl(struct ice_hw *hw, u8 *data, u16 length,
+ u8 transfer_flag, u8 *comp_response,
+ u8 *comp_response_code, struct ice_sq_cd *cd);
#endif /* _ICE_NVM_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 1c29cfa1cf33..44a228530253 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -170,7 +170,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
return ICE_ERR_PARAM;
}
- /* query the current node information from FW before additing it
+ /* query the current node information from FW before adding it
* to the SW DB
*/
status = ice_sched_query_elem(hw, le32_to_cpu(info->node_teid), &elem);
@@ -578,7 +578,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
/**
* ice_aq_rl_profile - performs a rate limiting task
* @hw: pointer to the HW struct
- * @opcode:opcode for add, query, or remove profile(s)
+ * @opcode: opcode for add, query, or remove profile(s)
* @num_profiles: the number of profiles
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
@@ -1276,6 +1276,53 @@ ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
}
/**
+ * ice_sched_get_free_qgrp - Scan all queue group siblings and find a free node
+ * @pi: port information structure
+ * @vsi_node: pointer to the VSI node in the scheduler tree
+ * @qgrp_node: first queue group node identified for scanning
+ * @owner: LAN or RDMA
+ *
+ * This function retrieves a free LAN or RDMA queue group node by scanning
+ * qgrp_node and its siblings for the queue group with the fewest number
+ * of queues currently assigned.
+ */
+static struct ice_sched_node *
+ice_sched_get_free_qgrp(struct ice_port_info *pi,
+ struct ice_sched_node *vsi_node,
+ struct ice_sched_node *qgrp_node, u8 owner)
+{
+ struct ice_sched_node *min_qgrp;
+ u8 min_children;
+
+ if (!qgrp_node)
+ return qgrp_node;
+ min_children = qgrp_node->num_children;
+ if (!min_children)
+ return qgrp_node;
+ min_qgrp = qgrp_node;
+ /* scan all queue groups until we find a node that has fewer than the
+ * minimum number of children. This way all queue group nodes share the
+ * load equally, and the bandwidth is distributed equally across all
+ * queues.
+ */
+ while (qgrp_node) {
+ /* make sure the qgroup node is part of the VSI subtree */
+ if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
+ if (qgrp_node->num_children < min_children &&
+ qgrp_node->owner == owner) {
+ /* replace the new min queue group node */
+ min_qgrp = qgrp_node;
+ min_children = min_qgrp->num_children;
+ /* break if it has no children */
+ if (!min_children)
+ break;
+ }
+ qgrp_node = qgrp_node->sibling;
+ }
+ return min_qgrp;
+}
+
+/**
* ice_sched_get_free_qparent - Get a free LAN or RDMA queue group node
* @pi: port information structure
* @vsi_handle: software VSI handle
@@ -1288,7 +1335,7 @@ struct ice_sched_node *
ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u8 owner)
{
- struct ice_sched_node *vsi_node, *qgrp_node = NULL;
+ struct ice_sched_node *vsi_node, *qgrp_node;
struct ice_vsi_ctx *vsi_ctx;
u16 max_children;
u8 qgrp_layer;
@@ -1302,7 +1349,7 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
vsi_node = vsi_ctx->sched.vsi_node[tc];
/* validate invalid VSI ID */
if (!vsi_node)
- goto lan_q_exit;
+ return NULL;
/* get the first queue group node from VSI sub-tree */
qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
@@ -1315,8 +1362,8 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
qgrp_node = qgrp_node->sibling;
}
-lan_q_exit:
- return qgrp_node;
+ /* Select the best queue group */
+ return ice_sched_get_free_qgrp(pi, vsi_node, qgrp_node, owner);
}
/**
@@ -2153,8 +2200,8 @@ ice_sched_add_rl_profile(struct ice_port_info *pi,
hw = pi->hw;
list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
list_entry)
- if (rl_prof_elem->profile.flags == profile_type &&
- rl_prof_elem->bw == bw)
+ if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
+ profile_type && rl_prof_elem->bw == bw)
/* Return existing profile ID info */
return rl_prof_elem;
@@ -2384,7 +2431,8 @@ ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
/* Check the existing list for RL profile */
list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
list_entry)
- if (rl_prof_elem->profile.flags == profile_type &&
+ if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
+ profile_type &&
le16_to_cpu(rl_prof_elem->profile.profile_id) ==
profile_id) {
if (rl_prof_elem->prof_id_ref)
@@ -2546,8 +2594,8 @@ ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
return 0;
return ice_sched_rm_rl_profile(pi, layer_num,
- rl_prof_info->profile.flags,
- old_id);
+ rl_prof_info->profile.flags &
+ ICE_AQC_RL_PROFILE_TYPE_M, old_id);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index ccbe1cc64295..c3a6c41385ee 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -495,6 +495,7 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
struct ice_aq_desc desc;
+ enum ice_status status;
if (opc != ice_aqc_opc_add_sw_rules &&
opc != ice_aqc_opc_update_sw_rules &&
@@ -506,7 +507,12 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
desc.params.sw_rules.num_rules_fltr_entry_index =
cpu_to_le16(num_rules);
- return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
+ status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
+ if (opc != ice_aqc_opc_add_sw_rules &&
+ hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
+ status = ICE_ERR_DOES_NOT_EXIST;
+
+ return status;
}
/* ice_init_port_info - Initialize port_info with switch configuration data
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index abdb137c8bb7..9d0d6b0025cf 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -509,8 +509,8 @@ static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
return 0;
}
-static unsigned int ice_rx_frame_truesize(struct ice_ring *rx_ring,
- unsigned int size)
+static unsigned int
+ice_rx_frame_truesize(struct ice_ring *rx_ring, unsigned int __maybe_unused size)
{
unsigned int truesize;
@@ -631,10 +631,8 @@ ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
dma_addr_t dma;
/* since we are recycling buffers we should seldom need to alloc */
- if (likely(page)) {
- rx_ring->rx_stats.page_reuse_count++;
+ if (likely(page))
return true;
- }
/* alloc new page for storage */
page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
@@ -1033,7 +1031,6 @@ static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
if (ice_can_reuse_rx_page(rx_buf)) {
/* hand second half of page back to the ring */
ice_reuse_rx_page(rx_ring, rx_buf);
- rx_ring->rx_stats.page_reuse_count++;
} else {
/* we are not reusing the buffer so unmap it */
dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
@@ -1254,12 +1251,12 @@ construct_skb:
* @itr: ITR value to update
*
* Calculate how big of an increment should be applied to the ITR value passed
- * in based on wmem_default, SKB overhead, Ethernet overhead, and the current
+ * in based on wmem_default, SKB overhead, ethernet overhead, and the current
* link speed.
*
* The following is a calculation derived from:
* wmem_default / (size + overhead) = desired_pkts_per_int
- * rate / bits_per_byte / (size + Ethernet overhead) = pkt_rate
+ * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
* (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
*
* Assuming wmem_default is 212992 and overhead is 640 bytes per
@@ -2294,10 +2291,30 @@ static bool __ice_chk_linearize(struct sk_buff *skb)
/* Walk through fragments adding latest fragment, testing it, and
* then removing stale fragments from the sum.
*/
- stale = &skb_shinfo(skb)->frags[0];
- for (;;) {
+ for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
+ int stale_size = skb_frag_size(stale);
+
sum += skb_frag_size(frag++);
+ /* The stale fragment may present us with a smaller
+ * descriptor than the actual fragment size. To account
+ * for that we need to remove all the data on the front and
+ * figure out what the remainder would be in the last
+ * descriptor associated with the fragment.
+ */
+ if (stale_size > ICE_MAX_DATA_PER_TXD) {
+ int align_pad = -(skb_frag_off(stale)) &
+ (ICE_MAX_READ_REQ_SIZE - 1);
+
+ sum -= align_pad;
+ stale_size -= align_pad;
+
+ do {
+ sum -= ICE_MAX_DATA_PER_TXD_ALIGNED;
+ stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED;
+ } while (stale_size > ICE_MAX_DATA_PER_TXD);
+ }
+
/* if sum is negative we failed to make sufficient progress */
if (sum < 0)
return true;
@@ -2305,7 +2322,7 @@ static bool __ice_chk_linearize(struct sk_buff *skb)
if (!nr_frags--)
break;
- sum -= skb_frag_size(stale++);
+ sum -= stale_size;
}
return false;
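Note: the __ice_chk_linearize() hunk above leans on the power-of-two alignment idiom -(off) & (A - 1), which gives the padding from off up to the next multiple of A. A standalone sketch of the idiom (helper name hypothetical):

    /* distance from off up to the next multiple of align; align must be
     * a power of two, e.g. pad_to(0x34, 0x100) == 0xcc
     */
    static inline unsigned int pad_to(unsigned int off, unsigned int align)
    {
            return -off & (align - 1);
    }

That is the same computation the hunk performs with skb_frag_off(stale) and ICE_MAX_READ_REQ_SIZE.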
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index e70c4619edc3..51b4df7a59d2 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -193,7 +193,7 @@ struct ice_rxq_stats {
u64 non_eop_descs;
u64 alloc_page_failed;
u64 alloc_buf_failed;
- u64 page_reuse_count;
+ u64 gro_dropped; /* packets dropped because GRO returned GRO_DROP */
};
/* this enum matches hardware bits and is meant to be used by DYN_CTLN
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 02b12736ea80..bc2f4390b51d 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -191,7 +191,12 @@ ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
(vlan_tag & VLAN_VID_MASK))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
- napi_gro_receive(&rx_ring->q_vector->napi, skb);
+ if (napi_gro_receive(&rx_ring->q_vector->napi, skb) == GRO_DROP) {
+ /* this is tracked separately to help us debug stack drops */
+ rx_ring->rx_stats.gro_dropped++;
+ netdev_dbg(rx_ring->netdev, "Receive Queue %d: Dropped packet from GRO\n",
+ rx_ring->q_index);
+ }
}
/**
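Note: napi_gro_receive() returns a gro_result_t, and the ice_receive_skb() hunk above keys off GRO_DROP to count frames the stack discarded after the driver handed them up, which is why page_reuse_count could be retired in favor of gro_dropped. The same pattern in a generic poll routine (names hypothetical):

    /* count stack-level drops separately from driver-level drops */
    if (napi_gro_receive(&q->napi, skb) == GRO_DROP)
            q->stats.gro_dropped++;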
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 08c616d9fffd..4cdccfadf274 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -244,6 +244,15 @@ struct ice_hw_common_caps {
u8 rss_table_entry_width; /* RSS Entry width in bits */
u8 dcb;
+
+ bool nvm_update_pending_nvm;
+ bool nvm_update_pending_orom;
+ bool nvm_update_pending_netlist;
+#define ICE_NVM_PENDING_NVM_IMAGE BIT(0)
+#define ICE_NVM_PENDING_OROM BIT(1)
+#define ICE_NVM_PENDING_NETLIST BIT(2)
+ bool nvm_unified_update;
+#define ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT BIT(3)
};
/* Function specific capabilities */
@@ -312,7 +321,7 @@ struct ice_nvm_info {
u32 flash_size; /* Size of available flash in bytes */
u8 major_ver; /* major version of NVM package */
u8 minor_ver; /* minor version of dev starter */
- u8 blank_nvm_mode; /* is NVM empty (no FW present) */
+ u8 blank_nvm_mode; /* is NVM empty (no FW present) */
};
struct ice_link_default_override_tlv {
@@ -400,7 +409,7 @@ enum ice_rl_type {
#define ICE_SCHED_DFLT_BW 0xFFFFFFFF /* unlimited */
#define ICE_SCHED_DFLT_RL_PROF_ID 0
#define ICE_SCHED_NO_SHARED_RL_PROF_ID 0xFFFF
-#define ICE_SCHED_DFLT_BW_WT 1
+#define ICE_SCHED_DFLT_BW_WT 4
#define ICE_SCHED_INVAL_PROF_ID 0xFFFF
#define ICE_SCHED_DFLT_BURST_SIZE (15 * 1024) /* in bytes (15k) */
@@ -771,6 +780,9 @@ struct ice_hw_port_stats {
#define ICE_OROM_VER_SHIFT 24
#define ICE_OROM_VER_MASK (0xff << ICE_OROM_VER_SHIFT)
#define ICE_SR_PFA_PTR 0x40
+#define ICE_SR_1ST_NVM_BANK_PTR 0x42
+#define ICE_SR_1ST_OROM_BANK_PTR 0x44
+#define ICE_SR_NETLIST_BANK_PTR 0x46
#define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800
/* Link override related */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 9df5ceb26ab9..71497776ac62 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -932,6 +932,8 @@ static int ice_set_per_vf_res(struct ice_pf *pf)
num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
+ } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) {
+ num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN;
} else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
num_msix_per_vf = ICE_MIN_INTR_PER_VF;
} else {
@@ -2972,8 +2974,8 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
vsi->max_frame = qpi->rxq.max_pkt_size;
}
- /* VF can request to configure less than allocated queues
- * or default allocated queues. So update the VSI with new number
+ /* VF can request to configure fewer queues than allocated, or the
+ * default number of allocated queues, so update the VSI with the new count
*/
vsi->num_txq = num_txq;
vsi->num_rxq = num_rxq;
@@ -4071,3 +4073,33 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf)
}
}
}
+
+/**
+ * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR
+ * @pdev: pointer to a pci_dev structure
+ *
+ * Called when recovering from a PF FLR to restore interrupt capability to
+ * the VFs.
+ */
+void ice_restore_all_vfs_msi_state(struct pci_dev *pdev)
+{
+ struct pci_dev *vfdev;
+ u16 vf_id;
+ int pos;
+
+ if (!pci_num_vf(pdev))
+ return;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (pos) {
+ pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID,
+ &vf_id);
+ vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
+ while (vfdev) {
+ if (vfdev->is_virtfn && vfdev->physfn == pdev)
+ pci_restore_msi_state(vfdev);
+ vfdev = pci_get_device(pdev->vendor, vf_id,
+ vfdev);
+ }
+ }
+}
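Note: ice_restore_all_vfs_msi_state() walks the VF devices with pci_get_device(), which both iterates and manages refcounts: it drops the reference on the device passed in and takes one on the device it returns, so the loop needs no explicit pci_dev_put() and ends cleanly when NULL comes back. The shape of the walk, isolated:

    struct pci_dev *vfdev = NULL;

    while ((vfdev = pci_get_device(vendor, vf_did, vfdev))) {
            if (vfdev->is_virtfn && vfdev->physfn == pdev)
                    pci_restore_msi_state(vfdev);
    }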
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index 67aa9110fdd1..0f519fba3770 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -32,6 +32,7 @@
#define ICE_MAX_RSS_QS_PER_VF 16
#define ICE_NUM_VF_MSIX_MED 17
#define ICE_NUM_VF_MSIX_SMALL 5
+#define ICE_NUM_VF_MSIX_MULTIQ_MIN 3
#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
#define ICE_MAX_VF_RESET_TRIES 40
#define ICE_MAX_VF_RESET_SLEEP_MS 20
@@ -114,6 +115,7 @@ void ice_vc_notify_link_state(struct ice_pf *pf);
void ice_vc_notify_reset(struct ice_pf *pf);
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr);
bool ice_reset_vf(struct ice_vf *vf, bool is_vflr);
+void ice_restore_all_vfs_msi_state(struct pci_dev *pdev);
int
ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
@@ -146,6 +148,7 @@ void ice_print_vf_rx_mdd_event(struct ice_vf *vf);
#define ice_vf_lan_overflow_event(pf, event) do {} while (0)
#define ice_print_vfs_mdd_events(pf) do {} while (0)
#define ice_print_vf_rx_mdd_event(vf) do {} while (0)
+#define ice_restore_all_vfs_msi_state(pdev) do {} while (0)
static inline bool
ice_reset_all_vfs(struct ice_pf __always_unused *pf,
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 6badfd62dc63..20ac5fca68c6 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -298,7 +298,6 @@ static void ice_xsk_remove_umem(struct ice_vsi *vsi, u16 qid)
}
}
-
/**
* ice_xsk_umem_disable - disable a UMEM region
* @vsi: Current VSI
@@ -594,7 +593,6 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
if (!size)
break;
-
rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
rx_buf->xdp->data_end = rx_buf->xdp->data + size;
xsk_buff_dma_sync_for_cpu(rx_buf->xdp);
@@ -706,8 +704,6 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
if (tx_desc) {
ice_xdp_ring_update_tail(xdp_ring);
xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
- if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem))
- xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem);
}
return budget > 0 && work_done;
@@ -783,12 +779,8 @@ bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget)
if (xsk_frames)
xsk_umem_complete_tx(xdp_ring->xsk_umem, xsk_frames);
- if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem)) {
- if (xdp_ring->next_to_clean == xdp_ring->next_to_use)
- xsk_set_tx_need_wakeup(xdp_ring->xsk_umem);
- else
- xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem);
- }
+ if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem))
+ xsk_set_tx_need_wakeup(xdp_ring->xsk_umem);
ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes);
xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index c2cf414d126b..6e8231c1ddf0 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1782,8 +1782,8 @@ static void igb_create_lbtest_frame(struct sk_buff *skb,
memset(skb->data, 0xFF, frame_size);
frame_size /= 2;
memset(&skb->data[frame_size], 0xAA, frame_size - 1);
- memset(&skb->data[frame_size + 10], 0xBE, 1);
- memset(&skb->data[frame_size + 12], 0xAF, 1);
+ skb->data[frame_size + 10] = 0xBE;
+ skb->data[frame_size + 12] = 0xAF;
}
static int igb_check_lbtest_frame(struct igb_rx_buffer *rx_buffer,
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index ae8d64324619..4f05f6efe6af 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -6215,9 +6215,18 @@ static void igb_reset_task(struct work_struct *work)
struct igb_adapter *adapter;
adapter = container_of(work, struct igb_adapter, reset_task);
+ rtnl_lock();
+ /* If we're already down or resetting, just bail */
+ if (test_bit(__IGB_DOWN, &adapter->state) ||
+ test_bit(__IGB_RESETTING, &adapter->state)) {
+ rtnl_unlock();
+ return;
+ }
+
igb_dump(adapter);
netdev_err(adapter->netdev, "Reset adapter\n");
igb_reinit_locked(adapter);
+ rtnl_unlock();
}
/**
@@ -7159,7 +7168,7 @@ static void igb_flush_mac_table(struct igb_adapter *adapter)
for (i = 0; i < hw->mac.rar_entry_count; i++) {
adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE;
- memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+ eth_zero_addr(adapter->mac_table[i].addr);
adapter->mac_table[i].queue = 0;
igb_rar_set_index(adapter, i);
}
@@ -7308,7 +7317,7 @@ static int igb_del_mac_filter_flags(struct igb_adapter *adapter,
} else {
adapter->mac_table[i].state = 0;
adapter->mac_table[i].queue = 0;
- memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+ eth_zero_addr(adapter->mac_table[i].addr);
}
igb_rar_set_index(adapter, i);
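Note: the igb_reset_task() hunk above is the standard fix for a race between a reset worker and ndo_open/ndo_close: take rtnl_lock() in the worker, re-check the DOWN/RESETTING bits under it, and bail out early if teardown won the race. The octeontx2 hunks later in this series apply the same pattern. Generic shape (driver names hypothetical):

    static void foo_reset_task(struct work_struct *work)
    {
            struct foo_adapter *adapter =
                    container_of(work, struct foo_adapter, reset_task);

            rtnl_lock();
            if (test_bit(__FOO_DOWN, &adapter->state) ||
                test_bit(__FOO_RESETTING, &adapter->state)) {
                    rtnl_unlock();
                    return;
            }
            foo_reinit_locked(adapter);
            rtnl_unlock();
    }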
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 97a065928976..19269f5d52bc 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2457,13 +2457,10 @@ static int igbvf_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
}
}
-static int igbvf_suspend(struct pci_dev *pdev, pm_message_t state)
+static int igbvf_suspend(struct device *dev_d)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev_d);
struct igbvf_adapter *adapter = netdev_priv(netdev);
-#ifdef CONFIG_PM
- int retval = 0;
-#endif
netif_device_detach(netdev);
@@ -2473,31 +2470,16 @@ static int igbvf_suspend(struct pci_dev *pdev, pm_message_t state)
igbvf_free_irq(adapter);
}
-#ifdef CONFIG_PM
- retval = pci_save_state(pdev);
- if (retval)
- return retval;
-#endif
-
- pci_disable_device(pdev);
-
return 0;
}
-#ifdef CONFIG_PM
-static int igbvf_resume(struct pci_dev *pdev)
+static int __maybe_unused igbvf_resume(struct device *dev_d)
{
+ struct pci_dev *pdev = to_pci_dev(dev_d);
struct net_device *netdev = pci_get_drvdata(pdev);
struct igbvf_adapter *adapter = netdev_priv(netdev);
u32 err;
- pci_restore_state(pdev);
- err = pci_enable_device_mem(pdev);
- if (err) {
- dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
- return err;
- }
-
pci_set_master(pdev);
if (netif_running(netdev)) {
@@ -2515,11 +2497,10 @@ static int igbvf_resume(struct pci_dev *pdev)
return 0;
}
-#endif
static void igbvf_shutdown(struct pci_dev *pdev)
{
- igbvf_suspend(pdev, PMSG_SUSPEND);
+ igbvf_suspend(&pdev->dev);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -2960,17 +2941,15 @@ static const struct pci_device_id igbvf_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl);
+static SIMPLE_DEV_PM_OPS(igbvf_pm_ops, igbvf_suspend, igbvf_resume);
+
/* PCI Device API Driver */
static struct pci_driver igbvf_driver = {
.name = igbvf_driver_name,
.id_table = igbvf_pci_tbl,
.probe = igbvf_probe,
.remove = igbvf_remove,
-#ifdef CONFIG_PM
- /* Power Management Hooks */
- .suspend = igbvf_suspend,
- .resume = igbvf_resume,
-#endif
+ .driver.pm = &igbvf_pm_ops,
.shutdown = igbvf_shutdown,
.err_handler = &igbvf_err_handler
};
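Note: the igbvf hunks above, and the ixgbe/ixgbevf hunks below, are the same mechanical conversion from legacy PCI .suspend/.resume hooks to generic dev_pm_ops: the PCI core now saves and restores config space and handles the D-state transitions, so the driver-side pci_save_state()/pci_enable_device_mem() calls and the CONFIG_PM #ifdefs disappear. Skeleton of the conversion (driver names hypothetical):

    static int __maybe_unused foo_suspend(struct device *dev)
    {
            struct net_device *netdev = dev_get_drvdata(dev);

            netif_device_detach(netdev);
            /* quiesce the device; no pci_save_state()/pci_disable_device() */
            return 0;
    }

    static int __maybe_unused foo_resume(struct device *dev)
    {
            struct pci_dev *pdev = to_pci_dev(dev);

            pci_set_master(pdev);
            /* reinit the device; config space is already restored by the core */
            return 0;
    }

    static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

    static struct pci_driver foo_driver = {
            .driver.pm = &foo_pm_ops,   /* replaces .suspend/.resume */
            /* ... */
    };

__maybe_unused is needed because SIMPLE_DEV_PM_OPS compiles the callbacks out when CONFIG_PM_SLEEP is disabled; igbvf_suspend above skips it only because it is also called from igbvf_shutdown().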
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index 2ab7d9fab6af..b9fe51b91c47 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -82,13 +82,7 @@ struct igc_mac_info {
enum igc_mac_type type;
- u32 collision_delta;
- u32 ledctl_default;
- u32 ledctl_mode1;
- u32 ledctl_mode2;
u32 mc_filter_type;
- u32 tx_packet_delta;
- u32 txcw;
u16 mta_reg_count;
u16 uta_reg_count;
@@ -98,8 +92,6 @@ struct igc_mac_info {
u8 forced_speed_duplex;
- bool adaptive_ifs;
- bool has_fwsm;
bool asf_firmware_present;
bool arc_subsystem_valid;
@@ -276,21 +268,9 @@ struct igc_hw_stats {
u64 tsctc;
u64 tsctfc;
u64 iac;
- u64 icrxptc;
- u64 icrxatc;
- u64 ictxptc;
- u64 ictxatc;
- u64 ictxqec;
- u64 ictxqmtc;
- u64 icrxdmtc;
- u64 icrxoc;
- u64 cbtmpc;
u64 htdpmc;
- u64 cbrdpc;
- u64 cbrmpc;
u64 rpthc;
u64 hgptc;
- u64 htcbdpc;
u64 hgorc;
u64 hgotc;
u64 lenerrs;
diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c
index b47e7b0a6398..09cd0ec7ee87 100644
--- a/drivers/net/ethernet/intel/igc/igc_mac.c
+++ b/drivers/net/ethernet/intel/igc/igc_mac.c
@@ -295,20 +295,12 @@ void igc_clear_hw_cntrs_base(struct igc_hw *hw)
rd32(IGC_MGTPTC);
rd32(IGC_IAC);
- rd32(IGC_ICRXOC);
-
- rd32(IGC_ICRXPTC);
- rd32(IGC_ICRXATC);
- rd32(IGC_ICTXPTC);
- rd32(IGC_ICTXATC);
- rd32(IGC_ICTXQEC);
- rd32(IGC_ICTXQMTC);
- rd32(IGC_ICRXDMTC);
rd32(IGC_RPTHC);
rd32(IGC_TLPIC);
rd32(IGC_RLPIC);
rd32(IGC_HGPTC);
+ rd32(IGC_RXDMTC);
rd32(IGC_HGORCL);
rd32(IGC_HGORCH);
rd32(IGC_HGOTCL);
@@ -363,8 +355,8 @@ void igc_rar_set(struct igc_hw *hw, u8 *addr, u32 index)
s32 igc_check_for_copper_link(struct igc_hw *hw)
{
struct igc_mac_info *mac = &hw->mac;
+ bool link = false;
s32 ret_val;
- bool link;
/* We only want to go out to the PHY registers to see if Auto-Neg
* has completed and/or if our link status has changed. The
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 8d5869dcf798..7a6f2a0d413f 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -3730,14 +3730,6 @@ void igc_update_stats(struct igc_adapter *adapter)
adapter->stats.tsctc += rd32(IGC_TSCTC);
adapter->stats.iac += rd32(IGC_IAC);
- adapter->stats.icrxoc += rd32(IGC_ICRXOC);
- adapter->stats.icrxptc += rd32(IGC_ICRXPTC);
- adapter->stats.icrxatc += rd32(IGC_ICRXATC);
- adapter->stats.ictxptc += rd32(IGC_ICTXPTC);
- adapter->stats.ictxatc += rd32(IGC_ICTXATC);
- adapter->stats.ictxqec += rd32(IGC_ICTXQEC);
- adapter->stats.ictxqmtc += rd32(IGC_ICTXQMTC);
- adapter->stats.icrxdmtc += rd32(IGC_ICRXDMTC);
/* Fill out the OS statistics structure */
net_stats->multicast = adapter->stats.mprc;
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index 1c46cec5a799..b52dd9d737e8 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -58,16 +58,6 @@
#define IGC_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
#define IGC_GPIE 0x01514 /* General Purpose Intr Enable - RW */
-/* Interrupt Cause */
-#define IGC_ICRXPTC 0x04104 /* Rx Packet Timer Expire Count */
-#define IGC_ICRXATC 0x04108 /* Rx Absolute Timer Expire Count */
-#define IGC_ICTXPTC 0x0410C /* Tx Packet Timer Expire Count */
-#define IGC_ICTXATC 0x04110 /* Tx Absolute Timer Expire Count */
-#define IGC_ICTXQEC 0x04118 /* Tx Queue Empty Count */
-#define IGC_ICTXQMTC 0x0411C /* Tx Queue Min Threshold Count */
-#define IGC_ICRXDMTC 0x04120 /* Rx Descriptor Min Threshold Count */
-#define IGC_ICRXOC 0x04124 /* Receiver Overrun Count */
-
/* MSI-X Table Register Descriptions */
#define IGC_PBACL 0x05B68 /* MSIx PBA Clear - R/W 1 to clear */
@@ -182,10 +172,6 @@
#define IGC_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */
#define IGC_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */
#define IGC_IAC 0x04100 /* Interrupt Assertion Count */
-#define IGC_ICTXPTC 0x0410C /* Interrupt Cause Tx Pkt Timer Expire Count */
-#define IGC_ICTXATC 0x04110 /* Interrupt Cause Tx Abs Timer Expire Count */
-#define IGC_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */
-#define IGC_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */
#define IGC_RPTHC 0x04104 /* Rx Packets To Host */
#define IGC_TLPIC 0x04148 /* EEE Tx LPI Count */
#define IGC_RLPIC 0x0414C /* EEE Rx LPI Count */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 6725d892336e..71ec908266a6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1951,8 +1951,8 @@ static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
memset(skb->data, 0xFF, frame_size);
frame_size >>= 1;
memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1);
- memset(&skb->data[frame_size + 10], 0xBE, 1);
- memset(&skb->data[frame_size + 12], 0xAF, 1);
+ skb->data[frame_size + 10] = 0xBE;
+ skb->data[frame_size + 12] = 0xAF;
}
static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 6f32b1706ab9..2f8a4cfc5fa1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6877,32 +6877,20 @@ int ixgbe_close(struct net_device *netdev)
return 0;
}
-#ifdef CONFIG_PM
-static int ixgbe_resume(struct pci_dev *pdev)
+static int __maybe_unused ixgbe_resume(struct device *dev_d)
{
+ struct pci_dev *pdev = to_pci_dev(dev_d);
struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
u32 err;
adapter->hw.hw_addr = adapter->io_addr;
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- /*
- * pci_restore_state clears dev->state_saved so call
- * pci_save_state to restore it.
- */
- pci_save_state(pdev);
- err = pci_enable_device_mem(pdev);
- if (err) {
- e_dev_err("Cannot enable PCI device from suspend\n");
- return err;
- }
smp_mb__before_atomic();
clear_bit(__IXGBE_DISABLED, &adapter->state);
pci_set_master(pdev);
- pci_wake_from_d3(pdev, false);
+ device_wakeup_disable(dev_d);
ixgbe_reset(adapter);
@@ -6920,7 +6908,6 @@ static int ixgbe_resume(struct pci_dev *pdev)
return err;
}
-#endif /* CONFIG_PM */
static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
@@ -6929,9 +6916,6 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
struct ixgbe_hw *hw = &adapter->hw;
u32 ctrl;
u32 wufc = adapter->wol;
-#ifdef CONFIG_PM
- int retval = 0;
-#endif
rtnl_lock();
netif_device_detach(netdev);
@@ -6942,12 +6926,6 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
ixgbe_clear_interrupt_scheme(adapter);
rtnl_unlock();
-#ifdef CONFIG_PM
- retval = pci_save_state(pdev);
- if (retval)
- return retval;
-
-#endif
if (hw->mac.ops.stop_link_on_d3)
hw->mac.ops.stop_link_on_d3(hw);
@@ -7002,26 +6980,18 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
return 0;
}
-#ifdef CONFIG_PM
-static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused ixgbe_suspend(struct device *dev_d)
{
+ struct pci_dev *pdev = to_pci_dev(dev_d);
int retval;
bool wake;
retval = __ixgbe_shutdown(pdev, &wake);
- if (retval)
- return retval;
- if (wake) {
- pci_prepare_to_sleep(pdev);
- } else {
- pci_wake_from_d3(pdev, false);
- pci_set_power_state(pdev, PCI_D3hot);
- }
+ device_set_wakeup_enable(dev_d, wake);
- return 0;
+ return retval;
}
-#endif /* CONFIG_PM */
static void ixgbe_shutdown(struct pci_dev *pdev)
{
@@ -11379,16 +11349,15 @@ static const struct pci_error_handlers ixgbe_err_handler = {
.resume = ixgbe_io_resume,
};
+static SIMPLE_DEV_PM_OPS(ixgbe_pm_ops, ixgbe_suspend, ixgbe_resume);
+
static struct pci_driver ixgbe_driver = {
- .name = ixgbe_driver_name,
- .id_table = ixgbe_pci_tbl,
- .probe = ixgbe_probe,
- .remove = ixgbe_remove,
-#ifdef CONFIG_PM
- .suspend = ixgbe_suspend,
- .resume = ixgbe_resume,
-#endif
- .shutdown = ixgbe_shutdown,
+ .name = ixgbe_driver_name,
+ .id_table = ixgbe_pci_tbl,
+ .probe = ixgbe_probe,
+ .remove = ixgbe_remove,
+ .driver.pm = &ixgbe_pm_ops,
+ .shutdown = ixgbe_shutdown,
.sriov_configure = ixgbe_pci_sriov_configure,
.err_handler = &ixgbe_err_handler
};
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 23a92656821d..988db46bff0e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -783,7 +783,7 @@ static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr,
ETH_ALEN);
else
- memset(adapter->vfinfo[vf].vf_mac_addresses, 0, ETH_ALEN);
+ eth_zero_addr(adapter->vfinfo[vf].vf_mac_addresses);
return retval;
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index a6267569bfa9..a428113e6d54 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -4297,13 +4297,10 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
-static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused ixgbevf_suspend(struct device *dev_d)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev_d);
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
-#ifdef CONFIG_PM
- int retval = 0;
-#endif
rtnl_lock();
netif_device_detach(netdev);
@@ -4314,37 +4311,16 @@ static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
ixgbevf_clear_interrupt_scheme(adapter);
rtnl_unlock();
-#ifdef CONFIG_PM
- retval = pci_save_state(pdev);
- if (retval)
- return retval;
-
-#endif
- if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
- pci_disable_device(pdev);
-
return 0;
}
-#ifdef CONFIG_PM
-static int ixgbevf_resume(struct pci_dev *pdev)
+static int __maybe_unused ixgbevf_resume(struct device *dev_d)
{
+ struct pci_dev *pdev = to_pci_dev(dev_d);
struct net_device *netdev = pci_get_drvdata(pdev);
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
u32 err;
- pci_restore_state(pdev);
- /* pci_restore_state clears dev->state_saved so call
- * pci_save_state to restore it.
- */
- pci_save_state(pdev);
-
- err = pci_enable_device_mem(pdev);
- if (err) {
- dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
- return err;
- }
-
adapter->hw.hw_addr = adapter->io_addr;
smp_mb__before_atomic();
clear_bit(__IXGBEVF_DISABLED, &adapter->state);
@@ -4365,10 +4341,9 @@ static int ixgbevf_resume(struct pci_dev *pdev)
return err;
}
-#endif /* CONFIG_PM */
static void ixgbevf_shutdown(struct pci_dev *pdev)
{
- ixgbevf_suspend(pdev, PMSG_SUSPEND);
+ ixgbevf_suspend(&pdev->dev);
}
static void ixgbevf_get_tx_ring_stats(struct rtnl_link_stats64 *stats,
@@ -4882,16 +4857,17 @@ static const struct pci_error_handlers ixgbevf_err_handler = {
.resume = ixgbevf_io_resume,
};
+static SIMPLE_DEV_PM_OPS(ixgbevf_pm_ops, ixgbevf_suspend, ixgbevf_resume);
+
static struct pci_driver ixgbevf_driver = {
.name = ixgbevf_driver_name,
.id_table = ixgbevf_pci_tbl,
.probe = ixgbevf_probe,
.remove = ixgbevf_remove,
-#ifdef CONFIG_PM
+
/* Power Management Hooks */
- .suspend = ixgbevf_suspend,
- .resume = ixgbevf_resume,
-#endif
+ .driver.pm = &ixgbevf_pm_ops,
+
.shutdown = ixgbevf_shutdown,
.err_handler = &ixgbevf_err_handler
};
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 6e3f9e2f883b..832bbb8b05c8 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -3637,7 +3637,7 @@ static void mvneta_start_dev(struct mvneta_port *pp)
phylink_start(pp->phylink);
- /* We may have called phy_speed_down before */
+ /* We may have called phylink_speed_down before */
phylink_speed_up(pp->phylink);
netif_tx_start_all_queues(pp->dev);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index cd5e9d60307e..2a8a5842eaef 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -3508,6 +3508,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
err = mvpp2_rx_refill(port, bm_pool, pp, pool);
if (err) {
netdev_err(port->dev, "failed to refill BM pools\n");
+ dev_kfree_skb_any(skb);
goto err_drop_frame;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 64786568af0d..75a8c407e815 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1730,10 +1730,12 @@ static void otx2_reset_task(struct work_struct *work)
if (!netif_running(pf->netdev))
return;
+ rtnl_lock();
otx2_stop(pf->netdev);
pf->reset_count++;
otx2_open(pf->netdev);
netif_trans_update(pf->netdev);
+ rtnl_unlock();
}
static const struct net_device_ops otx2_netdev_ops = {
@@ -2111,6 +2113,7 @@ static void otx2_remove(struct pci_dev *pdev)
pf = netdev_priv(netdev);
+ cancel_work_sync(&pf->reset_task);
/* Disable link notifications */
otx2_cgx_config_linkevents(pf, false);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index f4227517dc8e..92a3db69a6cd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -617,6 +617,8 @@ static void otx2vf_remove(struct pci_dev *pdev)
vf = netdev_priv(netdev);
+ cancel_work_sync(&vf->reset_task);
+ unregister_netdev(netdev);
otx2vf_disable_mbox_intr(vf);
otx2_detach_resources(&vf->mbox);
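Note: both octeontx2 remove() hunks cancel the reset worker before tearing the netdev down; without cancel_work_sync() a queued reset can run against a half-unregistered device. The ordering that matters (names hypothetical):

    static void foo_remove(struct pci_dev *pdev)
    {
            struct foo_priv *priv = pci_get_drvdata(pdev);

            cancel_work_sync(&priv->reset_task);    /* before unregister */
            unregister_netdev(priv->netdev);
            /* ... free resources ... */
    }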
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 20db302d31ce..0870fe78ea38 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -171,11 +171,21 @@ static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
return 0;
}
-static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed)
+static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
+ phy_interface_t interface, int speed)
{
u32 val;
int ret;
+ if (interface == PHY_INTERFACE_MODE_TRGMII) {
+ mtk_w32(eth, TRGMII_MODE, INTF_MODE);
+ val = 500000000;
+ ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
+ if (ret)
+ dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
+ return;
+ }
+
val = (speed == SPEED_1000) ?
INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
mtk_w32(eth, val, INTF_MODE);
@@ -262,10 +272,9 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
state->interface))
goto err_phy;
} else {
- if (state->interface !=
- PHY_INTERFACE_MODE_TRGMII)
- mtk_gmac0_rgmii_adjust(mac->hw,
- state->speed);
+ mtk_gmac0_rgmii_adjust(mac->hw,
+ state->interface,
+ state->speed);
/* mt7623_pad_clk_setup */
for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
@@ -2887,6 +2896,8 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
eth->netdev[id]->irq = eth->irq[0];
eth->netdev[id]->dev.of_node = np;
+ eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
+
return 0;
free_netdev:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 8a10285b0e10..b50c567ef508 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -806,10 +806,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
goto xdp_drop_no_cnt; /* Drop on xmit failure */
default:
bpf_warn_invalid_xdp_action(act);
- /* fall through */
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(dev, xdp_prog, act);
- /* fall through */
+ fallthrough;
case XDP_DROP:
ring->xdp_drop++;
xdp_drop_no_cnt:
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index c790a5fcea73..ae305c2e9225 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -558,7 +558,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT. srq_no=0x%x, eq 0x%x\n",
__func__, be32_to_cpu(eqe->event.srq.srqn),
eq->eqn);
- /* fall through */
+ fallthrough;
case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
if (mlx4_is_master(dev)) {
/* forward only to slave owning the SRQ */
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 954c22c79f6b..258c7a96f269 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -4356,12 +4356,14 @@ end:
static void mlx4_shutdown(struct pci_dev *pdev)
{
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
+ struct mlx4_dev *dev = persist->dev;
mlx4_info(persist->dev, "mlx4_shutdown was called\n");
mutex_lock(&persist->interface_state_mutex);
if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
mlx4_unload_one(pdev);
mutex_unlock(&persist->interface_state_mutex);
+ mlx4_pci_disable_device(dev);
}
static const struct pci_error_handlers mlx4_err_handler = {
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 9486caecfbdc..f1b4ad9c66d2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1412,7 +1412,7 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
case MLX4_STEERING_MODE_A0:
if (prot == MLX4_PROT_ETH)
return 0;
- /* fall through */
+ fallthrough;
case MLX4_STEERING_MODE_B0:
if (prot == MLX4_PROT_ETH)
@@ -1442,7 +1442,7 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
case MLX4_STEERING_MODE_A0:
if (prot == MLX4_PROT_ETH)
return 0;
- /* fall through */
+ fallthrough;
case MLX4_STEERING_MODE_B0:
if (prot == MLX4_PROT_ETH)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index c44669102626..0cc2080fd847 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -45,6 +45,7 @@
#include <linux/mlx5/transobj.h>
#include <linux/mlx5/fs.h>
#include <linux/rhashtable.h>
+#include <net/udp_tunnel.h>
#include <net/switchdev.h>
#include <net/xdp.h>
#include <linux/dim.h>
@@ -530,6 +531,8 @@ typedef struct sk_buff *
typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);
+int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk);
+
enum mlx5e_rq_flag {
MLX5E_RQ_FLAG_XDP_XMIT,
MLX5E_RQ_FLAG_XDP_REDIRECT,
@@ -790,6 +793,7 @@ struct mlx5e_priv {
u16 drop_rq_q_counter;
struct notifier_block events_nb;
+ struct udp_tunnel_nic_info nic_info;
#ifdef CONFIG_MLX5_CORE_EN_DCB
struct mlx5e_dcbx dcbx;
#endif
@@ -812,6 +816,13 @@ struct mlx5e_priv {
struct mlx5e_scratchpad scratchpad;
};
+struct mlx5e_rx_handlers {
+ mlx5e_fp_handle_rx_cqe handle_rx_cqe;
+ mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
+};
+
+extern const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic;
+
struct mlx5e_profile {
int (*init)(struct mlx5_core_dev *mdev,
struct net_device *netdev,
@@ -828,58 +839,17 @@ struct mlx5e_profile {
void (*update_carrier)(struct mlx5e_priv *priv);
unsigned int (*stats_grps_num)(struct mlx5e_priv *priv);
mlx5e_stats_grp_t *stats_grps;
- struct {
- mlx5e_fp_handle_rx_cqe handle_rx_cqe;
- mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
- } rx_handlers;
+ const struct mlx5e_rx_handlers *rx_handlers;
int max_tc;
u8 rq_groups;
};
void mlx5e_build_ptys2ethtool_map(void);
-u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev);
-netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
-void mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more);
-
-void mlx5e_trigger_irq(struct mlx5e_icosq *sq);
-void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe);
-void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
-int mlx5e_napi_poll(struct napi_struct *napi, int budget);
-bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
-int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
-void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
-
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
struct mlx5e_params *params);
-void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info);
-void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *dma_info,
- bool recycle);
-void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
-void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
-bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
-int mlx5e_poll_ico_cq(struct mlx5e_cq *cq);
-bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq);
-void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
-void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
-struct sk_buff *
-mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
- u16 cqe_bcnt, u32 head_offset, u32 page_idx);
-struct sk_buff *
-mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
- u16 cqe_bcnt, u32 head_offset, u32 page_idx);
-struct sk_buff *
-mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
- struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
-struct sk_buff *
-mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
- struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
-
void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);
@@ -982,8 +952,6 @@ void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state);
void mlx5e_activate_rq(struct mlx5e_rq *rq);
void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
-void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
-void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq);
void mlx5e_activate_icosq(struct mlx5e_icosq *icosq);
void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq);
@@ -1008,6 +976,7 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
bool enable_mc_lb);
+void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc);
/* common netdev helpers */
void mlx5e_create_q_counters(struct mlx5e_priv *priv);
@@ -1045,6 +1014,7 @@ int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv);
int mlx5e_set_dev_port_mtu_ctx(struct mlx5e_priv *priv, void *context);
int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
mlx5e_fp_preactivate preactivate);
+void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv);
/* ethtool helpers */
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
@@ -1113,8 +1083,6 @@ void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
void mlx5e_rx_dim_work(struct work_struct *work);
void mlx5e_tx_dim_work(struct work_struct *work);
-void mlx5e_add_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti);
-void mlx5e_del_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti);
netdev_features_t mlx5e_features_check(struct sk_buff *skb,
struct net_device *netdev,
netdev_features_t features);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
index bdb71332cbf2..3e44e4d820c5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
@@ -183,13 +183,16 @@ void mlx5e_rep_bond_unslave(struct mlx5_eswitch *esw,
static bool mlx5e_rep_is_lag_netdev(struct net_device *netdev)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5e_priv *priv;
/* A given netdev is not a representor or is not a slave of a LAG configuration */
if (!mlx5e_eswitch_rep(netdev) || !bond_slave_get_rtnl(netdev))
return false;
+ priv = netdev_priv(netdev);
+ rpriv = priv->ppriv;
+
/* Egress acl forward to vport is supported only non-uplink representor */
return rpriv->rep->vport != MLX5_VPORT_UPLINK;
}
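Note: the mlx5e_rep_is_lag_netdev() fix above reorders the logic so netdev_priv() is dereferenced only after mlx5e_eswitch_rep() has confirmed the netdev belongs to this driver; the notifier can be handed arbitrary netdevs, for which the priv layout is garbage. The general rule, as a sketch (is_our_netdev() is a hypothetical stand-in for the driver's ownership check):

    /* identify first, then cast: netdev_priv() is only meaningful for
     * netdevs this driver created
     */
    if (!is_our_netdev(netdev))
            return false;
    priv = netdev_priv(netdev);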
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index f8af109d34cc..79cc42d88eec 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -551,19 +551,31 @@ static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
}
}
- tun_dst = tun_rx_dst(enc_opts.key.len);
+ if (key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ tun_dst = __ip_tun_set_dst(key.enc_ipv4.src, key.enc_ipv4.dst,
+ key.enc_ip.tos, key.enc_ip.ttl,
+ key.enc_tp.dst, TUNNEL_KEY,
+ key32_to_tunnel_id(key.enc_key_id.keyid),
+ enc_opts.key.len);
+ } else if (key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ tun_dst = __ipv6_tun_set_dst(&key.enc_ipv6.src, &key.enc_ipv6.dst,
+ key.enc_ip.tos, key.enc_ip.ttl,
+ key.enc_tp.dst, 0, TUNNEL_KEY,
+ key32_to_tunnel_id(key.enc_key_id.keyid),
+ enc_opts.key.len);
+ } else {
+ netdev_dbg(priv->netdev,
+ "Couldn't restore tunnel, unsupported addr_type: %d\n",
+ key.enc_control.addr_type);
+ return false;
+ }
+
if (!tun_dst) {
- WARN_ON_ONCE(true);
+ netdev_dbg(priv->netdev, "Couldn't restore tunnel, no tun_dst\n");
return false;
}
- ip_tunnel_key_init(&tun_dst->u.tun_info.key,
- key.enc_ipv4.src, key.enc_ipv4.dst,
- key.enc_ip.tos, key.enc_ip.ttl,
- 0, /* label */
- key.enc_tp.src, key.enc_tp.dst,
- key32_to_tunnel_id(key.enc_key_id.keyid),
- TUNNEL_KEY);
+ tun_dst->u.tun_info.key.tp_src = key.enc_tp.src;
if (enc_opts.key.len)
ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
index 951ea26d96bc..e472ed0eacfb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
@@ -301,6 +301,8 @@ static int mlx5e_tc_tun_parse_geneve_params(struct mlx5e_priv *priv,
MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type, ETH_P_TEB);
}
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c
index 58b13192df23..2805416c32a3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c
@@ -80,6 +80,8 @@ static int mlx5e_tc_tun_parse_gretap(struct mlx5e_priv *priv,
gre_key.key, be32_to_cpu(enc_keyid.key->keyid));
}
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
index 37b176801bcc..038a0f1cecec 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
@@ -136,6 +136,8 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
be32_to_cpu(enc_keyid.key->keyid));
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
+
return 0;
}
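Note: the three tc_tun hunks (geneve, gre, vxlan) add the same missing bit. An mlx5 flow-spec match on misc parameters is ignored unless MLX5_MATCH_MISC_PARAMETERS is set in match_criteria_enable, so the keyid/VNI masks and values written with MLX5_SET() were silently not applied. The pairing, as a sketch:

    /* the criteria mask/value writes ... */
    MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni, 0xffffff);
    MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni, vni);
    /* ... take effect only once the parameter group is enabled */
    spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;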
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index cf425a60cddc..9334c9c3e208 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -5,6 +5,7 @@
#define __MLX5_EN_TXRX_H___
#include "en.h"
+#include <linux/indirect_call_wrapper.h>
#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
@@ -18,6 +19,33 @@ enum mlx5e_icosq_wqe_type {
#endif
};
+/* General */
+void mlx5e_trigger_irq(struct mlx5e_icosq *sq);
+void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe);
+void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
+int mlx5e_napi_poll(struct napi_struct *napi, int budget);
+int mlx5e_poll_ico_cq(struct mlx5e_cq *cq);
+
+/* RX */
+void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info);
+void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
+ struct mlx5e_dma_info *dma_info,
+ bool recycle);
+INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq));
+INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq));
+int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
+void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
+void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq);
+
+/* TX */
+u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev);
+netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
+void mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more);
+bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
+void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
+
static inline bool
mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
{
@@ -360,7 +388,7 @@ mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
switch (swp_spec->tun_l4_proto) {
case IPPROTO_UDP:
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
- /* fall through */
+ fallthrough;
case IPPROTO_TCP:
eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index e0c1b010d41a..0e6946fc121f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -34,7 +34,6 @@
#include <net/xdp_sock_drv.h>
#include "en/xdp.h"
#include "en/params.h"
-#include <linux/indirect_call_wrapper.h>
int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk)
{
@@ -153,11 +152,11 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
return true;
default:
bpf_warn_invalid_xdp_action(act);
- /* fall through */
+ fallthrough;
case XDP_ABORTED:
xdp_abort:
trace_xdp_exception(rq->netdev, prog, act);
- /* fall through */
+ fallthrough;
case XDP_DROP:
rq->stats->xdp_drop++;
return true;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index cc46414773b5..dd9df519d383 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -3,6 +3,7 @@
#include "setup.h"
#include "en/params.h"
+#include "en/txrx.h"
/* It matches XDP_UMEM_MIN_CHUNK_SIZE, but as this constant is private and may
* change unexpectedly, and mlx5e has a minimum valid stride size for striding
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
index 0dfbc96e952a..4d892f6cecb3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
@@ -6,7 +6,6 @@
#include "en/xdp.h"
#include "en/params.h"
#include <net/xdp_sock_drv.h>
-#include <linux/indirect_call_wrapper.h>
int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index 2a47673da5a4..f96e786db158 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -47,7 +47,6 @@
struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
struct sk_buff *skb, u32 *cqe_bcnt);
-void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_ipsec_inverse_table_init(void);
bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index 0e6698d1b4ca..f4861545b236 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -470,7 +470,7 @@ bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *s
if (likely(!skb->decrypted))
goto out;
WARN_ON_ONCE(1);
- /* fall-through */
+ fallthrough;
case MLX5E_KTLS_SYNC_FAIL:
goto err_out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 1e42c7ae621b..a6cf008057b5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -60,6 +60,16 @@ void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
mutex_unlock(&mdev->mlx5e_res.td.list_lock);
}
+void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc)
+{
+ bool ro_pci_enable = pcie_relaxed_ordering_enabled(mdev->pdev);
+ bool ro_write = MLX5_CAP_GEN(mdev, relaxed_ordering_write);
+ bool ro_read = MLX5_CAP_GEN(mdev, relaxed_ordering_read);
+
+ MLX5_SET(mkc, mkc, relaxed_ordering_read, ro_pci_enable && ro_read);
+ MLX5_SET(mkc, mkc, relaxed_ordering_write, ro_pci_enable && ro_write);
+}
+
static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
struct mlx5_core_mkey *mkey)
{
@@ -76,7 +86,7 @@ static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
MLX5_SET(mkc, mkc, lw, 1);
MLX5_SET(mkc, mkc, lr, 1);
-
+ mlx5e_mkey_set_relaxed_ordering(mdev, mkc);
MLX5_SET(mkc, mkc, pd, pdn);
MLX5_SET(mkc, mkc, length64, 1);
MLX5_SET(mkc, mkc, qpn, 0xffffff);
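Note: mlx5e_mkey_set_relaxed_ordering() centralizes a capability check needed by every mkey builder touched in this series: the relaxed-ordering bits in the mkey context may be set only when the PCI link has relaxed ordering enabled (pcie_relaxed_ordering_enabled()) and the firmware advertises the matching MLX5_CAP_GEN capability. Call sites then reduce to one line next to the other access bits, per the hunks above and in en_main.c:

    MLX5_SET(mkc, mkc, lw, 1);
    MLX5_SET(mkc, mkc, lr, 1);
    mlx5e_mkey_set_relaxed_ordering(mdev, mkc);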
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index af849bc83c30..08270987c506 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -243,7 +243,7 @@ int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset)
return MLX5E_NUM_PFLAGS;
case ETH_SS_TEST:
return mlx5e_self_test_num(priv);
- /* fallthrough */
+ fallthrough;
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index aa4fb503dac3..aebcf73f8546 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -45,7 +45,6 @@
#include "en_tc.h"
#include "en_rep.h"
#include "en_accel/ipsec.h"
-#include "en_accel/ipsec_rxtx.h"
#include "en_accel/en_accel.h"
#include "en_accel/tls.h"
#include "accel/ipsec.h"
@@ -65,7 +64,6 @@
#include "en/hv_vhca_stats.h"
#include "en/devlink.h"
#include "lib/mlx5.h"
-#include "fpga/ipsec.h"
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
@@ -276,7 +274,7 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
MLX5_SET(mkc, mkc, lw, 1);
MLX5_SET(mkc, mkc, lr, 1);
MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
-
+ mlx5e_mkey_set_relaxed_ordering(mdev, mkc);
MLX5_SET(mkc, mkc, qpn, 0xffffff);
MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
MLX5_SET64(mkc, mkc, len, npages << page_shift);
@@ -419,7 +417,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
&rq->wq_ctrl);
if (err)
- return err;
+ goto err_rq_wq_destroy;
rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR];
@@ -428,29 +426,6 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
pool_size = MLX5_MPWRQ_PAGES_PER_WQE <<
mlx5e_mpwqe_get_log_rq_size(params, xsk);
- rq->post_wqes = mlx5e_post_rx_mpwqes;
- rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
-
- rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe_mpwqe;
-#ifdef CONFIG_MLX5_EN_IPSEC
- if (MLX5_IPSEC_DEV(mdev)) {
- err = -EINVAL;
- netdev_err(c->netdev, "MPWQE RQ with IPSec offload not supported\n");
- goto err_rq_wq_destroy;
- }
-#endif
- if (!rq->handle_rx_cqe) {
- err = -EINVAL;
- netdev_err(c->netdev, "RX handler of MPWQE RQ is not set, err %d\n", err);
- goto err_rq_wq_destroy;
- }
-
- rq->mpwqe.skb_from_cqe_mpwrq = xsk ?
- mlx5e_xsk_skb_from_cqe_mpwrq_linear :
- mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ?
- mlx5e_skb_from_cqe_mpwrq_linear :
- mlx5e_skb_from_cqe_mpwrq_nonlinear;
-
rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
rq->mpwqe.num_strides =
BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
@@ -470,7 +445,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
&rq->wq_ctrl);
if (err)
- return err;
+ goto err_rq_wq_destroy;
rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];
@@ -492,30 +467,13 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
if (err)
goto err_free;
- rq->post_wqes = mlx5e_post_rx_wqes;
- rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
-
-#ifdef CONFIG_MLX5_EN_IPSEC
- if ((mlx5_fpga_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) &&
- c->priv->ipsec)
- rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
- else
-#endif
- rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe;
- if (!rq->handle_rx_cqe) {
- err = -EINVAL;
- netdev_err(c->netdev, "RX handler of RQ is not set, err %d\n", err);
- goto err_free;
- }
-
- rq->wqe.skb_from_cqe = xsk ?
- mlx5e_xsk_skb_from_cqe_linear :
- mlx5e_rx_is_linear_skb(params, NULL) ?
- mlx5e_skb_from_cqe_linear :
- mlx5e_skb_from_cqe_nonlinear;
rq->mkey_be = c->mkey_be;
}
+ err = mlx5e_rq_set_handlers(rq, params, xsk);
+ if (err)
+ goto err_free;
+
if (xsk) {
err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL, NULL);
@@ -3104,6 +3062,25 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
}
+static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev,
+ enum mlx5_port_status state)
+{
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+ int vport_admin_state;
+
+ mlx5_set_port_admin_status(mdev, state);
+
+ if (!MLX5_ESWITCH_MANAGER(mdev) || mlx5_eswitch_mode(esw) == MLX5_ESWITCH_OFFLOADS)
+ return;
+
+ if (state == MLX5_PORT_UP)
+ vport_admin_state = MLX5_VPORT_ADMIN_STATE_AUTO;
+ else
+ vport_admin_state = MLX5_VPORT_ADMIN_STATE_DOWN;
+
+ mlx5_eswitch_set_vport_state(esw, MLX5_VPORT_UPLINK, vport_admin_state);
+}
+
int mlx5e_open_locked(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -3136,7 +3113,7 @@ int mlx5e_open(struct net_device *netdev)
mutex_lock(&priv->state_lock);
err = mlx5e_open_locked(netdev);
if (!err)
- mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
+ mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_UP);
mutex_unlock(&priv->state_lock);
return err;
@@ -3170,7 +3147,7 @@ int mlx5e_close(struct net_device *netdev)
return -ENODEV;
mutex_lock(&priv->state_lock);
- mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_DOWN);
+ mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_DOWN);
err = mlx5e_close_locked(netdev);
mutex_unlock(&priv->state_lock);
@@ -4214,83 +4191,6 @@ int mlx5e_get_vf_stats(struct net_device *dev,
}
#endif
-struct mlx5e_vxlan_work {
- struct work_struct work;
- struct mlx5e_priv *priv;
- u16 port;
-};
-
-static void mlx5e_vxlan_add_work(struct work_struct *work)
-{
- struct mlx5e_vxlan_work *vxlan_work =
- container_of(work, struct mlx5e_vxlan_work, work);
- struct mlx5e_priv *priv = vxlan_work->priv;
- u16 port = vxlan_work->port;
-
- mutex_lock(&priv->state_lock);
- mlx5_vxlan_add_port(priv->mdev->vxlan, port);
- mutex_unlock(&priv->state_lock);
-
- kfree(vxlan_work);
-}
-
-static void mlx5e_vxlan_del_work(struct work_struct *work)
-{
- struct mlx5e_vxlan_work *vxlan_work =
- container_of(work, struct mlx5e_vxlan_work, work);
- struct mlx5e_priv *priv = vxlan_work->priv;
- u16 port = vxlan_work->port;
-
- mutex_lock(&priv->state_lock);
- mlx5_vxlan_del_port(priv->mdev->vxlan, port);
- mutex_unlock(&priv->state_lock);
- kfree(vxlan_work);
-}
-
-static void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, u16 port, int add)
-{
- struct mlx5e_vxlan_work *vxlan_work;
-
- vxlan_work = kmalloc(sizeof(*vxlan_work), GFP_ATOMIC);
- if (!vxlan_work)
- return;
-
- if (add)
- INIT_WORK(&vxlan_work->work, mlx5e_vxlan_add_work);
- else
- INIT_WORK(&vxlan_work->work, mlx5e_vxlan_del_work);
-
- vxlan_work->priv = priv;
- vxlan_work->port = port;
- queue_work(priv->wq, &vxlan_work->work);
-}
-
-void mlx5e_add_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti)
-{
- struct mlx5e_priv *priv = netdev_priv(netdev);
-
- if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
- return;
-
- if (!mlx5_vxlan_allowed(priv->mdev->vxlan))
- return;
-
- mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 1);
-}
-
-void mlx5e_del_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti)
-{
- struct mlx5e_priv *priv = netdev_priv(netdev);
-
- if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
- return;
-
- if (!mlx5_vxlan_allowed(priv->mdev->vxlan))
- return;
-
- mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 0);
-}
-
static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
struct sk_buff *skb,
netdev_features_t features)
@@ -4602,8 +4502,8 @@ const struct net_device_ops mlx5e_netdev_ops = {
.ndo_change_mtu = mlx5e_change_nic_mtu,
.ndo_do_ioctl = mlx5e_ioctl,
.ndo_set_tx_maxrate = mlx5e_set_tx_maxrate,
- .ndo_udp_tunnel_add = mlx5e_add_vxlan_port,
- .ndo_udp_tunnel_del = mlx5e_del_vxlan_port,
+ .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
+ .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = mlx5e_features_check,
.ndo_tx_timeout = mlx5e_tx_timeout,
.ndo_bpf = mlx5e_xdp,
@@ -4874,6 +4774,39 @@ static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
}
}
+static int mlx5e_vxlan_set_port(struct net_device *netdev, unsigned int table,
+ unsigned int entry, struct udp_tunnel_info *ti)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ return mlx5_vxlan_add_port(priv->mdev->vxlan, ntohs(ti->port));
+}
+
+static int mlx5e_vxlan_unset_port(struct net_device *netdev, unsigned int table,
+ unsigned int entry, struct udp_tunnel_info *ti)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ return mlx5_vxlan_del_port(priv->mdev->vxlan, ntohs(ti->port));
+}
+
+void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv)
+{
+ if (!mlx5_vxlan_allowed(priv->mdev->vxlan))
+ return;
+
+ priv->nic_info.set_port = mlx5e_vxlan_set_port;
+ priv->nic_info.unset_port = mlx5e_vxlan_unset_port;
+ priv->nic_info.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
+ UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN;
+ priv->nic_info.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN;
+ /* Don't count the space hard-coded to the IANA port */
+ priv->nic_info.tables[0].n_entries =
+ mlx5_vxlan_max_udp_ports(priv->mdev) - 1;
+
+ priv->netdev->udp_tunnel_nic_info = &priv->nic_info;
+}
+
static void mlx5e_build_nic_netdev(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -4917,6 +4850,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
+ mlx5e_vxlan_set_netdev_info(priv);
+
if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev) ||
mlx5e_any_tunnel_proto_supported(mdev)) {
netdev->hw_enc_features |= NETIF_F_HW_CSUM;
@@ -5201,7 +5136,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
/* Mark the link as currently not needed by the driver */
if (!netif_running(netdev))
- mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
+ mlx5e_modify_admin_state(mdev, MLX5_PORT_DOWN);
mlx5e_set_netdev_mtu_boundaries(priv);
mlx5e_set_dev_port_mtu(priv);
@@ -5222,8 +5157,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
rtnl_lock();
if (netif_running(netdev))
mlx5e_open(netdev);
- if (mlx5_vxlan_allowed(priv->mdev->vxlan))
- udp_tunnel_get_rx_info(netdev);
+ udp_tunnel_nic_reset_ntf(priv->netdev);
netif_device_attach(netdev);
rtnl_unlock();
}
@@ -5238,8 +5172,6 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
rtnl_lock();
if (netif_running(priv->netdev))
mlx5e_close(priv->netdev);
- if (mlx5_vxlan_allowed(priv->mdev->vxlan))
- udp_tunnel_drop_rx_info(priv->netdev);
netif_device_detach(priv->netdev);
rtnl_unlock();
@@ -5270,8 +5202,7 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
.update_rx = mlx5e_update_nic_rx,
.update_stats = mlx5e_update_ndo_stats,
.update_carrier = mlx5e_update_carrier,
- .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe,
- .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
+ .rx_handlers = &mlx5e_rx_handlers_nic,
.max_tc = MLX5E_MAX_NUM_TC,
.rq_groups = MLX5E_NUM_RQ_GROUPS(XSK),
.stats_grps = mlx5e_nic_stats_grps,
@@ -5409,6 +5340,8 @@ err_cleanup_tx:
profile->cleanup_tx(priv);
out:
+ set_bit(MLX5E_STATE_DESTROYING, &priv->state);
+ cancel_work_sync(&priv->update_stats_work);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index c300729fb498..e13e5d1b3eae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -42,6 +42,7 @@
#include "esw/chains.h"
#include "en.h"
#include "en_rep.h"
+#include "en/txrx.h"
#include "en_tc.h"
#include "en/rep/tc.h"
#include "en/rep/neigh.h"
@@ -610,6 +611,29 @@ static struct devlink_port *mlx5e_rep_get_devlink_port(struct net_device *dev)
return &rpriv->dl_port;
}
+static int mlx5e_rep_change_carrier(struct net_device *dev, bool new_carrier)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
+ int err;
+
+ if (new_carrier) {
+ err = mlx5_modify_vport_admin_state(priv->mdev, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
+ rep->vport, 1, MLX5_VPORT_ADMIN_STATE_UP);
+ if (err)
+ return err;
+ netif_carrier_on(dev);
+ } else {
+ err = mlx5_modify_vport_admin_state(priv->mdev, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
+ rep->vport, 1, MLX5_VPORT_ADMIN_STATE_DOWN);
+ if (err)
+ return err;
+ netif_carrier_off(dev);
+ }
+ return 0;
+}
+
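Note: .ndo_change_carrier is driven from userspace, e.g. "ip link set dev
<rep> carrier on|off" (RTM_NEWLINK with IFLA_CARRIER). The core dispatch is
roughly the following, paraphrased from net/core/dev.c and not part of this
patch:

	int dev_change_carrier(struct net_device *dev, bool new_carrier)
	{
		const struct net_device_ops *ops = dev->netdev_ops;

		if (!ops->ndo_change_carrier)
			return -EOPNOTSUPP;
		if (!netif_device_present(dev))
			return -ENODEV;
		return ops->ndo_change_carrier(dev, new_carrier);
	}

so a representor exposing this op lets the hypervisor administrator propagate
link state down to the matching VF's vport.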
static const struct net_device_ops mlx5e_netdev_ops_rep = {
.ndo_open = mlx5e_rep_open,
.ndo_stop = mlx5e_rep_close,
@@ -620,6 +644,7 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = {
.ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
.ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
.ndo_change_mtu = mlx5e_rep_change_mtu,
+ .ndo_change_carrier = mlx5e_rep_change_carrier,
};
static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
@@ -633,8 +658,8 @@ static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
.ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
.ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
.ndo_change_mtu = mlx5e_uplink_rep_change_mtu,
- .ndo_udp_tunnel_add = mlx5e_add_vxlan_port,
- .ndo_udp_tunnel_del = mlx5e_del_vxlan_port,
+ .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
+ .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = mlx5e_features_check,
.ndo_set_vf_mac = mlx5e_set_vf_mac,
.ndo_set_vf_rate = mlx5e_set_vf_rate,
@@ -699,12 +724,13 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
struct mlx5_eswitch_rep *rep = rpriv->rep;
struct mlx5_core_dev *mdev = priv->mdev;
+ SET_NETDEV_DEV(netdev, mdev->device);
if (rep->vport == MLX5_VPORT_UPLINK) {
- SET_NETDEV_DEV(netdev, mdev->device);
netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep;
/* we want a persistent mac for the uplink rep */
mlx5_query_mac_address(mdev, netdev->dev_addr);
netdev->ethtool_ops = &mlx5e_uplink_rep_ethtool_ops;
+ mlx5e_vxlan_set_netdev_info(priv);
mlx5e_dcbnl_build_rep_netdev(netdev);
} else {
netdev->netdev_ops = &mlx5e_netdev_ops_rep;
@@ -935,6 +961,7 @@ err_close_drop_rq:
static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
+ mlx5e_ethtool_cleanup_steering(priv);
rep_vport_rx_rule_destroy(priv);
mlx5e_destroy_rep_root_ft(priv);
mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
@@ -1079,6 +1106,8 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
mlx5e_rep_tc_enable(priv);
+ mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK,
+ 0, 0, MLX5_VPORT_ADMIN_STATE_AUTO);
mlx5_lag_add(mdev, netdev);
priv->events_nb.notifier_call = uplink_rep_async_event;
mlx5_notifier_register(mdev, &priv->events_nb);
@@ -1143,8 +1172,7 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
.enable = mlx5e_rep_enable,
.update_rx = mlx5e_update_rep_rx,
.update_stats = mlx5e_update_ndo_stats,
- .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
- .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq_rep,
+ .rx_handlers = &mlx5e_rx_handlers_rep,
.max_tc = 1,
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
.stats_grps = mlx5e_rep_stats_grps,
@@ -1163,8 +1191,7 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
.update_rx = mlx5e_update_rep_rx,
.update_stats = mlx5e_update_ndo_stats,
.update_carrier = mlx5e_update_carrier,
- .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
- .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq_rep,
+ .rx_handlers = &mlx5e_rx_handlers_rep,
.max_tc = MLX5E_MAX_NUM_TC,
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
.stats_grps = mlx5e_ul_rep_stats_grps,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index 1d5669801484..622c27ae4ac7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -41,6 +41,8 @@
#include "lib/port_tun.h"
#ifdef CONFIG_MLX5_ESWITCH
+extern const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep;
+
struct mlx5e_neigh_update_table {
struct rhashtable neigh_ht;
/* Save the neigh hash entries in a list in addition to the hash table
@@ -223,10 +225,6 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv);
int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv);
void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv);
-void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
-void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq,
- struct mlx5_cqe64 *cqe);
-
void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv);
bool mlx5e_eswitch_vf_rep(struct net_device *netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 74860f3827b1..65828af120b7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -34,22 +34,39 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
-#include <linux/indirect_call_wrapper.h>
#include <net/ip6_checksum.h>
#include <net/page_pool.h>
#include <net/inet_ecn.h>
#include "en.h"
+#include "en/txrx.h"
#include "en_tc.h"
#include "eswitch.h"
#include "en_rep.h"
#include "en/rep/tc.h"
#include "ipoib/ipoib.h"
+#include "accel/ipsec.h"
+#include "fpga/ipsec.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/tls_rxtx.h"
#include "lib/clock.h"
#include "en/xdp.h"
#include "en/xsk/rx.h"
#include "en/health.h"
+#include "en/params.h"
+
+static struct sk_buff *
+mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
+ u16 cqe_bcnt, u32 head_offset, u32 page_idx);
+static struct sk_buff *
+mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
+ u16 cqe_bcnt, u32 head_offset, u32 page_idx);
+static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
+static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
+
+const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic = {
+ .handle_rx_cqe = mlx5e_handle_rx_cqe,
+ .handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
+};
static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
{
@@ -370,7 +387,7 @@ static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq,
mlx5e_put_rx_frag(rq, wi, recycle);
}
-void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
+static void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
{
struct mlx5e_wqe_frag_info *wi = get_frag(rq, ix);
@@ -537,14 +554,14 @@ err:
return err;
}
-void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
+static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
/* Don't recycle, this function is called on rq/netdev close */
mlx5e_free_rx_mpwqe(rq, wi, false);
}
-bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
+INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
{
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
u8 wqe_bulk;
@@ -685,7 +702,7 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
return i;
}
-bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
+INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
{
struct mlx5e_icosq *sq = &rq->channel->icosq;
struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
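Note: INDIRECT_CALLABLE_SCOPE (pulled in here via "en/txrx.h") expands to
"static" when CONFIG_RETPOLINE is off and to nothing when it is on, so these
functions stay externally visible exactly when callers need their addresses
for INDIRECT_CALL_2() devirtualization. Approximately, from
include/linux/indirect_call_wrapper.h:

	#ifdef CONFIG_RETPOLINE
	#define INDIRECT_CALLABLE_DECLARE(f)	f
	#define INDIRECT_CALLABLE_SCOPE
	#else
	#define INDIRECT_CALLABLE_DECLARE(f)
	#define INDIRECT_CALLABLE_SCOPE		static
	#endif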
@@ -1106,7 +1123,7 @@ static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom,
xdp->frame_sz = rq->buff.frame0_sz;
}
-struct sk_buff *
+static struct sk_buff *
mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
{
@@ -1146,7 +1163,7 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
return skb;
}
-struct sk_buff *
+static struct sk_buff *
mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
{
@@ -1201,7 +1218,7 @@ static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
}
}
-void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
struct mlx5e_wqe_frag_info *wi;
@@ -1244,7 +1261,7 @@ wq_cyc_pop:
}
#ifdef CONFIG_MLX5_ESWITCH
-void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
struct net_device *netdev = rq->netdev;
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -1299,8 +1316,7 @@ wq_cyc_pop:
mlx5_wq_cyc_pop(wq);
}
-void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq,
- struct mlx5_cqe64 *cqe)
+static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
u16 wqe_id = be16_to_cpu(cqe->wqe_id);
@@ -1358,9 +1374,14 @@ mpwrq_cqe_out:
mlx5e_free_rx_mpwqe(rq, wi, true);
mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
}
+
+const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep = {
+ .handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
+ .handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq_rep,
+};
#endif
-struct sk_buff *
+static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
u16 cqe_bcnt, u32 head_offset, u32 page_idx)
{
@@ -1406,7 +1427,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
return skb;
}
-struct sk_buff *
+static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
u16 cqe_bcnt, u32 head_offset, u32 page_idx)
{
@@ -1456,7 +1477,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
return skb;
}
-void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
u16 wqe_id = be16_to_cpu(cqe->wqe_id);
@@ -1652,7 +1673,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
stats->bytes += cqe_bcnt;
}
-void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+static void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
struct mlx5e_wqe_frag_info *wi;
@@ -1688,11 +1709,15 @@ wq_free_wqe:
mlx5_wq_cyc_pop(wq);
}
+const struct mlx5e_rx_handlers mlx5i_rx_handlers = {
+ .handle_rx_cqe = mlx5i_handle_rx_cqe,
+ .handle_rx_cqe_mpwqe = NULL, /* Not supported */
+};
#endif /* CONFIG_MLX5_CORE_IPOIB */
#ifdef CONFIG_MLX5_EN_IPSEC
-void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+static void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
struct mlx5e_wqe_frag_info *wi;
@@ -1729,3 +1754,55 @@ wq_free_wqe:
}
#endif /* CONFIG_MLX5_EN_IPSEC */
+
+int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk)
+{
+ struct mlx5_core_dev *mdev = rq->mdev;
+ struct mlx5e_channel *c = rq->channel;
+
+ switch (rq->wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ rq->mpwqe.skb_from_cqe_mpwrq = xsk ?
+ mlx5e_xsk_skb_from_cqe_mpwrq_linear :
+ mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ?
+ mlx5e_skb_from_cqe_mpwrq_linear :
+ mlx5e_skb_from_cqe_mpwrq_nonlinear;
+ rq->post_wqes = mlx5e_post_rx_mpwqes;
+ rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
+
+ rq->handle_rx_cqe = c->priv->profile->rx_handlers->handle_rx_cqe_mpwqe;
+#ifdef CONFIG_MLX5_EN_IPSEC
+ if (MLX5_IPSEC_DEV(mdev)) {
+ netdev_err(c->netdev, "MPWQE RQ with IPSec offload not supported\n");
+ return -EINVAL;
+ }
+#endif
+ if (!rq->handle_rx_cqe) {
+ netdev_err(c->netdev, "RX handler of MPWQE RQ is not set\n");
+ return -EINVAL;
+ }
+ break;
+ default: /* MLX5_WQ_TYPE_CYCLIC */
+ rq->wqe.skb_from_cqe = xsk ?
+ mlx5e_xsk_skb_from_cqe_linear :
+ mlx5e_rx_is_linear_skb(params, NULL) ?
+ mlx5e_skb_from_cqe_linear :
+ mlx5e_skb_from_cqe_nonlinear;
+ rq->post_wqes = mlx5e_post_rx_wqes;
+ rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
+
+#ifdef CONFIG_MLX5_EN_IPSEC
+ if ((mlx5_fpga_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) &&
+ c->priv->ipsec)
+ rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
+ else
+#endif
+ rq->handle_rx_cqe = c->priv->profile->rx_handlers->handle_rx_cqe;
+ if (!rq->handle_rx_cqe) {
+ netdev_err(c->netdev, "RX handler of RQ is not set\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
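Note: the chained conditional expressions above pick one of three SKB
constructors; spelled out as if/else, the striding-RQ branch is equivalent
to:

	if (xsk)
		rq->mpwqe.skb_from_cqe_mpwrq = mlx5e_xsk_skb_from_cqe_mpwrq_linear;
	else if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
		rq->mpwqe.skb_from_cqe_mpwrq = mlx5e_skb_from_cqe_mpwrq_linear;
	else
		rq->mpwqe.skb_from_cqe_mpwrq = mlx5e_skb_from_cqe_mpwrq_nonlinear;

The CQE handler itself is no longer assigned ad hoc; it always comes from the
profile's const rx_handlers table, which is what allows the handlers in this
file to become static.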
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 7a0c22d05575..fd53d101d8fd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -2247,6 +2247,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
match.key->vlan_priority);
*match_level = MLX5_MATCH_L2;
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index e3dbab2a294c..de10b06bade5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -31,8 +31,8 @@
*/
#include <linux/irq.h>
-#include <linux/indirect_call_wrapper.h>
#include "en.h"
+#include "en/txrx.h"
#include "en/xdp.h"
#include "en/xsk/rx.h"
#include "en/xsk/tx.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index e8f900e9577e..6e6a9a563992 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1632,7 +1632,7 @@ abort:
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
}
-
+ esw_destroy_tsar(esw);
return err;
}
@@ -1687,8 +1687,6 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
else if (esw->mode == MLX5_ESWITCH_OFFLOADS)
esw_offloads_disable(esw);
- esw_destroy_tsar(esw);
-
old_mode = esw->mode;
esw->mode = MLX5_ESWITCH_NONE;
@@ -1698,6 +1696,8 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
}
+ esw_destroy_tsar(esw);
+
if (clear_vf)
mlx5_eswitch_clear_vf_vports_info(esw);
}
@@ -1949,6 +1949,8 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
u16 vport, int link_state)
{
struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
+ int opmod = MLX5_VPORT_STATE_OP_MOD_ESW_VPORT;
+ int other_vport = 1;
int err = 0;
if (!ESW_ALLOWED(esw))
@@ -1956,15 +1958,17 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
if (IS_ERR(evport))
return PTR_ERR(evport);
+ if (vport == MLX5_VPORT_UPLINK) {
+ opmod = MLX5_VPORT_STATE_OP_MOD_UPLINK;
+ other_vport = 0;
+ vport = 0;
+ }
mutex_lock(&esw->state_lock);
- err = mlx5_modify_vport_admin_state(esw->dev,
- MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
- vport, 1, link_state);
+ err = mlx5_modify_vport_admin_state(esw->dev, opmod, vport, other_vport, link_state);
if (err) {
- mlx5_core_warn(esw->dev,
- "Failed to set vport %d link state, err = %d",
- vport, err);
+ mlx5_core_warn(esw->dev, "Failed to set vport %d link state, opmod = %d, err = %d",
+ vport, opmod, err);
goto unlock;
}
@@ -2006,8 +2010,6 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
int err = 0;
- if (!ESW_ALLOWED(esw))
- return -EPERM;
if (IS_ERR(evport))
return PTR_ERR(evport);
if (vlan > 4095 || qos > 7)
@@ -2035,6 +2037,9 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
u8 set_flags = 0;
int err;
+ if (!ESW_ALLOWED(esw))
+ return -EPERM;
+
if (vlan || qos)
set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index b68e02ad65e2..867d8120b8a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -271,7 +271,6 @@ struct mlx5_eswitch {
struct mlx5_esw_offload offloads;
int mode;
- int nvports;
u16 manager_vport;
u16 first_host_vport;
struct mlx5_esw_functions esw_funcs;
@@ -686,6 +685,8 @@ static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { r
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) {}
static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
+static inline
+int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
return ERR_PTR(-EOPNOTSUPP);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index db856d70c4f8..d2516922d867 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -236,6 +236,15 @@ static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
return &esw->offloads.vport_reps[idx];
}
+static void
+mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_esw_flow_attr *attr)
+{
+ if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
+ attr && attr->in_rep && attr->in_rep->vport == MLX5_VPORT_UPLINK)
+ spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+}
static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
@@ -259,9 +268,6 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
mlx5_eswitch_get_vport_metadata_mask());
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
- misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
- if (memchr_inv(misc, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc)))
- spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
} else {
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
@@ -279,10 +285,6 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
}
-
- if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
- attr->in_rep->vport == MLX5_VPORT_UPLINK)
- spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
}
struct mlx5_flow_handle *
@@ -396,6 +398,8 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
goto err_esw_get;
}
+ mlx5_eswitch_set_rule_flow_source(esw, spec, attr);
+
if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, attr,
&flow_act, dest, i);
@@ -462,6 +466,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
i++;
mlx5_eswitch_set_rule_source_port(esw, spec, attr);
+ mlx5_eswitch_set_rule_flow_source(esw, spec, attr);
if (attr->outer_match_level != MLX5_MATCH_NONE)
spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
@@ -1132,7 +1137,7 @@ static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
}
}
-static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
+static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_table_attr ft_attr = {};
@@ -1165,7 +1170,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
goto ns_err;
}
- table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ +
+ table_size = esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ +
MLX5_ESW_MISS_FLOWS + esw->total_vports;
/* create the slow path fdb with encap set, so further table instances
@@ -1202,7 +1207,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
- ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ;
+ ix = esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ;
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);
@@ -1270,7 +1275,6 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
if (err)
goto miss_rule_err;
- esw->nvports = nvports;
kvfree(flow_group_in);
return 0;
@@ -1311,7 +1315,7 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
MLX5_FLOW_STEERING_MODE_DMFS);
}
-static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports)
+static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_core_dev *dev = esw->dev;
@@ -1325,7 +1329,7 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports)
return -EOPNOTSUPP;
}
- ft_attr.max_fte = nvports + MLX5_ESW_MISS_FLOWS;
+ ft_attr.max_fte = esw->total_vports + MLX5_ESW_MISS_FLOWS;
ft_attr.prio = 1;
ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
@@ -1346,14 +1350,15 @@ static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
mlx5_destroy_flow_table(offloads->ft_offloads);
}
-static int esw_create_vport_rx_group(struct mlx5_eswitch *esw, int nvports)
+static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_group *g;
u32 *flow_group_in;
+ int nvports;
int err = 0;
- nvports = nvports + MLX5_ESW_MISS_FLOWS;
+ nvports = esw->total_vports + MLX5_ESW_MISS_FLOWS;
flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in)
return -ENOMEM;
@@ -1986,15 +1991,8 @@ static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
{
- int num_vfs = esw->esw_funcs.num_vfs;
- int total_vports;
int err;
- if (mlx5_core_is_ecpf_esw_manager(esw->dev))
- total_vports = esw->total_vports;
- else
- total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev);
-
memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
mutex_init(&esw->fdb_table.offloads.vports.lock);
hash_init(esw->fdb_table.offloads.vports.table);
@@ -2003,7 +2001,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
if (err)
goto create_acl_err;
- err = esw_create_offloads_table(esw, total_vports);
+ err = esw_create_offloads_table(esw);
if (err)
goto create_offloads_err;
@@ -2011,11 +2009,11 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
if (err)
goto create_restore_err;
- err = esw_create_offloads_fdb_tables(esw, total_vports);
+ err = esw_create_offloads_fdb_tables(esw);
if (err)
goto create_fdb_err;
- err = esw_create_vport_rx_group(esw, total_vports);
+ err = esw_create_vport_rx_group(esw);
if (err)
goto create_fg_err;
@@ -2353,7 +2351,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
goto out;
- /* fall through */
+ fallthrough;
case MLX5_CAP_INLINE_MODE_L2:
NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
err = -EOPNOTSUPP;
@@ -2465,13 +2463,13 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
esw->offloads.encap = encap;
- err = esw_create_offloads_fdb_tables(esw, esw->nvports);
+ err = esw_create_offloads_fdb_tables(esw);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"Failed re-creating fast FDB table");
esw->offloads.encap = !encap;
- (void)esw_create_offloads_fdb_tables(esw, esw->nvports);
+ (void)esw_create_offloads_fdb_tables(esw);
}
unlock:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
index 182d3ac3e73f..831d2c39e153 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
@@ -339,14 +339,14 @@ static void mlx5_fpga_conn_handle_cqe(struct mlx5_fpga_conn *conn,
switch (opcode) {
case MLX5_CQE_REQ_ERR:
status = ((struct mlx5_err_cqe *)cqe)->syndrome;
- /* Fall through */
+ fallthrough;
case MLX5_CQE_REQ:
mlx5_fpga_conn_sq_cqe(conn, cqe, status);
break;
case MLX5_CQE_RESP_ERR:
status = ((struct mlx5_err_cqe *)cqe)->syndrome;
- /* Fall through */
+ fallthrough;
case MLX5_CQE_RESP_SEND:
mlx5_fpga_conn_rq_cqe(conn, cqe, status);
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 6904ad96af48..7e70a8178a46 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -796,7 +796,7 @@ static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root,
return ft;
}
-/* If reverse if false then return the first flow table in next priority of
+/* If reverse is false then return the first flow table in next priority of
* prio in the tree, else return the last flow table in the previous priority
* of prio in the tree.
*/
@@ -828,34 +828,16 @@ static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
return find_closest_ft(prio, true);
}
-static struct fs_prio *find_fwd_ns_prio(struct mlx5_flow_root_namespace *root,
- struct mlx5_flow_namespace *ns)
-{
- struct mlx5_flow_namespace *root_ns = &root->ns;
- struct fs_prio *iter_prio;
- struct fs_prio *prio;
-
- fs_get_obj(prio, ns->node.parent);
- list_for_each_entry(iter_prio, &root_ns->node.children, node.list) {
- if (iter_prio == prio &&
- !list_is_last(&prio->node.children, &iter_prio->node.list))
- return list_next_entry(iter_prio, node.list);
- }
- return NULL;
-}
-
static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft,
struct mlx5_flow_act *flow_act)
{
- struct mlx5_flow_root_namespace *root = find_root(&ft->node);
struct fs_prio *prio;
+ bool next_ns;
- if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS)
- prio = find_fwd_ns_prio(root, ft->ns);
- else
- fs_get_obj(prio, ft->node.parent);
+ next_ns = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS;
+ fs_get_obj(prio, next_ns ? ft->ns->node.parent : ft->node.parent);
- return (prio) ? find_next_chained_ft(prio) : NULL;
+ return find_next_chained_ft(prio);
}
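Note: fs_get_obj() is a container_of() wrapper, approximately

	#define fs_get_obj(v, _node)  { v = container_of((_node), typeof(*(v)), node); }

so the single fs_get_obj() call resolves the owning prio of either the table
itself or, for FWD_NEXT_NS, of the table's namespace one level up, replacing
the removed hand-rolled walk over the root namespace's children.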
static int connect_fts_in_prio(struct mlx5_core_dev *dev,
@@ -864,18 +846,15 @@ static int connect_fts_in_prio(struct mlx5_core_dev *dev,
{
struct mlx5_flow_root_namespace *root = find_root(&prio->node);
struct mlx5_flow_table *iter;
- int i = 0;
int err;
fs_for_each_ft(iter, prio) {
- i++;
err = root->cmds->modify_flow_table(root, iter, ft);
if (err) {
- mlx5_core_warn(dev, "Failed to modify flow table %d\n",
- iter->id);
+ mlx5_core_err(dev,
+ "Failed to modify flow table id %d, type %d, err %d\n",
+ iter->id, iter->type, err);
/* The driver is out of sync with the FW */
- if (i > 1)
- WARN_ON(true);
return err;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 690b822c6152..5763965d5ef3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -464,8 +464,7 @@ static const struct mlx5e_profile mlx5i_nic_profile = {
.update_rx = mlx5i_update_nic_rx,
.update_stats = NULL, /* mlx5i_update_stats */
.update_carrier = NULL, /* no HW update in IB link */
- .rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe,
- .rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */
+ .rx_handlers = &mlx5i_rx_handlers,
.max_tc = MLX5I_MAX_NUM_TC,
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
.stats_grps = mlx5i_stats_grps,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
index 79071a15c4ca..b79dc1e28c41 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
@@ -42,6 +42,7 @@
extern const struct ethtool_ops mlx5i_ethtool_ops;
extern const struct ethtool_ops mlx5i_pkey_ethtool_ops;
+extern const struct mlx5e_rx_handlers mlx5i_rx_handlers;
#define MLX5_IB_GRH_BYTES 40
#define MLX5_IPOIB_ENCAP_LEN 4
@@ -117,7 +118,6 @@ struct mlx5i_tx_wqe {
void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5_av *av, u32 dqpn, u32 dqkey, bool xmit_more);
-void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
#endif /* CONFIG_MLX5_CORE_IPOIB */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index f70367018862..7163d9f6c4a6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -349,8 +349,7 @@ static const struct mlx5e_profile mlx5i_pkey_nic_profile = {
.disable = NULL,
.update_rx = mlx5i_update_nic_rx,
.update_stats = NULL,
- .rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe,
- .rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */
+ .rx_handlers = &mlx5i_rx_handlers,
.max_tc = MLX5I_MAX_NUM_TC,
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
index e9089a793632..9e68f5926ab6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
@@ -198,13 +198,13 @@ static void mlx5_lag_fib_update(struct work_struct *work)
/* Protect internal structures from changes */
rtnl_lock();
switch (fib_work->event) {
- case FIB_EVENT_ENTRY_REPLACE: /* fall through */
+ case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_DEL:
mlx5_lag_fib_route_event(ldev, fib_work->event,
fib_work->fen_info.fi);
fib_info_put(fib_work->fen_info.fi);
break;
- case FIB_EVENT_NH_ADD: /* fall through */
+ case FIB_EVENT_NH_ADD:
case FIB_EVENT_NH_DEL:
fib_nh = fib_work->fnh_info.fib_nh;
mlx5_lag_fib_nexthop_event(ldev,
@@ -255,7 +255,7 @@ static int mlx5_lag_fib_event(struct notifier_block *nb,
return NOTIFY_DONE;
switch (event) {
- case FIB_EVENT_ENTRY_REPLACE: /* fall through */
+ case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_DEL:
fen_info = container_of(info, struct fib_entry_notifier_info,
info);
@@ -278,7 +278,7 @@ static int mlx5_lag_fib_event(struct notifier_block *nb,
*/
fib_info_hold(fib_work->fen_info.fi);
break;
- case FIB_EVENT_NH_ADD: /* fall through */
+ case FIB_EVENT_NH_ADD:
case FIB_EVENT_NH_DEL:
fnh_info = container_of(info, struct fib_nh_notifier_info,
info);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index ef0706d15a5b..2d55b7c22c03 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -273,17 +273,17 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp,
if (rq->extts.index >= clock->ptp_info.n_pins)
return -EINVAL;
+ pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
+ if (pin < 0)
+ return -EBUSY;
+
if (on) {
- pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
- if (pin < 0)
- return -EBUSY;
pin_mode = MLX5_PIN_MODE_IN;
pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
field_select = MLX5_MTPPS_FS_PIN_MODE |
MLX5_MTPPS_FS_PATTERN |
MLX5_MTPPS_FS_ENABLE;
} else {
- pin = rq->extts.index;
field_select = MLX5_MTPPS_FS_ENABLE;
}
@@ -331,12 +331,12 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
if (rq->perout.index >= clock->ptp_info.n_pins)
return -EINVAL;
- if (on) {
- pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT,
- rq->perout.index);
- if (pin < 0)
- return -EBUSY;
+ pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT,
+ rq->perout.index);
+ if (pin < 0)
+ return -EBUSY;
+ if (on) {
pin_mode = MLX5_PIN_MODE_OUT;
pattern = MLX5_OUT_PATTERN_PERIODIC;
ts.tv_sec = rq->perout.period.sec;
@@ -362,7 +362,6 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
MLX5_MTPPS_FS_ENABLE |
MLX5_MTPPS_FS_TIME_STAMP;
} else {
- pin = rq->perout.index;
field_select = MLX5_MTPPS_FS_ENABLE;
}
@@ -409,10 +408,31 @@ static int mlx5_ptp_enable(struct ptp_clock_info *ptp,
return 0;
}
+enum {
+ MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN = BIT(0),
+ MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT = BIT(1),
+};
+
static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
enum ptp_pin_function func, unsigned int chan)
{
- return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0;
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
+ ptp_info);
+
+ switch (func) {
+ case PTP_PF_NONE:
+ return 0;
+ case PTP_PF_EXTTS:
+ return !(clock->pps_info.pin_caps[pin] &
+ MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN);
+ case PTP_PF_PEROUT:
+ return !(clock->pps_info.pin_caps[pin] &
+ MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
}
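Note: .verify is invoked by the PTP core when userspace reprograms a pin
(PTP_PIN_SETFUNC on /dev/ptpN); a nonzero return rejects the request, so the
pin_caps checks above gate which pins may become EXTTS or PEROUT. A hedged
userspace sketch of the call that exercises it (cf. the kernel's testptp
tool; the device path is an assumption):

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/ptp_clock.h>

	struct ptp_pin_desc desc = {
		.index = 0,
		.func  = PTP_PF_PEROUT,	/* fails unless the pin advertises PPS-out */
		.chan  = 0,
	};
	int fd = open("/dev/ptp0", O_RDWR);
	ioctl(fd, PTP_PIN_SETFUNC, &desc);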
static const struct ptp_clock_info mlx5_ptp_clock_info = {
@@ -432,6 +452,38 @@ static const struct ptp_clock_info mlx5_ptp_clock_info = {
.verify = NULL,
};
+static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
+ u32 *mtpps, u32 mtpps_size)
+{
+ u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {};
+
+ MLX5_SET(mtpps_reg, in, pin, pin);
+
+ return mlx5_core_access_reg(mdev, in, sizeof(in), mtpps,
+ mtpps_size, MLX5_REG_MTPPS, 0, 0);
+}
+
+static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
+{
+ struct mlx5_core_dev *mdev = clock->mdev;
+ u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {};
+ u8 mode;
+ int err;
+
+ err = mlx5_query_mtpps_pin_mode(mdev, pin, out, sizeof(out));
+ if (err || !MLX5_GET(mtpps_reg, out, enable))
+ return PTP_PF_NONE;
+
+ mode = MLX5_GET(mtpps_reg, out, pin_mode);
+
+ if (mode == MLX5_PIN_MODE_IN)
+ return PTP_PF_EXTTS;
+ else if (mode == MLX5_PIN_MODE_OUT)
+ return PTP_PF_PEROUT;
+
+ return PTP_PF_NONE;
+}
+
static int mlx5_init_pin_config(struct mlx5_clock *clock)
{
int i;
@@ -451,8 +503,8 @@ static int mlx5_init_pin_config(struct mlx5_clock *clock)
sizeof(clock->ptp_info.pin_config[i].name),
"mlx5_pps%d", i);
clock->ptp_info.pin_config[i].index = i;
- clock->ptp_info.pin_config[i].func = PTP_PF_NONE;
- clock->ptp_info.pin_config[i].chan = i;
+ clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(clock, i);
+ clock->ptp_info.pin_config[i].chan = 0;
}
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
index be34330d89cc..3315afe2f8dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
@@ -42,21 +42,14 @@ struct mlx5_vxlan {
struct mlx5_core_dev *mdev;
/* max_num_ports is usually 4, 16 buckets is more than enough */
DECLARE_HASHTABLE(htable, 4);
- int num_ports;
struct mutex sync_lock; /* sync add/del port HW operations */
};
struct mlx5_vxlan_port {
struct hlist_node hlist;
- refcount_t refcount;
u16 udp_port;
};
-static inline u8 mlx5_vxlan_max_udp_ports(struct mlx5_core_dev *mdev)
-{
- return MLX5_CAP_ETH(mdev, max_vxlan_udp_ports) ?: 4;
-}
-
static int mlx5_vxlan_core_add_port_cmd(struct mlx5_core_dev *mdev, u16 port)
{
u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)] = {};
@@ -109,48 +102,24 @@ static struct mlx5_vxlan_port *vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 p
int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port)
{
struct mlx5_vxlan_port *vxlanp;
- int ret = 0;
-
- mutex_lock(&vxlan->sync_lock);
- vxlanp = vxlan_lookup_port(vxlan, port);
- if (vxlanp) {
- refcount_inc(&vxlanp->refcount);
- goto unlock;
- }
+ int ret;
- if (vxlan->num_ports >= mlx5_vxlan_max_udp_ports(vxlan->mdev)) {
- mlx5_core_info(vxlan->mdev,
- "UDP port (%d) not offloaded, max number of UDP ports (%d) are already offloaded\n",
- port, mlx5_vxlan_max_udp_ports(vxlan->mdev));
- ret = -ENOSPC;
- goto unlock;
- }
+ vxlanp = kzalloc(sizeof(*vxlanp), GFP_KERNEL);
+ if (!vxlanp)
+ return -ENOMEM;
+ vxlanp->udp_port = port;
ret = mlx5_vxlan_core_add_port_cmd(vxlan->mdev, port);
- if (ret)
- goto unlock;
-
- vxlanp = kzalloc(sizeof(*vxlanp), GFP_KERNEL);
- if (!vxlanp) {
- ret = -ENOMEM;
- goto err_delete_port;
+ if (ret) {
+ kfree(vxlanp);
+ return ret;
}
- vxlanp->udp_port = port;
- refcount_set(&vxlanp->refcount, 1);
-
+ mutex_lock(&vxlan->sync_lock);
hash_add_rcu(vxlan->htable, &vxlanp->hlist, port);
-
- vxlan->num_ports++;
mutex_unlock(&vxlan->sync_lock);
- return 0;
-
-err_delete_port:
- mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port);
-unlock:
- mutex_unlock(&vxlan->sync_lock);
- return ret;
+ return 0;
}
int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port)
@@ -161,18 +130,15 @@ int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port)
mutex_lock(&vxlan->sync_lock);
vxlanp = vxlan_lookup_port(vxlan, port);
- if (!vxlanp) {
+ if (WARN_ON(!vxlanp)) {
ret = -ENOENT;
goto out_unlock;
}
- if (refcount_dec_and_test(&vxlanp->refcount)) {
- hash_del_rcu(&vxlanp->hlist);
- synchronize_rcu();
- mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port);
- kfree(vxlanp);
- vxlan->num_ports--;
- }
+ hash_del_rcu(&vxlanp->hlist);
+ synchronize_rcu();
+ mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port);
+ kfree(vxlanp);
out_unlock:
mutex_unlock(&vxlan->sync_lock);
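Note: the refcount and max-port policing removed here are not lost, they move
into the udp_tunnel_nic core. A sketch of the contract the driver now relies
on (assumed from the core's semantics, not spelled out in this patch):

	/*
	 * udp_tunnel_nic contract:
	 *
	 *   ->set_port(dev, t, e, ti)    at most once per live (table, entry)
	 *   ->unset_port(dev, t, e, ti)  exactly once per prior set_port
	 *
	 * hence vxlan_lookup_port() on delete must succeed, and a miss can
	 * only be a core-side bookkeeping bug: WARN_ON(!vxlanp) is fair.
	 */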
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h
index 6d599f4a8acd..ec766529f49b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h
@@ -37,6 +37,11 @@
struct mlx5_vxlan;
struct mlx5_vxlan_port;
+static inline u8 mlx5_vxlan_max_udp_ports(struct mlx5_core_dev *mdev)
+{
+ return MLX5_CAP_ETH(mdev, max_vxlan_udp_ports) ?: 4;
+}
+
static inline bool mlx5_vxlan_allowed(struct mlx5_vxlan *vxlan)
{
/* not allowed reason is encoded in vxlan pointer as error,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 5ddd18639a1e..a4a23a27c368 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -35,6 +35,7 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
+#include <linux/xarray.h>
#include "mlx5_core.h"
#include "lib/eq.h"
@@ -73,15 +74,45 @@ enum {
MLX5_NUM_4K_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};
+static struct rb_root *page_root_per_func_id(struct mlx5_core_dev *dev, u16 func_id)
+{
+ struct rb_root *root;
+ int err;
+
+ root = xa_load(&dev->priv.page_root_xa, func_id);
+ if (root)
+ return root;
+
+ root = kzalloc(sizeof(*root), GFP_KERNEL);
+ if (!root)
+ return ERR_PTR(-ENOMEM);
+
+ err = xa_insert(&dev->priv.page_root_xa, func_id, root, GFP_KERNEL);
+ if (err) {
+ kfree(root);
+ return ERR_PTR(err);
+ }
+
+ *root = RB_ROOT;
+
+ return root;
+}
+
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
{
- struct rb_root *root = &dev->priv.page_root;
- struct rb_node **new = &root->rb_node;
struct rb_node *parent = NULL;
+ struct rb_root *root;
+ struct rb_node **new;
struct fw_page *nfp;
struct fw_page *tfp;
int i;
+ root = page_root_per_func_id(dev, func_id);
+ if (IS_ERR(root))
+ return PTR_ERR(root);
+
+ new = &root->rb_node;
+
while (*new) {
parent = *new;
tfp = rb_entry(parent, struct fw_page, rb_node);
@@ -111,13 +142,20 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u
return 0;
}
-static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr)
+static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr,
+ u32 func_id)
{
- struct rb_root *root = &dev->priv.page_root;
- struct rb_node *tmp = root->rb_node;
struct fw_page *result = NULL;
+ struct rb_root *root;
+ struct rb_node *tmp;
struct fw_page *tfp;
+ root = xa_load(&dev->priv.page_root_xa, func_id);
+ if (WARN_ON_ONCE(!root))
+ return NULL;
+
+ tmp = root->rb_node;
+
while (tmp) {
tfp = rb_entry(tmp, struct fw_page, rb_node);
if (tfp->addr < addr) {
@@ -191,7 +229,13 @@ static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u16 func_id)
static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
bool in_free_list)
{
- rb_erase(&fwp->rb_node, &dev->priv.page_root);
+ struct rb_root *root;
+
+ root = xa_load(&dev->priv.page_root_xa, fwp->func_id);
+ if (WARN_ON_ONCE(!root))
+ return;
+
+ rb_erase(&fwp->rb_node, root);
if (in_free_list)
list_del(&fwp->list);
dma_unmap_page(dev->device, fwp->addr & MLX5_U64_4K_PAGE_MASK,
@@ -200,12 +244,12 @@ static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
kfree(fwp);
}
-static void free_4k(struct mlx5_core_dev *dev, u64 addr)
+static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 func_id)
{
struct fw_page *fwp;
int n;
- fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK);
+ fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, func_id);
if (!fwp) {
mlx5_core_warn_rl(dev, "page not found\n");
return;
@@ -340,7 +384,7 @@ retry:
out_4k:
for (i--; i >= 0; i--)
- free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]));
+ free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), func_id);
out_free:
kvfree(in);
if (notify_fail)
@@ -351,16 +395,19 @@ out_free:
static void release_all_pages(struct mlx5_core_dev *dev, u32 func_id,
bool ec_function)
{
+ struct rb_root *root;
struct rb_node *p;
int npages = 0;
- p = rb_first(&dev->priv.page_root);
+ root = xa_load(&dev->priv.page_root_xa, func_id);
+ if (WARN_ON_ONCE(!root))
+ return;
+
+ p = rb_first(root);
while (p) {
struct fw_page *fwp = rb_entry(p, struct fw_page, rb_node);
p = rb_next(p);
- if (fwp->func_id != func_id)
- continue;
npages += (MLX5_NUM_4K_IN_PAGE - fwp->free_count);
free_fwp(dev, fwp, fwp->free_count);
}
@@ -378,6 +425,7 @@ static void release_all_pages(struct mlx5_core_dev *dev, u32 func_id,
static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
u32 *in, int in_size, u32 *out, int out_size)
{
+ struct rb_root *root;
struct fw_page *fwp;
struct rb_node *p;
u32 func_id;
@@ -391,12 +439,14 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
npages = MLX5_GET(manage_pages_in, in, input_num_entries);
func_id = MLX5_GET(manage_pages_in, in, function_id);
- p = rb_first(&dev->priv.page_root);
+ root = xa_load(&dev->priv.page_root_xa, func_id);
+ if (WARN_ON_ONCE(!root))
+ return -EEXIST;
+
+ p = rb_first(root);
while (p && i < npages) {
fwp = rb_entry(p, struct fw_page, rb_node);
p = rb_next(p);
- if (fwp->func_id != func_id)
- continue;
MLX5_ARRAY_SET64(manage_pages_out, out, pas, i, fwp->addr);
i++;
@@ -430,7 +480,8 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
MLX5_SET(manage_pages_in, in, input_num_entries, npages);
MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);
- mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
+ mlx5_core_dbg(dev, "func 0x%x, npages %d, outlen %d\n",
+ func_id, npages, outlen);
err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
if (err) {
mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
@@ -446,7 +497,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
}
for (i = 0; i < num_claimed; i++)
- free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]));
+ free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), func_id);
if (nclaimed)
*nclaimed = num_claimed;
@@ -560,35 +611,49 @@ static int optimal_reclaimed_pages(void)
return ret;
}
-int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
+static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev,
+ struct rb_root *root, u16 func_id)
{
unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
- struct fw_page *fwp;
- struct rb_node *p;
- int nclaimed = 0;
- int err = 0;
- do {
- p = rb_first(&dev->priv.page_root);
- if (p) {
- fwp = rb_entry(p, struct fw_page, rb_node);
- err = reclaim_pages(dev, fwp->func_id,
- optimal_reclaimed_pages(),
- &nclaimed, mlx5_core_is_ecpf(dev));
-
- if (err) {
- mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
- err);
- return err;
- }
- if (nclaimed)
- end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
+ while (!RB_EMPTY_ROOT(root)) {
+ int nclaimed;
+ int err;
+
+ err = reclaim_pages(dev, func_id, optimal_reclaimed_pages(),
+ &nclaimed, mlx5_core_is_ecpf(dev));
+ if (err) {
+ mlx5_core_warn(dev, "failed reclaiming pages (%d) for func id 0x%x\n",
+ err, func_id);
+ return err;
}
+
+ if (nclaimed)
+ end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
+
if (time_after(jiffies, end)) {
mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
break;
}
- } while (p);
+ }
+
+ return 0;
+}
+
+int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
+{
+ struct rb_root *root;
+ unsigned long id;
+ void *entry;
+
+ xa_for_each(&dev->priv.page_root_xa, id, entry) {
+ root = entry;
+ mlx5_reclaim_root_pages(dev, root, id);
+ xa_erase(&dev->priv.page_root_xa, id);
+ kfree(root);
+ }
+
+ WARN_ON(!xa_empty(&dev->priv.page_root_xa));
WARN(dev->priv.fw_pages,
"FW pages counter is %d after reclaiming all pages\n",
@@ -605,17 +670,19 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
int mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{
- dev->priv.page_root = RB_ROOT;
INIT_LIST_HEAD(&dev->priv.free_list);
dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
if (!dev->priv.pg_wq)
return -ENOMEM;
+ xa_init(&dev->priv.page_root_xa);
+
return 0;
}
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
+ xa_destroy(&dev->priv.page_root_xa);
destroy_workqueue(dev->priv.pg_wq);
}
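Note: the pagealloc rework replaces one global rb-tree (filtered by func_id
on every walk) with one rb-tree per function id, keyed in an xarray. The
lookup-or-create idiom it introduces generalizes as below; struct foo is a
placeholder, and xa_insert() failing with -EBUSY covers a concurrent insert
of the same id:

	#include <linux/xarray.h>
	#include <linux/slab.h>

	static struct foo *foo_get_or_create(struct xarray *xa, unsigned long id)
	{
		struct foo *f = xa_load(xa, id);
		int err;

		if (f)
			return f;

		f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (!f)
			return ERR_PTR(-ENOMEM);

		err = xa_insert(xa, id, f, GFP_KERNEL);	/* -EBUSY if id is taken */
		if (err) {
			kfree(f);
			return ERR_PTR(err);
		}
		return f;
	}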
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
index 31abcbb95ca2..c63f727273d8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
@@ -395,7 +395,7 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
/* Check that all mask fields were consumed */
for (i = 0; i < sizeof(struct mlx5dr_match_param); i++) {
if (((u8 *)&mask)[i] != 0) {
- mlx5dr_err(dmn, "Mask contains unsupported parameters\n");
+ mlx5dr_dbg(dmn, "Mask contains unsupported parameters\n");
return -EOPNOTSUPP;
}
}
@@ -474,14 +474,13 @@ static int dr_matcher_add_to_tbl(struct mlx5dr_matcher *matcher)
int ret;
next_matcher = NULL;
- if (!list_empty(&tbl->matcher_list))
- list_for_each_entry(tmp_matcher, &tbl->matcher_list, matcher_list) {
- if (tmp_matcher->prio >= matcher->prio) {
- next_matcher = tmp_matcher;
- break;
- }
- first = false;
+ list_for_each_entry(tmp_matcher, &tbl->matcher_list, matcher_list) {
+ if (tmp_matcher->prio >= matcher->prio) {
+ next_matcher = tmp_matcher;
+ break;
}
+ first = false;
+ }
prev_matcher = NULL;
if (next_matcher && !first)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index cd708dcc2e3a..6ec5106bc472 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -574,9 +574,8 @@ void mlx5dr_rule_update_rule_member(struct mlx5dr_ste *ste,
{
struct mlx5dr_rule_member *rule_mem;
- if (!list_empty(&ste->rule_list))
- list_for_each_entry(rule_mem, &ste->rule_list, use_ste_list)
- rule_mem->ste = new_ste;
+ list_for_each_entry(rule_mem, &ste->rule_list, use_ste_list)
+ rule_mem->ste = new_ste;
}
static void dr_rule_clean_rule_members(struct mlx5dr_rule *rule,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index 8887b2440c7d..9b08eb557a31 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -279,29 +279,9 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
/* The order of the actions must be kept; only the following
* order is supported by SW steering:
- * TX: push vlan -> modify header -> encap
+ * TX: modify header -> push vlan -> encap
* RX: decap -> pop vlan -> modify header
*/
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
- tmp_action = create_action_push_vlan(domain, &fte->action.vlan[0]);
- if (!tmp_action) {
- err = -ENOMEM;
- goto free_actions;
- }
- fs_dr_actions[fs_dr_num_actions++] = tmp_action;
- actions[num_actions++] = tmp_action;
- }
-
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
- tmp_action = create_action_push_vlan(domain, &fte->action.vlan[1]);
- if (!tmp_action) {
- err = -ENOMEM;
- goto free_actions;
- }
- fs_dr_actions[fs_dr_num_actions++] = tmp_action;
- actions[num_actions++] = tmp_action;
- }
-
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
enum mlx5dr_action_reformat_type decap_type =
DR_ACTION_REFORMAT_TYP_TNL_L2_TO_L2;
@@ -354,6 +334,26 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
actions[num_actions++] =
fte->action.modify_hdr->action.dr_action;
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
+ tmp_action = create_action_push_vlan(domain, &fte->action.vlan[0]);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+ actions[num_actions++] = tmp_action;
+ }
+
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
+ tmp_action = create_action_push_vlan(domain, &fte->action.vlan[1]);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+ actions[num_actions++] = tmp_action;
+ }
+
if (delay_encap_set)
actions[num_actions++] =
fte->action.pkt_reformat->action.dr_action;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 88cdb9bb4c4a..bdafc85fd874 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -110,7 +110,7 @@ void mlx5_query_min_inline(struct mlx5_core_dev *mdev,
case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
if (!mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode))
break;
- /* fall through */
+ fallthrough;
case MLX5_CAP_INLINE_MODE_L2:
*min_inline_mode = MLX5_INLINE_MODE_L2;
break;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index b01f8f2fab63..08d101138fbe 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -1177,14 +1177,15 @@ static void mlxsw_devlink_trap_fini(struct devlink *devlink,
static int mlxsw_devlink_trap_action_set(struct devlink *devlink,
const struct devlink_trap *trap,
- enum devlink_trap_action action)
+ enum devlink_trap_action action,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
if (!mlxsw_driver->trap_action_set)
return -EOPNOTSUPP;
- return mlxsw_driver->trap_action_set(mlxsw_core, trap, action);
+ return mlxsw_driver->trap_action_set(mlxsw_core, trap, action, extack);
}
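Note: threading extack through to the driver lets a refusal carry a
human-readable reason back to devlink userspace instead of a bare errno. A
hypothetical driver-side check (the condition is illustrative, not taken
from this patch):

	if (trap->type != DEVLINK_TRAP_TYPE_DROP) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot change action of non-drop traps");
		return -EOPNOTSUPP;
	}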
static int
@@ -1202,14 +1203,15 @@ mlxsw_devlink_trap_group_init(struct devlink *devlink,
static int
mlxsw_devlink_trap_group_set(struct devlink *devlink,
const struct devlink_trap_group *group,
- const struct devlink_trap_policer *policer)
+ const struct devlink_trap_policer *policer,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
if (!mlxsw_driver->trap_group_set)
return -EOPNOTSUPP;
- return mlxsw_driver->trap_group_set(mlxsw_core, group, policer);
+ return mlxsw_driver->trap_group_set(mlxsw_core, group, policer, extack);
}
static int
@@ -1815,7 +1817,7 @@ static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
bulk_list, cb, cb_priv, tid);
if (err) {
- kfree(trans);
+ kfree_rcu(trans, rcu);
return err;
}
return 0;
@@ -2053,11 +2055,13 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
break;
}
}
- rcu_read_unlock();
- if (!found)
+ if (!found) {
+ rcu_read_unlock();
goto drop;
+ }
rxl->func(skb, local_port, rxl_item->priv);
+ rcu_read_unlock();
return;
drop:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index c1c1e039323a..11af3308f8cc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -89,13 +89,15 @@ struct mlxsw_listener {
};
#define __MLXSW_RXL(_func, _trap_id, _en_action, _is_ctrl, _en_trap_group, \
- _dis_action, _enabled_on_register, _dis_trap_group) \
+ _dis_action, _enabled_on_register, _dis_trap_group, \
+ _mirror_reason) \
{ \
.trap_id = MLXSW_TRAP_ID_##_trap_id, \
.rx_listener = \
{ \
.func = _func, \
.local_port = MLXSW_PORT_DONT_CARE, \
+ .mirror_reason = _mirror_reason, \
.trap_id = MLXSW_TRAP_ID_##_trap_id, \
}, \
.en_action = MLXSW_REG_HPKT_ACTION_##_en_action, \
@@ -109,12 +111,17 @@ struct mlxsw_listener {
#define MLXSW_RXL(_func, _trap_id, _en_action, _is_ctrl, _trap_group, \
_dis_action) \
__MLXSW_RXL(_func, _trap_id, _en_action, _is_ctrl, _trap_group, \
- _dis_action, true, _trap_group)
+ _dis_action, true, _trap_group, 0)
#define MLXSW_RXL_DIS(_func, _trap_id, _en_action, _is_ctrl, _en_trap_group, \
_dis_action, _dis_trap_group) \
__MLXSW_RXL(_func, _trap_id, _en_action, _is_ctrl, _en_trap_group, \
- _dis_action, false, _dis_trap_group)
+ _dis_action, false, _dis_trap_group, 0)
+
+#define MLXSW_RXL_MIRROR(_func, _session_id, _trap_group, _mirror_reason) \
+ __MLXSW_RXL(_func, MIRROR_SESSION##_session_id, TRAP_TO_CPU, false, \
+ _trap_group, TRAP_TO_CPU, true, _trap_group, \
+ _mirror_reason)
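Note: compared to MLXSW_RXL(), the new helper derives the trap id from the
mirror session id and always traps to CPU, adding only the mirror reason as
a knob. A hypothetical instantiation, with every name below a placeholder
chosen purely for illustration:

	static const struct mlxsw_listener foo_listeners[] = {
		/* func, session id, trap group, mirror reason */
		MLXSW_RXL_MIRROR(foo_rx_mirrored_listener, 1, FOO_MIRROR, 0xf),
	};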
#define MLXSW_EVENTL(_func, _trap_id, _trap_group) \
{ \
@@ -326,12 +333,14 @@ struct mlxsw_driver {
const struct devlink_trap *trap, void *trap_ctx);
int (*trap_action_set)(struct mlxsw_core *mlxsw_core,
const struct devlink_trap *trap,
- enum devlink_trap_action action);
+ enum devlink_trap_action action,
+ struct netlink_ext_ack *extack);
int (*trap_group_init)(struct mlxsw_core *mlxsw_core,
const struct devlink_trap_group *group);
int (*trap_group_set)(struct mlxsw_core *mlxsw_core,
const struct devlink_trap_group *group,
- const struct devlink_trap_policer *policer);
+ const struct devlink_trap_policer *policer,
+ struct netlink_ext_ack *extack);
int (*trap_policer_init)(struct mlxsw_core *mlxsw_core,
const struct devlink_trap_policer *policer);
void (*trap_policer_fini)(struct mlxsw_core *mlxsw_core,
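
The extack parameter threaded through trap_action_set() and trap_group_set() above lets a driver op return a human-readable reason along with the error code. A hedged sketch of an op using it; the callback name is invented:

#include <net/devlink.h>
#include <linux/netlink.h>

struct mlxsw_core;	/* opaque here */

static int example_trap_action_set(struct mlxsw_core *mlxsw_core,
				   const struct devlink_trap *trap,
				   enum devlink_trap_action action,
				   struct netlink_ext_ack *extack)
{
	if (action != DEVLINK_TRAP_ACTION_DROP &&
	    action != DEVLINK_TRAP_ACTION_TRAP) {
		/* Reported back to user space over netlink */
		NL_SET_ERR_MSG_MOD(extack, "Unsupported trap action");
		return -EOPNOTSUPP;
	}
	return 0;
}
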
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index a7d86df7123f..44fa02cbb683 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -11,7 +11,7 @@
#include "reg.h"
static int mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id,
- bool *qsfp)
+ bool *qsfp, bool *cmis)
{
char eeprom_tmp[MLXSW_REG_MCIA_EEPROM_SIZE];
char mcia_pl[MLXSW_REG_MCIA_LEN];
@@ -25,15 +25,19 @@ static int mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id,
return err;
mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp);
ident = eeprom_tmp[0];
+ *cmis = false;
switch (ident) {
case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_SFP:
*qsfp = false;
break;
case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP: /* fall-through */
case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_PLUS: /* fall-through */
- case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP28: /* fall-through */
+ case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP28:
+ *qsfp = true;
+ break;
case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_DD:
*qsfp = true;
+ *cmis = true;
break;
default:
return -EINVAL;
@@ -70,8 +74,9 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module,
if (qsfp) {
/* When reading upper pages 1, 2 and 3 the offset
* starts at 128. Please refer to "QSFP+ Memory Map"
- * figure in SFF-8436 specification for graphical
- * depiction.
+ * figure in SFF-8436 specification and to "CMIS Module
+ * Memory Map" figure in CMIS specification for
+ * graphical depiction.
*/
page = MLXSW_REG_MCIA_PAGE_GET(offset);
offset -= MLXSW_REG_MCIA_EEPROM_UP_PAGE_LENGTH * page;
@@ -116,7 +121,8 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
char mcia_pl[MLXSW_REG_MCIA_LEN] = {0};
char mtmp_pl[MLXSW_REG_MTMP_LEN];
unsigned int module_temp;
- bool qsfp;
+ bool qsfp, cmis;
+ int page;
int err;
mlxsw_reg_mtmp_pack(mtmp_pl, MLXSW_REG_MTMP_MODULE_INDEX_MIN + module,
@@ -140,21 +146,28 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
*/
/* Validate module identifier value. */
- err = mlxsw_env_validate_cable_ident(core, module, &qsfp);
+ err = mlxsw_env_validate_cable_ident(core, module, &qsfp, &cmis);
if (err)
return err;
- if (qsfp)
- mlxsw_reg_mcia_pack(mcia_pl, module, 0,
- MLXSW_REG_MCIA_TH_PAGE_NUM,
+ if (qsfp) {
+ /* For CMIS modules, the module-defined thresholds are located
+ * in page 02h; for other QSFP modules, in page 03h.
+ */
+ if (cmis)
+ page = MLXSW_REG_MCIA_TH_PAGE_CMIS_NUM;
+ else
+ page = MLXSW_REG_MCIA_TH_PAGE_NUM;
+ mlxsw_reg_mcia_pack(mcia_pl, module, 0, page,
MLXSW_REG_MCIA_TH_PAGE_OFF + off,
MLXSW_REG_MCIA_TH_ITEM_SIZE,
MLXSW_REG_MCIA_I2C_ADDR_LOW);
- else
+ } else {
mlxsw_reg_mcia_pack(mcia_pl, module, 0,
MLXSW_REG_MCIA_PAGE0_LO,
off, MLXSW_REG_MCIA_TH_ITEM_SIZE,
MLXSW_REG_MCIA_I2C_ADDR_HIGH);
+ }
err = mlxsw_reg_query(core, MLXSW_REG(mcia), mcia_pl);
if (err)
@@ -221,6 +234,22 @@ int mlxsw_env_get_module_info(struct mlxsw_core *mlxsw_core, int module,
else
modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN / 2;
break;
+ case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_DD:
+ /* Use SFF_8636 as base type. ethtool should recognize specific
+ * type through the identifier value.
+ */
+ modinfo->type = ETH_MODULE_SFF_8636;
+ /* Check whether the module EEPROM is flat memory. With flat
+ * memory only page 00h (bytes 0-255) can be read; otherwise
+ * upper pages 01h and 02h can also be read. Upper pages 10h
+ * and 11h are currently not supported by the driver.
+ */
+ if (module_info[MLXSW_REG_MCIA_EEPROM_MODULE_INFO_TYPE_ID] &
+ MLXSW_REG_MCIA_EEPROM_CMIS_FLAT_MEMORY)
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ else
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ break;
default:
return -EINVAL;
}
@@ -235,8 +264,8 @@ int mlxsw_env_get_module_eeprom(struct net_device *netdev,
{
int offset = ee->offset;
unsigned int read_size;
+ bool qsfp, cmis;
int i = 0;
- bool qsfp;
int err;
if (!ee->len)
@@ -244,7 +273,7 @@ int mlxsw_env_get_module_eeprom(struct net_device *netdev,
memset(data, 0, ee->len);
/* Validate module identifier value. */
- err = mlxsw_env_validate_cable_ident(mlxsw_core, module, &qsfp);
+ err = mlxsw_env_validate_cable_ident(mlxsw_core, module, &qsfp, &cmis);
if (err)
return err;
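
Condensed, the threshold-page selection introduced above reduces to a three-way choice on the module type. A simplified sketch with the register plumbing omitted:

/* Returns the EEPROM page holding the temperature thresholds:
 * 02h for CMIS (QSFP-DD), 03h for other QSFP modules, and page 00h
 * (behind the high I2C address) for SFP.
 */
static int th_page_get(bool qsfp, bool cmis)
{
	if (!qsfp)
		return 0;
	return cmis ? 2 : 3;
}
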
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 3c5b25495751..079b080de7f7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -5595,6 +5595,7 @@ enum mlxsw_reg_htgt_trap_group {
MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST,
MLXSW_REG_HTGT_TRAP_GROUP_SP_NEIGH_DISCOVERY,
MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_EXTERNAL_ROUTE,
MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME,
MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP,
MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT,
@@ -5613,6 +5614,7 @@ enum mlxsw_reg_htgt_trap_group {
MLXSW_REG_HTGT_TRAP_GROUP_SP_L3_EXCEPTIONS,
MLXSW_REG_HTGT_TRAP_GROUP_SP_TUNNEL_DISCARDS,
MLXSW_REG_HTGT_TRAP_GROUP_SP_ACL_DISCARDS,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_BUFFER_DISCARDS,
__MLXSW_REG_HTGT_TRAP_GROUP_MAX,
MLXSW_REG_HTGT_TRAP_GROUP_MAX = __MLXSW_REG_HTGT_TRAP_GROUP_MAX - 1
@@ -8605,8 +8607,10 @@ MLXSW_ITEM32(reg, mcia, size, 0x08, 0, 16);
#define MLXSW_REG_MCIA_PAGE0_LO_OFF 0xa0
#define MLXSW_REG_MCIA_TH_ITEM_SIZE 2
#define MLXSW_REG_MCIA_TH_PAGE_NUM 3
+#define MLXSW_REG_MCIA_TH_PAGE_CMIS_NUM 2
#define MLXSW_REG_MCIA_PAGE0_LO 0
#define MLXSW_REG_MCIA_TH_PAGE_OFF 0x80
+#define MLXSW_REG_MCIA_EEPROM_CMIS_FLAT_MEMORY BIT(7)
enum mlxsw_reg_mcia_eeprom_module_info_rev_id {
MLXSW_REG_MCIA_EEPROM_MODULE_INFO_REV_ID_UNSPC = 0x00,
@@ -8625,6 +8629,7 @@ enum mlxsw_reg_mcia_eeprom_module_info_id {
enum mlxsw_reg_mcia_eeprom_module_info {
MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID,
MLXSW_REG_MCIA_EEPROM_MODULE_INFO_REV_ID,
+ MLXSW_REG_MCIA_EEPROM_MODULE_INFO_TYPE_ID,
MLXSW_REG_MCIA_EEPROM_MODULE_INFO_SIZE,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 519eb44e4097..fdf9aa8314b2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -3055,6 +3055,7 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops;
+ mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops;
mlxsw_sp->listeners = mlxsw_sp1_listener;
mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;
@@ -3084,6 +3085,7 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
+ mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
@@ -3111,6 +3113,7 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
+ mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 6ab1b6d725af..f9ba59641b4d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -177,6 +177,7 @@ struct mlxsw_sp {
const struct mlxsw_sp_ptp_ops *ptp_ops;
const struct mlxsw_sp_span_ops *span_ops;
const struct mlxsw_sp_policer_core_ops *policer_core_ops;
+ const struct mlxsw_sp_trap_ops *trap_ops;
const struct mlxsw_listener *listeners;
size_t listeners_count;
u32 lowest_shaper_bs;
@@ -983,6 +984,10 @@ struct mlxsw_sp_mall_mirror_entry {
int span_id;
};
+struct mlxsw_sp_mall_trap_entry {
+ int span_id;
+};
+
struct mlxsw_sp_mall_entry {
struct list_head list;
unsigned long cookie;
@@ -991,6 +996,7 @@ struct mlxsw_sp_mall_entry {
bool ingress;
union {
struct mlxsw_sp_mall_mirror_entry mirror;
+ struct mlxsw_sp_mall_trap_entry trap;
struct mlxsw_sp_port_sample sample;
};
struct rcu_head rcu;
@@ -1177,12 +1183,14 @@ void mlxsw_sp_trap_fini(struct mlxsw_core *mlxsw_core,
const struct devlink_trap *trap, void *trap_ctx);
int mlxsw_sp_trap_action_set(struct mlxsw_core *mlxsw_core,
const struct devlink_trap *trap,
- enum devlink_trap_action action);
+ enum devlink_trap_action action,
+ struct netlink_ext_ack *extack);
int mlxsw_sp_trap_group_init(struct mlxsw_core *mlxsw_core,
const struct devlink_trap_group *group);
int mlxsw_sp_trap_group_set(struct mlxsw_core *mlxsw_core,
const struct devlink_trap_group *group,
- const struct devlink_trap_policer *policer);
+ const struct devlink_trap_policer *policer,
+ struct netlink_ext_ack *extack);
int
mlxsw_sp_trap_policer_init(struct mlxsw_core *mlxsw_core,
const struct devlink_trap_policer *policer);
@@ -1196,6 +1204,8 @@ int
mlxsw_sp_trap_policer_counter_get(struct mlxsw_core *mlxsw_core,
const struct devlink_trap_policer *policer,
u64 *p_drops);
+int mlxsw_sp_trap_group_policer_hw_id_get(struct mlxsw_sp *mlxsw_sp, u16 id,
+ bool *p_enabled, u16 *p_hw_id);
static inline struct net *mlxsw_sp_net(struct mlxsw_sp *mlxsw_sp)
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
index 7974982533b5..b65b93a2b9bc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
@@ -121,7 +121,6 @@ int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
{
unsigned int sub_pools_count = ARRAY_SIZE(mlxsw_sp_counter_sub_pools);
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
- struct mlxsw_sp_counter_sub_pool *sub_pool;
struct mlxsw_sp_counter_pool *pool;
unsigned int map_size;
int err;
@@ -131,9 +130,9 @@ int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
if (!pool)
return -ENOMEM;
mlxsw_sp->counter_pool = pool;
- memcpy(pool->sub_pools, mlxsw_sp_counter_sub_pools,
- sub_pools_count * sizeof(*sub_pool));
pool->sub_pools_count = sub_pools_count;
+ memcpy(pool->sub_pools, mlxsw_sp_counter_sub_pools,
+ flex_array_size(pool, sub_pools, pool->sub_pools_count));
spin_lock_init(&pool->counter_pool_lock);
atomic_set(&pool->active_entries_count, 0);
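
flex_array_size() computes count * sizeof(element) for a flexible array member and saturates to SIZE_MAX on overflow; note the hunk also moves the count assignment before the copy so the helper reads a valid count. A self-contained sketch with an invented sub-pool type:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/string.h>

struct sub_pool {		/* hypothetical element type */
	unsigned int entry_size;
};

struct pool {
	size_t sub_pools_count;
	struct sub_pool sub_pools[];	/* flexible array member */
};

static void pool_fill(struct pool *pool, const struct sub_pool *src,
		      size_t count)
{
	pool->sub_pools_count = count;
	/* Saturates to SIZE_MAX on overflow, so a fortified memcpy()
	 * can detect the bug instead of quietly under-copying with a
	 * wrapped size.
	 */
	memcpy(pool->sub_pools, src,
	       flex_array_size(pool, sub_pools, pool->sub_pools_count));
}
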
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index a5ce1eec5418..964fd444bb10 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -1289,19 +1289,18 @@ struct mlxsw_sp_qevent_binding {
static LIST_HEAD(mlxsw_sp_qevent_block_cb_list);
-static int mlxsw_sp_qevent_mirror_configure(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_mall_entry *mall_entry,
- struct mlxsw_sp_qevent_binding *qevent_binding)
+static int mlxsw_sp_qevent_span_configure(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mall_entry *mall_entry,
+ struct mlxsw_sp_qevent_binding *qevent_binding,
+ const struct mlxsw_sp_span_agent_parms *agent_parms,
+ int *p_span_id)
{
struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port;
struct mlxsw_sp_span_trigger_parms trigger_parms = {};
- struct mlxsw_sp_span_agent_parms agent_parms = {
- .to_dev = mall_entry->mirror.to_dev,
- };
int span_id;
int err;
- err = mlxsw_sp_span_agent_get(mlxsw_sp, &span_id, &agent_parms);
+ err = mlxsw_sp_span_agent_get(mlxsw_sp, &span_id, agent_parms);
if (err)
return err;
@@ -1320,7 +1319,7 @@ static int mlxsw_sp_qevent_mirror_configure(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_trigger_enable;
- mall_entry->mirror.span_id = span_id;
+ *p_span_id = span_id;
return 0;
err_trigger_enable:
@@ -1333,13 +1332,13 @@ err_analyzed_port_get:
return err;
}
-static void mlxsw_sp_qevent_mirror_deconfigure(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_mall_entry *mall_entry,
- struct mlxsw_sp_qevent_binding *qevent_binding)
+static void mlxsw_sp_qevent_span_deconfigure(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_qevent_binding *qevent_binding,
+ int span_id)
{
struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port;
struct mlxsw_sp_span_trigger_parms trigger_parms = {
- .span_id = mall_entry->mirror.span_id,
+ .span_id = span_id,
};
mlxsw_sp_span_trigger_disable(mlxsw_sp_port, qevent_binding->span_trigger,
@@ -1347,7 +1346,51 @@ static void mlxsw_sp_qevent_mirror_deconfigure(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_span_agent_unbind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port,
&trigger_parms);
mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, true);
- mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->mirror.span_id);
+ mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
+}
+
+static int mlxsw_sp_qevent_mirror_configure(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mall_entry *mall_entry,
+ struct mlxsw_sp_qevent_binding *qevent_binding)
+{
+ struct mlxsw_sp_span_agent_parms agent_parms = {
+ .to_dev = mall_entry->mirror.to_dev,
+ };
+
+ return mlxsw_sp_qevent_span_configure(mlxsw_sp, mall_entry, qevent_binding,
+ &agent_parms, &mall_entry->mirror.span_id);
+}
+
+static void mlxsw_sp_qevent_mirror_deconfigure(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mall_entry *mall_entry,
+ struct mlxsw_sp_qevent_binding *qevent_binding)
+{
+ mlxsw_sp_qevent_span_deconfigure(mlxsw_sp, qevent_binding, mall_entry->mirror.span_id);
+}
+
+static int mlxsw_sp_qevent_trap_configure(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mall_entry *mall_entry,
+ struct mlxsw_sp_qevent_binding *qevent_binding)
+{
+ struct mlxsw_sp_span_agent_parms agent_parms = {};
+ int err;
+
+ err = mlxsw_sp_trap_group_policer_hw_id_get(mlxsw_sp,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_BUFFER_DROPS,
+ &agent_parms.policer_enable,
+ &agent_parms.policer_id);
+ if (err)
+ return err;
+
+ return mlxsw_sp_qevent_span_configure(mlxsw_sp, mall_entry, qevent_binding,
+ &agent_parms, &mall_entry->trap.span_id);
+}
+
+static void mlxsw_sp_qevent_trap_deconfigure(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mall_entry *mall_entry,
+ struct mlxsw_sp_qevent_binding *qevent_binding)
+{
+ mlxsw_sp_qevent_span_deconfigure(mlxsw_sp, qevent_binding, mall_entry->trap.span_id);
}
static int mlxsw_sp_qevent_entry_configure(struct mlxsw_sp *mlxsw_sp,
@@ -1357,6 +1400,8 @@ static int mlxsw_sp_qevent_entry_configure(struct mlxsw_sp *mlxsw_sp,
switch (mall_entry->type) {
case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
return mlxsw_sp_qevent_mirror_configure(mlxsw_sp, mall_entry, qevent_binding);
+ case MLXSW_SP_MALL_ACTION_TYPE_TRAP:
+ return mlxsw_sp_qevent_trap_configure(mlxsw_sp, mall_entry, qevent_binding);
default:
/* This should have been validated away. */
WARN_ON(1);
@@ -1371,6 +1416,8 @@ static void mlxsw_sp_qevent_entry_deconfigure(struct mlxsw_sp *mlxsw_sp,
switch (mall_entry->type) {
case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
return mlxsw_sp_qevent_mirror_deconfigure(mlxsw_sp, mall_entry, qevent_binding);
+ case MLXSW_SP_MALL_ACTION_TYPE_TRAP:
+ return mlxsw_sp_qevent_trap_deconfigure(mlxsw_sp, mall_entry, qevent_binding);
default:
WARN_ON(1);
return;
@@ -1490,6 +1537,8 @@ static int mlxsw_sp_qevent_mall_replace(struct mlxsw_sp *mlxsw_sp,
if (act->id == FLOW_ACTION_MIRRED) {
mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_MIRROR;
mall_entry->mirror.to_dev = act->dev;
+ } else if (act->id == FLOW_ACTION_TRAP) {
+ mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_TRAP;
} else {
NL_SET_ERR_MSG(f->common.extack, "Unsupported action");
err = -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 019ed503aadf..0521e9d48c45 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -5001,15 +5001,6 @@ static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
{
- /* Packets with link-local destination IP arriving to the router
- * are trapped to the CPU, so no need to program specific routes
- * for them. Only allow prefix routes (usually one fe80::/64) so
- * that packets are trapped for the right reason.
- */
- if ((ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_LINKLOCAL) &&
- (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)))
- return true;
-
/* Multicast routes aren't supported, so ignore them. Neighbour
* Discovery packets are specifically trapped.
*/
@@ -8078,16 +8069,6 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp->router = router;
router->mlxsw_sp = mlxsw_sp;
- router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
- err = register_inetaddr_notifier(&router->inetaddr_nb);
- if (err)
- goto err_register_inetaddr_notifier;
-
- router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
- err = register_inet6addr_notifier(&router->inet6addr_nb);
- if (err)
- goto err_register_inet6addr_notifier;
-
INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
err = __mlxsw_sp_router_init(mlxsw_sp);
if (err)
@@ -8128,12 +8109,6 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_neigh_init;
- mlxsw_sp->router->netevent_nb.notifier_call =
- mlxsw_sp_router_netevent_event;
- err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
- if (err)
- goto err_register_netevent_notifier;
-
err = mlxsw_sp_mp_hash_init(mlxsw_sp);
if (err)
goto err_mp_hash_init;
@@ -8142,6 +8117,22 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_dscp_init;
+ router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
+ err = register_inetaddr_notifier(&router->inetaddr_nb);
+ if (err)
+ goto err_register_inetaddr_notifier;
+
+ router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
+ err = register_inet6addr_notifier(&router->inet6addr_nb);
+ if (err)
+ goto err_register_inet6addr_notifier;
+
+ mlxsw_sp->router->netevent_nb.notifier_call =
+ mlxsw_sp_router_netevent_event;
+ err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
+ if (err)
+ goto err_register_netevent_notifier;
+
mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
&mlxsw_sp->router->fib_nb,
@@ -8152,10 +8143,15 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
return 0;
err_register_fib_notifier:
-err_dscp_init:
-err_mp_hash_init:
unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
err_register_netevent_notifier:
+ unregister_inet6addr_notifier(&router->inet6addr_nb);
+err_register_inet6addr_notifier:
+ unregister_inetaddr_notifier(&router->inetaddr_nb);
+err_register_inetaddr_notifier:
+ mlxsw_core_flush_owq();
+err_dscp_init:
+err_mp_hash_init:
mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
mlxsw_sp_vrs_fini(mlxsw_sp);
@@ -8174,10 +8170,6 @@ err_ipips_init:
err_rifs_init:
__mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
- unregister_inet6addr_notifier(&router->inet6addr_nb);
-err_register_inet6addr_notifier:
- unregister_inetaddr_notifier(&router->inetaddr_nb);
-err_register_inetaddr_notifier:
mutex_destroy(&mlxsw_sp->router->lock);
kfree(mlxsw_sp->router);
return err;
@@ -8188,6 +8180,9 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
&mlxsw_sp->router->fib_nb);
unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
+ unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
+ unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
+ mlxsw_core_flush_owq();
mlxsw_sp_neigh_fini(mlxsw_sp);
mlxsw_sp_vrs_fini(mlxsw_sp);
mlxsw_sp_mr_fini(mlxsw_sp);
@@ -8197,8 +8192,6 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_ipips_fini(mlxsw_sp);
mlxsw_sp_rifs_fini(mlxsw_sp);
__mlxsw_sp_router_fini(mlxsw_sp);
- unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
- unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
mutex_destroy(&mlxsw_sp->router->lock);
kfree(mlxsw_sp->router);
}
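
The router hunks above enforce a registration discipline: notifiers are registered only after everything their callbacks touch is initialized, and torn down in reverse order with the ordered workqueue flushed in between, so deferred events drain before the state disappears. The shape of that discipline, with hypothetical setup helpers:

#include <linux/inetdevice.h>
#include <linux/notifier.h>
#include "core.h"	/* for mlxsw_core_flush_owq() */

static int setup_state(void) { return 0; }	/* hypothetical */
static void teardown_state(void) { }		/* hypothetical */

static int example_event(struct notifier_block *nb, unsigned long event,
			 void *ptr)
{
	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call = example_event,
};

static int example_init(void)
{
	int err;

	err = setup_state();
	if (err)
		return err;

	/* Register last: events may fire the moment this returns and
	 * must find the state above fully initialized.
	 */
	err = register_inetaddr_notifier(&example_nb);
	if (err) {
		teardown_state();
		return err;
	}
	return 0;
}

static void example_fini(void)
{
	/* Unregister first, flush deferred work the callback may have
	 * queued, then tear the state down.
	 */
	unregister_inetaddr_notifier(&example_nb);
	mlxsw_core_flush_owq();
	teardown_state();
}
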
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index 323eaf979aea..5c959a995199 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -837,7 +837,8 @@ static int mlxsw_sp_span_policer_id_base_set(struct mlxsw_sp_span *span,
static void mlxsw_sp_span_policer_id_base_unset(struct mlxsw_sp_span *span)
{
- refcount_dec(&span->policer_id_base_ref_count);
+ if (refcount_dec_and_test(&span->policer_id_base_ref_count))
+ span->policer_id_base = 0;
}
static struct mlxsw_sp_span_entry *
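
The one-line span fix above is the classic last-reference pattern: shared state derived from the allocation may only be reset once refcount_dec_and_test() reports that the final user is gone. In isolation:

#include <linux/refcount.h>
#include <linux/types.h>

struct policer_base {
	refcount_t ref_count;
	u16 base_id;
};

static void policer_base_put(struct policer_base *pb)
{
	/* Only the last user may reset the shared base id. */
	if (refcount_dec_and_test(&pb->ref_count))
		pb->base_id = 0;
}
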
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index 157a42c63066..2e41c5519c1b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -21,6 +21,7 @@ struct mlxsw_sp_trap_group_item {
struct devlink_trap_group group;
u16 hw_group_id;
u8 priority;
+ u8 fixed_policer:1; /* When set, the policer binding cannot change */
};
#define MLXSW_SP_TRAP_LISTENERS_MAX 3
@@ -28,6 +29,7 @@ struct mlxsw_sp_trap_group_item {
struct mlxsw_sp_trap_item {
struct devlink_trap trap;
struct mlxsw_listener listeners_arr[MLXSW_SP_TRAP_LISTENERS_MAX];
+ u8 is_source:1; /* Whether the trap is a source of mirrored packets */
};
/* All driver-specific traps must be documented in
@@ -46,6 +48,11 @@ enum {
#define MLXSW_SP_TRAP_METADATA DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT
+enum {
+ /* Packet was dropped early. */
+ MLXSW_SP_MIRROR_REASON_INGRESS_WRED = 9,
+};
+
static int mlxsw_sp_rx_listener(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
u8 local_port,
struct mlxsw_sp_port *mlxsw_sp_port)
@@ -222,6 +229,11 @@ static void mlxsw_sp_rx_sample_listener(struct sk_buff *skb, u8 local_port,
DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
MLXSW_SP_TRAP_METADATA | (_metadata))
+#define MLXSW_SP_TRAP_BUFFER_DROP(_id) \
+ DEVLINK_TRAP_GENERIC(DROP, TRAP, _id, \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_BUFFER_DROPS, \
+ MLXSW_SP_TRAP_METADATA)
+
#define MLXSW_SP_TRAP_DRIVER_DROP(_id, _group_id) \
DEVLINK_TRAP_DRIVER(DROP, DROP, DEVLINK_MLXSW_TRAP_ID_##_id, \
DEVLINK_MLXSW_TRAP_NAME_##_id, \
@@ -248,6 +260,10 @@ static void mlxsw_sp_rx_sample_listener(struct sk_buff *skb, u8 local_port,
TRAP_EXCEPTION_TO_CPU, false, SP_##_en_group_id, \
SET_FW_DEFAULT, SP_##_dis_group_id)
+#define MLXSW_SP_RXL_BUFFER_DISCARD(_mirror_reason) \
+ MLXSW_RXL_MIRROR(mlxsw_sp_rx_drop_listener, 0, SP_BUFFER_DISCARDS, \
+ MLXSW_SP_MIRROR_REASON_##_mirror_reason)
+
#define MLXSW_SP_RXL_EXCEPTION(_id, _group_id, _action) \
MLXSW_RXL(mlxsw_sp_rx_mark_listener, _id, \
_action, false, SP_##_group_id, SET_FW_DEFAULT)
@@ -328,6 +344,12 @@ mlxsw_sp_trap_policer_items_arr[] = {
{
.policer = MLXSW_SP_TRAP_POLICER(18, 1024, 128),
},
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(19, 1024, 512),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(20, 10240, 4096),
+ },
};
static const struct mlxsw_sp_trap_group_item mlxsw_sp_trap_group_items_arr[] = {
@@ -422,6 +444,11 @@ static const struct mlxsw_sp_trap_group_item mlxsw_sp_trap_group_items_arr[] = {
.priority = 2,
},
{
+ .group = DEVLINK_TRAP_GROUP_GENERIC(EXTERNAL_DELIVERY, 19),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_EXTERNAL_ROUTE,
+ .priority = 1,
+ },
+ {
.group = DEVLINK_TRAP_GROUP_GENERIC(IPV6, 15),
.hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6,
.priority = 2,
@@ -882,11 +909,11 @@ static const struct mlxsw_sp_trap_item mlxsw_sp_trap_items_arr[] = {
},
},
{
- .trap = MLXSW_SP_TRAP_CONTROL(EXTERNAL_ROUTE, LOCAL_DELIVERY,
+ .trap = MLXSW_SP_TRAP_CONTROL(EXTERNAL_ROUTE, EXTERNAL_DELIVERY,
TRAP),
.listeners_arr = {
- MLXSW_SP_RXL_MARK(RTR_INGRESS0, IP2ME, TRAP_TO_CPU,
- false),
+ MLXSW_SP_RXL_MARK(RTR_INGRESS0, EXTERNAL_ROUTE,
+ TRAP_TO_CPU, false),
},
},
{
@@ -1055,10 +1082,10 @@ static int mlxsw_sp_trap_dummy_group_init(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_trap_policer_items_arr_init(struct mlxsw_sp *mlxsw_sp)
{
+ size_t arr_size = ARRAY_SIZE(mlxsw_sp_trap_policer_items_arr);
size_t elem_size = sizeof(struct mlxsw_sp_trap_policer_item);
- u64 arr_size = ARRAY_SIZE(mlxsw_sp_trap_policer_items_arr);
struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
- u64 free_policers = 0;
+ size_t free_policers = 0;
u32 last_id;
int i;
@@ -1151,6 +1178,43 @@ static void mlxsw_sp_trap_policers_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_trap_policer_items_arr_fini(mlxsw_sp);
}
+static int mlxsw_sp_trap_group_items_arr_init(struct mlxsw_sp *mlxsw_sp)
+{
+ size_t common_groups_count = ARRAY_SIZE(mlxsw_sp_trap_group_items_arr);
+ const struct mlxsw_sp_trap_group_item *spec_group_items_arr;
+ size_t elem_size = sizeof(struct mlxsw_sp_trap_group_item);
+ struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
+ size_t groups_count, spec_groups_count;
+ int err;
+
+ err = mlxsw_sp->trap_ops->groups_init(mlxsw_sp, &spec_group_items_arr,
+ &spec_groups_count);
+ if (err)
+ return err;
+
+ /* The group items array is created by concatenating the common trap
+ * group items and the ASIC-specific trap group items.
+ */
+ groups_count = common_groups_count + spec_groups_count;
+ trap->group_items_arr = kcalloc(groups_count, elem_size, GFP_KERNEL);
+ if (!trap->group_items_arr)
+ return -ENOMEM;
+
+ memcpy(trap->group_items_arr, mlxsw_sp_trap_group_items_arr,
+ elem_size * common_groups_count);
+ memcpy(trap->group_items_arr + common_groups_count,
+ spec_group_items_arr, elem_size * spec_groups_count);
+
+ trap->groups_count = groups_count;
+
+ return 0;
+}
+
+static void mlxsw_sp_trap_group_items_arr_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ kfree(mlxsw_sp->trap->group_items_arr);
+}
+
static int mlxsw_sp_trap_groups_init(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
@@ -1158,13 +1222,9 @@ static int mlxsw_sp_trap_groups_init(struct mlxsw_sp *mlxsw_sp)
struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
int err, i;
- trap->group_items_arr = kmemdup(mlxsw_sp_trap_group_items_arr,
- sizeof(mlxsw_sp_trap_group_items_arr),
- GFP_KERNEL);
- if (!trap->group_items_arr)
- return -ENOMEM;
-
- trap->groups_count = ARRAY_SIZE(mlxsw_sp_trap_group_items_arr);
+ err = mlxsw_sp_trap_group_items_arr_init(mlxsw_sp);
+ if (err)
+ return err;
for (i = 0; i < trap->groups_count; i++) {
group_item = &trap->group_items_arr[i];
@@ -1181,7 +1241,7 @@ err_trap_group_register:
group_item = &trap->group_items_arr[i];
devlink_trap_groups_unregister(devlink, &group_item->group, 1);
}
- kfree(trap->group_items_arr);
+ mlxsw_sp_trap_group_items_arr_fini(mlxsw_sp);
return err;
}
@@ -1197,7 +1257,7 @@ static void mlxsw_sp_trap_groups_fini(struct mlxsw_sp *mlxsw_sp)
group_item = &trap->group_items_arr[i];
devlink_trap_groups_unregister(devlink, &group_item->group, 1);
}
- kfree(trap->group_items_arr);
+ mlxsw_sp_trap_group_items_arr_fini(mlxsw_sp);
}
static bool
@@ -1206,6 +1266,43 @@ mlxsw_sp_trap_listener_is_valid(const struct mlxsw_listener *listener)
return listener->trap_id != 0;
}
+static int mlxsw_sp_trap_items_arr_init(struct mlxsw_sp *mlxsw_sp)
+{
+ size_t common_traps_count = ARRAY_SIZE(mlxsw_sp_trap_items_arr);
+ const struct mlxsw_sp_trap_item *spec_trap_items_arr;
+ size_t elem_size = sizeof(struct mlxsw_sp_trap_item);
+ struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
+ size_t traps_count, spec_traps_count;
+ int err;
+
+ err = mlxsw_sp->trap_ops->traps_init(mlxsw_sp, &spec_trap_items_arr,
+ &spec_traps_count);
+ if (err)
+ return err;
+
+ /* The trap items array is created by concatenating the common trap
+ * items and the ASIC-specific trap items.
+ */
+ traps_count = common_traps_count + spec_traps_count;
+ trap->trap_items_arr = kcalloc(traps_count, elem_size, GFP_KERNEL);
+ if (!trap->trap_items_arr)
+ return -ENOMEM;
+
+ memcpy(trap->trap_items_arr, mlxsw_sp_trap_items_arr,
+ elem_size * common_traps_count);
+ memcpy(trap->trap_items_arr + common_traps_count,
+ spec_trap_items_arr, elem_size * spec_traps_count);
+
+ trap->traps_count = traps_count;
+
+ return 0;
+}
+
+static void mlxsw_sp_trap_items_arr_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ kfree(mlxsw_sp->trap->trap_items_arr);
+}
+
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
@@ -1213,13 +1310,9 @@ static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
const struct mlxsw_sp_trap_item *trap_item;
int err, i;
- trap->trap_items_arr = kmemdup(mlxsw_sp_trap_items_arr,
- sizeof(mlxsw_sp_trap_items_arr),
- GFP_KERNEL);
- if (!trap->trap_items_arr)
- return -ENOMEM;
-
- trap->traps_count = ARRAY_SIZE(mlxsw_sp_trap_items_arr);
+ err = mlxsw_sp_trap_items_arr_init(mlxsw_sp);
+ if (err)
+ return err;
for (i = 0; i < trap->traps_count; i++) {
trap_item = &trap->trap_items_arr[i];
@@ -1236,7 +1329,7 @@ err_trap_register:
trap_item = &trap->trap_items_arr[i];
devlink_traps_unregister(devlink, &trap_item->trap, 1);
}
- kfree(trap->trap_items_arr);
+ mlxsw_sp_trap_items_arr_fini(mlxsw_sp);
return err;
}
@@ -1252,7 +1345,7 @@ static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
trap_item = &trap->trap_items_arr[i];
devlink_traps_unregister(devlink, &trap_item->trap, 1);
}
- kfree(trap->trap_items_arr);
+ mlxsw_sp_trap_items_arr_fini(mlxsw_sp);
}
int mlxsw_sp_devlink_traps_init(struct mlxsw_sp *mlxsw_sp)
@@ -1344,7 +1437,8 @@ void mlxsw_sp_trap_fini(struct mlxsw_core *mlxsw_core,
int mlxsw_sp_trap_action_set(struct mlxsw_core *mlxsw_core,
const struct devlink_trap *trap,
- enum devlink_trap_action action)
+ enum devlink_trap_action action,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
const struct mlxsw_sp_trap_item *trap_item;
@@ -1354,6 +1448,11 @@ int mlxsw_sp_trap_action_set(struct mlxsw_core *mlxsw_core,
if (WARN_ON(!trap_item))
return -EINVAL;
+ if (trap_item->is_source) {
+ NL_SET_ERR_MSG_MOD(extack, "Changing the action of source traps is not supported");
+ return -EOPNOTSUPP;
+ }
+
for (i = 0; i < MLXSW_SP_TRAP_LISTENERS_MAX; i++) {
const struct mlxsw_listener *listener;
bool enabled;
@@ -1384,7 +1483,7 @@ int mlxsw_sp_trap_action_set(struct mlxsw_core *mlxsw_core,
static int
__mlxsw_sp_trap_group_init(struct mlxsw_core *mlxsw_core,
const struct devlink_trap_group *group,
- u32 policer_id)
+ u32 policer_id, struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
u16 hw_policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
@@ -1395,6 +1494,11 @@ __mlxsw_sp_trap_group_init(struct mlxsw_core *mlxsw_core,
if (WARN_ON(!group_item))
return -EINVAL;
+ if (group_item->fixed_policer && policer_id != group->init_policer_id) {
+ NL_SET_ERR_MSG_MOD(extack, "Changing the policer binding of this group is not supported");
+ return -EOPNOTSUPP;
+ }
+
if (policer_id) {
struct mlxsw_sp_trap_policer_item *policer_item;
@@ -1414,16 +1518,18 @@ int mlxsw_sp_trap_group_init(struct mlxsw_core *mlxsw_core,
const struct devlink_trap_group *group)
{
return __mlxsw_sp_trap_group_init(mlxsw_core, group,
- group->init_policer_id);
+ group->init_policer_id, NULL);
}
int mlxsw_sp_trap_group_set(struct mlxsw_core *mlxsw_core,
const struct devlink_trap_group *group,
- const struct devlink_trap_policer *policer)
+ const struct devlink_trap_policer *policer,
+ struct netlink_ext_ack *extack)
{
u32 policer_id = policer ? policer->id : 0;
- return __mlxsw_sp_trap_group_init(mlxsw_core, group, policer_id);
+ return __mlxsw_sp_trap_group_init(mlxsw_core, group, policer_id,
+ extack);
}
static int
@@ -1568,3 +1674,110 @@ mlxsw_sp_trap_policer_counter_get(struct mlxsw_core *mlxsw_core,
return 0;
}
+
+int mlxsw_sp_trap_group_policer_hw_id_get(struct mlxsw_sp *mlxsw_sp, u16 id,
+ bool *p_enabled, u16 *p_hw_id)
+{
+ struct mlxsw_sp_trap_policer_item *pol_item;
+ struct mlxsw_sp_trap_group_item *gr_item;
+ u32 pol_id;
+
+ gr_item = mlxsw_sp_trap_group_item_lookup(mlxsw_sp, id);
+ if (!gr_item)
+ return -ENOENT;
+
+ pol_id = gr_item->group.init_policer_id;
+ if (!pol_id) {
+ *p_enabled = false;
+ return 0;
+ }
+
+ pol_item = mlxsw_sp_trap_policer_item_lookup(mlxsw_sp, pol_id);
+ if (WARN_ON(!pol_item))
+ return -ENOENT;
+
+ *p_enabled = true;
+ *p_hw_id = pol_item->hw_id;
+ return 0;
+}
+
+static const struct mlxsw_sp_trap_group_item
+mlxsw_sp1_trap_group_items_arr[] = {
+};
+
+static const struct mlxsw_sp_trap_item
+mlxsw_sp1_trap_items_arr[] = {
+};
+
+static int
+mlxsw_sp1_trap_groups_init(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_trap_group_item **arr,
+ size_t *p_groups_count)
+{
+ *arr = mlxsw_sp1_trap_group_items_arr;
+ *p_groups_count = ARRAY_SIZE(mlxsw_sp1_trap_group_items_arr);
+
+ return 0;
+}
+
+static int mlxsw_sp1_traps_init(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_trap_item **arr,
+ size_t *p_traps_count)
+{
+ *arr = mlxsw_sp1_trap_items_arr;
+ *p_traps_count = ARRAY_SIZE(mlxsw_sp1_trap_items_arr);
+
+ return 0;
+}
+
+const struct mlxsw_sp_trap_ops mlxsw_sp1_trap_ops = {
+ .groups_init = mlxsw_sp1_trap_groups_init,
+ .traps_init = mlxsw_sp1_traps_init,
+};
+
+static const struct mlxsw_sp_trap_group_item
+mlxsw_sp2_trap_group_items_arr[] = {
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(BUFFER_DROPS, 20),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_BUFFER_DISCARDS,
+ .priority = 0,
+ .fixed_policer = true,
+ },
+};
+
+static const struct mlxsw_sp_trap_item
+mlxsw_sp2_trap_items_arr[] = {
+ {
+ .trap = MLXSW_SP_TRAP_BUFFER_DROP(EARLY_DROP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_BUFFER_DISCARD(INGRESS_WRED),
+ },
+ .is_source = true,
+ },
+};
+
+static int
+mlxsw_sp2_trap_groups_init(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_trap_group_item **arr,
+ size_t *p_groups_count)
+{
+ *arr = mlxsw_sp2_trap_group_items_arr;
+ *p_groups_count = ARRAY_SIZE(mlxsw_sp2_trap_group_items_arr);
+
+ return 0;
+}
+
+static int mlxsw_sp2_traps_init(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_trap_item **arr,
+ size_t *p_traps_count)
+{
+ *arr = mlxsw_sp2_trap_items_arr;
+ *p_traps_count = ARRAY_SIZE(mlxsw_sp2_trap_items_arr);
+
+ return 0;
+}
+
+const struct mlxsw_sp_trap_ops mlxsw_sp2_trap_ops = {
+ .groups_init = mlxsw_sp2_trap_groups_init,
+ .traps_init = mlxsw_sp2_traps_init,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h
index 13ac412f4d53..b8df684bedef 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h
@@ -9,13 +9,13 @@
struct mlxsw_sp_trap {
struct mlxsw_sp_trap_policer_item *policer_items_arr;
- u64 policers_count; /* Number of registered policers */
+ size_t policers_count; /* Number of registered policers */
struct mlxsw_sp_trap_group_item *group_items_arr;
- u64 groups_count; /* Number of registered groups */
+ size_t groups_count; /* Number of registered groups */
struct mlxsw_sp_trap_item *trap_items_arr;
- u64 traps_count; /* Number of registered traps */
+ size_t traps_count; /* Number of registered traps */
u16 thin_policer_hw_id;
@@ -23,4 +23,16 @@ struct mlxsw_sp_trap {
unsigned long policers_usage[]; /* Usage bitmap */
};
+struct mlxsw_sp_trap_ops {
+ int (*groups_init)(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_trap_group_item **arr,
+ size_t *p_groups_count);
+ int (*traps_init)(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_trap_item **arr,
+ size_t *p_traps_count);
+};
+
+extern const struct mlxsw_sp_trap_ops mlxsw_sp1_trap_ops;
+extern const struct mlxsw_sp_trap_ops mlxsw_sp2_trap_ops;
+
#endif
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index f2d94b026d88..867c680f5917 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -497,21 +497,21 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
- /* Next ts */
- ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT);
+ /* Get the h/w timestamp */
+ ocelot_get_hwtimestamp(ocelot, &ts);
if (unlikely(!skb_match))
continue;
- /* Get the h/w timestamp */
- ocelot_get_hwtimestamp(ocelot, &ts);
-
/* Set the timestamp into the skb */
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
skb_tstamp_tx(skb_match, &shhwtstamps);
dev_kfree_skb_any(skb_match);
+
+ /* Next ts */
+ ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT);
}
}
EXPORT_SYMBOL(ocelot_get_txtstamp);
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index d2708a57f2ff..4075f5e59955 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -1299,19 +1299,21 @@ static int nixge_probe(struct platform_device *pdev)
netif_napi_add(ndev, &priv->napi, nixge_poll, NAPI_POLL_WEIGHT);
err = nixge_of_get_resources(pdev);
if (err)
- return err;
+ goto free_netdev;
__nixge_hw_set_mac_address(ndev);
priv->tx_irq = platform_get_irq_byname(pdev, "tx");
if (priv->tx_irq < 0) {
netdev_err(ndev, "could not find 'tx' irq");
- return priv->tx_irq;
+ err = priv->tx_irq;
+ goto free_netdev;
}
priv->rx_irq = platform_get_irq_byname(pdev, "rx");
if (priv->rx_irq < 0) {
netdev_err(ndev, "could not find 'rx' irq");
- return priv->rx_irq;
+ err = priv->rx_irq;
+ goto free_netdev;
}
priv->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
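
The nixge fix routes every post-allocation failure through a label that releases the net_device; without it, each early return leaked the allocation from alloc_etherdev(). The unwind shape, with a hypothetical private struct:

#include <linux/etherdevice.h>
#include <linux/platform_device.h>

struct example_priv { int dummy; };	/* hypothetical */

static int example_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	int err;

	ndev = alloc_etherdev(sizeof(struct example_priv));
	if (!ndev)
		return -ENOMEM;

	err = platform_get_irq_byname(pdev, "tx");
	if (err < 0)
		goto free_netdev;	/* was a leaking 'return err' */

	return 0;

free_netdev:
	free_netdev(ndev);
	return err;
}
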
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index 095561924bdc..3c57c331729f 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -403,8 +403,7 @@ static int ionic_get_coalesce(struct net_device *netdev,
{
struct ionic_lif *lif = netdev_priv(netdev);
- /* Tx uses Rx interrupt */
- coalesce->tx_coalesce_usecs = lif->rx_coalesce_usecs;
+ coalesce->tx_coalesce_usecs = lif->tx_coalesce_usecs;
coalesce->rx_coalesce_usecs = lif->rx_coalesce_usecs;
return 0;
@@ -417,7 +416,8 @@ static int ionic_set_coalesce(struct net_device *netdev,
struct ionic_identity *ident;
struct ionic_qcq *qcq;
unsigned int i;
- u32 coal;
+ u32 rx_coal;
+ u32 tx_coal;
ident = &lif->ionic->ident;
if (ident->dev.intr_coal_div == 0) {
@@ -426,26 +426,31 @@ static int ionic_set_coalesce(struct net_device *netdev,
return -EIO;
}
- /* Tx uses Rx interrupt, so only change Rx */
- if (coalesce->tx_coalesce_usecs != lif->rx_coalesce_usecs) {
+ /* Tx normally shares Rx interrupt, so only change Rx */
+ if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state) &&
+ coalesce->tx_coalesce_usecs != lif->rx_coalesce_usecs) {
netdev_warn(netdev, "only the rx-usecs can be changed\n");
return -EINVAL;
}
- /* Convert the usec request to a HW useable value. If they asked
+ /* Convert the usec request to a HW usable value. If they asked
* for non-zero and it resolved to zero, bump it up
*/
- coal = ionic_coal_usec_to_hw(lif->ionic, coalesce->rx_coalesce_usecs);
- if (!coal && coalesce->rx_coalesce_usecs)
- coal = 1;
-
- if (coal > IONIC_INTR_CTRL_COAL_MAX)
+ rx_coal = ionic_coal_usec_to_hw(lif->ionic, coalesce->rx_coalesce_usecs);
+ if (!rx_coal && coalesce->rx_coalesce_usecs)
+ rx_coal = 1;
+ tx_coal = ionic_coal_usec_to_hw(lif->ionic, coalesce->tx_coalesce_usecs);
+ if (!tx_coal && coalesce->tx_coalesce_usecs)
+ tx_coal = 1;
+
+ if (rx_coal > IONIC_INTR_CTRL_COAL_MAX ||
+ tx_coal > IONIC_INTR_CTRL_COAL_MAX)
return -ERANGE;
- /* Save the new value */
+ /* Save the new values */
lif->rx_coalesce_usecs = coalesce->rx_coalesce_usecs;
- if (coal != lif->rx_coalesce_hw) {
- lif->rx_coalesce_hw = coal;
+ if (rx_coal != lif->rx_coalesce_hw) {
+ lif->rx_coalesce_hw = rx_coal;
if (test_bit(IONIC_LIF_F_UP, lif->state)) {
for (i = 0; i < lif->nxqs; i++) {
@@ -457,6 +462,23 @@ static int ionic_set_coalesce(struct net_device *netdev,
}
}
+ if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
+ lif->tx_coalesce_usecs = coalesce->tx_coalesce_usecs;
+ else
+ lif->tx_coalesce_usecs = coalesce->rx_coalesce_usecs;
+ if (tx_coal != lif->tx_coalesce_hw) {
+ lif->tx_coalesce_hw = tx_coal;
+
+ if (test_bit(IONIC_LIF_F_UP, lif->state)) {
+ for (i = 0; i < lif->nxqs; i++) {
+ qcq = lif->txqcqs[i].qcq;
+ ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
+ qcq->intr.index,
+ lif->tx_coalesce_hw);
+ }
+ }
+ }
+
return 0;
}
@@ -510,29 +532,63 @@ static void ionic_get_channels(struct net_device *netdev,
/* report maximum channels */
ch->max_combined = lif->ionic->ntxqs_per_lif;
+ ch->max_rx = lif->ionic->ntxqs_per_lif / 2;
+ ch->max_tx = lif->ionic->ntxqs_per_lif / 2;
/* report current channels */
- ch->combined_count = lif->nxqs;
+ if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) {
+ ch->rx_count = lif->nxqs;
+ ch->tx_count = lif->nxqs;
+ } else {
+ ch->combined_count = lif->nxqs;
+ }
}
static void ionic_set_queuecount(struct ionic_lif *lif, void *arg)
{
struct ethtool_channels *ch = arg;
- lif->nxqs = ch->combined_count;
+ if (ch->combined_count) {
+ lif->nxqs = ch->combined_count;
+ if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) {
+ clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
+ lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
+ lif->tx_coalesce_hw = lif->rx_coalesce_hw;
+ netdev_info(lif->netdev, "Sharing queue interrupts\n");
+ }
+ } else {
+ lif->nxqs = ch->rx_count;
+ if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) {
+ set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
+ netdev_info(lif->netdev, "Splitting queue interrupts\n");
+ }
+ }
}
static int ionic_set_channels(struct net_device *netdev,
struct ethtool_channels *ch)
{
struct ionic_lif *lif = netdev_priv(netdev);
+ int new_cnt;
- if (!ch->combined_count || ch->other_count ||
- ch->rx_count || ch->tx_count)
+ if (ch->rx_count != ch->tx_count) {
+ netdev_info(netdev, "The rx and tx count must be equal\n");
return -EINVAL;
+ }
- if (ch->combined_count == lif->nxqs)
- return 0;
+ if (ch->combined_count && ch->rx_count) {
+ netdev_info(netdev, "Use either combined_count or rx/tx_count, not both\n");
+ return -EINVAL;
+ }
+
+ if (ch->combined_count)
+ new_cnt = ch->combined_count;
+ else
+ new_cnt = ch->rx_count;
+
+ if (lif->nxqs != new_cnt)
+ netdev_info(netdev, "Changing queue count from %d to %d\n",
+ lif->nxqs, new_cnt);
return ionic_reset_queues(lif, ionic_set_queuecount, ch);
}
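
The reworked ionic_set_channels() accepts exactly one of two shapes: a combined count (Tx shares the Rx interrupt) or equal rx/tx counts (split interrupts). A condensed sketch of the validation and mode selection, with an invented helper name:

#include <linux/ethtool.h>
#include <linux/errno.h>

static int pick_queue_config(const struct ethtool_channels *ch,
			     bool *split, unsigned int *nxqs)
{
	if (ch->rx_count != ch->tx_count)
		return -EINVAL;		/* queues must stay symmetric */
	if (ch->combined_count && ch->rx_count)
		return -EINVAL;		/* one mode at a time */

	*split = !ch->combined_count;
	*nxqs = ch->combined_count ? ch->combined_count : ch->rx_count;
	return 0;
}
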
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 728dd6429d80..1944bf5264db 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -616,7 +616,6 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
.index = cpu_to_le32(q->index),
.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
IONIC_QINIT_F_SG),
- .intr_index = cpu_to_le16(lif->rxqcqs[q->index].qcq->intr.index),
.pid = cpu_to_le16(q->pid),
.ring_size = ilog2(q->num_descs),
.ring_base = cpu_to_le64(q->base_pa),
@@ -624,14 +623,22 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
.sg_ring_base = cpu_to_le64(q->sg_base_pa),
},
};
+ unsigned int intr_index;
int err;
+ if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
+ intr_index = qcq->intr.index;
+ else
+ intr_index = lif->rxqcqs[q->index].qcq->intr.index;
+ ctx.cmd.q_init.intr_index = cpu_to_le16(intr_index);
+
dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver);
+ dev_dbg(dev, "txq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);
q->tail = q->info;
q->head = q->tail;
@@ -648,6 +655,10 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);
+ if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
+ netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi,
+ NAPI_POLL_WEIGHT);
+
qcq->flags |= IONIC_QCQ_F_INITED;
return 0;
@@ -684,6 +695,7 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
dev_dbg(dev, "rxq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver);
+ dev_dbg(dev, "rxq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);
q->tail = q->info;
q->head = q->tail;
@@ -700,8 +712,12 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);
- netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi,
- NAPI_POLL_WEIGHT);
+ if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
+ netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi,
+ NAPI_POLL_WEIGHT);
+ else
+ netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi,
+ NAPI_POLL_WEIGHT);
qcq->flags |= IONIC_QCQ_F_INITED;
@@ -1537,6 +1553,8 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
sg_desc_sz = sizeof(struct ionic_txq_sg_desc);
flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
+ if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
+ flags |= IONIC_QCQ_F_INTR;
for (i = 0; i < lif->nxqs; i++) {
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
lif->ntxq_descs,
@@ -1547,6 +1565,11 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
if (err)
goto err_out;
+ if (flags & IONIC_QCQ_F_INTR)
+ ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
+ lif->txqcqs[i].qcq->intr.index,
+ lif->tx_coalesce_hw);
+
lif->txqcqs[i].qcq->stats = lif->txqcqs[i].stats;
ionic_debugfs_add_qcq(lif, lif->txqcqs[i].qcq);
}
@@ -1562,13 +1585,15 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
if (err)
goto err_out;
- lif->rxqcqs[i].qcq->stats = lif->rxqcqs[i].stats;
-
ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
lif->rxqcqs[i].qcq->intr.index,
lif->rx_coalesce_hw);
- ionic_link_qcq_interrupts(lif->rxqcqs[i].qcq,
- lif->txqcqs[i].qcq);
+
+ if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
+ ionic_link_qcq_interrupts(lif->rxqcqs[i].qcq,
+ lif->txqcqs[i].qcq);
+
+ lif->rxqcqs[i].qcq->stats = lif->rxqcqs[i].stats;
ionic_debugfs_add_qcq(lif, lif->rxqcqs[i].qcq);
}
@@ -2002,7 +2027,7 @@ int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg)
netif_device_detach(lif->netdev);
err = ionic_stop(lif->netdev);
if (err)
- return err;
+ goto reset_out;
}
if (cb)
@@ -2012,6 +2037,8 @@ int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg)
err = ionic_open(lif->netdev);
netif_device_attach(lif->netdev);
}
+
+reset_out:
mutex_unlock(&lif->queue_lock);
return err;
@@ -2063,11 +2090,14 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
lif->index = index;
lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
lif->nrxq_descs = IONIC_DEF_TXRX_DESC;
+ lif->tx_budget = IONIC_TX_BUDGET_DEFAULT;
/* Convert the default coalesce value to actual hw resolution */
lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT;
lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic,
lif->rx_coalesce_usecs);
+ lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
+ lif->tx_coalesce_hw = lif->rx_coalesce_hw;
snprintf(lif->name, sizeof(lif->name), "lif%u", index);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index edad17d7aeeb..1ee3b14c8d50 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -13,6 +13,7 @@
#define IONIC_MAX_NUM_NAPI_CNTR (NAPI_POLL_WEIGHT + 1)
#define IONIC_MAX_NUM_SG_CNTR (IONIC_TX_MAX_SG_ELEMS + 1)
#define IONIC_RX_COPYBREAK_DEFAULT 256
+#define IONIC_TX_BUDGET_DEFAULT 256
struct ionic_tx_stats {
u64 dma_map_err;
@@ -136,6 +137,7 @@ enum ionic_lif_state_flags {
IONIC_LIF_F_UP,
IONIC_LIF_F_LINK_CHECK_REQUESTED,
IONIC_LIF_F_FW_RESET,
+ IONIC_LIF_F_SPLIT_INTR,
/* leave this as last */
IONIC_LIF_F_STATE_SIZE
@@ -176,6 +178,7 @@ struct ionic_lif {
unsigned int ntxq_descs;
unsigned int nrxq_descs;
u32 rx_copybreak;
+ u32 tx_budget;
unsigned int rx_mode;
u64 hw_features;
bool mc_overflow;
@@ -203,6 +206,8 @@ struct ionic_lif {
struct dentry *dentry;
u32 rx_coalesce_usecs; /* what the user asked for */
u32 rx_coalesce_hw; /* what the hw is using */
+ u32 tx_coalesce_usecs; /* what the user asked for */
+ u32 tx_coalesce_hw; /* what the hw is using */
struct work_struct tx_timeout_work;
};
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 85eb8f276a37..8107d32c2767 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -15,6 +15,10 @@ static void ionic_rx_clean(struct ionic_queue *q,
struct ionic_cq_info *cq_info,
void *cb_arg);
+static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info);
+
+static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info);
+
static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
ionic_desc_cb cb_func, void *cb_arg)
{
@@ -249,29 +253,13 @@ static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
return true;
}
-static u32 ionic_rx_walk_cq(struct ionic_cq *rxcq, u32 limit)
-{
- u32 work_done = 0;
-
- while (ionic_rx_service(rxcq, rxcq->tail)) {
- if (rxcq->tail->last)
- rxcq->done_color = !rxcq->done_color;
- rxcq->tail = rxcq->tail->next;
- DEBUG_STATS_CQE_CNT(rxcq);
-
- if (++work_done >= limit)
- break;
- }
-
- return work_done;
-}
-
void ionic_rx_flush(struct ionic_cq *cq)
{
struct ionic_dev *idev = &cq->lif->ionic->idev;
u32 work_done;
- work_done = ionic_rx_walk_cq(cq, cq->num_descs);
+ work_done = ionic_cq_service(cq, cq->num_descs,
+ ionic_rx_service, NULL, NULL);
if (work_done)
ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
@@ -331,9 +319,6 @@ static void ionic_rx_page_free(struct ionic_queue *q, struct page *page,
__free_page(page);
}
-#define IONIC_RX_RING_DOORBELL_STRIDE ((1 << 5) - 1)
-#define IONIC_RX_RING_HEAD_BUF_SZ 2048
-
void ionic_rx_fill(struct ionic_queue *q)
{
struct net_device *netdev = q->lif->netdev;
@@ -345,7 +330,6 @@ void ionic_rx_fill(struct ionic_queue *q)
unsigned int remain_len;
unsigned int seg_len;
unsigned int nfrags;
- bool ring_doorbell;
unsigned int i, j;
unsigned int len;
@@ -360,9 +344,7 @@ void ionic_rx_fill(struct ionic_queue *q)
page_info = &desc_info->pages[0];
if (page_info->page) { /* recycle the buffer */
- ring_doorbell = ((q->head->index + 1) &
- IONIC_RX_RING_DOORBELL_STRIDE) == 0;
- ionic_rxq_post(q, ring_doorbell, ionic_rx_clean, NULL);
+ ionic_rxq_post(q, false, ionic_rx_clean, NULL);
continue;
}
@@ -401,10 +383,11 @@ void ionic_rx_fill(struct ionic_queue *q)
page_info++;
}
- ring_doorbell = ((q->head->index + 1) &
- IONIC_RX_RING_DOORBELL_STRIDE) == 0;
- ionic_rxq_post(q, ring_doorbell, ionic_rx_clean, NULL);
+ ionic_rxq_post(q, false, ionic_rx_clean, NULL);
}
+
+ ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
+ q->dbval | q->head->index);
}
static void ionic_rx_fill_cb(void *arg)
@@ -436,40 +419,117 @@ void ionic_rx_empty(struct ionic_queue *q)
}
}
+int ionic_tx_napi(struct napi_struct *napi, int budget)
+{
+ struct ionic_qcq *qcq = napi_to_qcq(napi);
+ struct ionic_cq *cq = napi_to_cq(napi);
+ struct ionic_dev *idev;
+ struct ionic_lif *lif;
+ u32 work_done = 0;
+ u32 flags = 0;
+
+ lif = cq->bound_q->lif;
+ idev = &lif->ionic->idev;
+
+ work_done = ionic_cq_service(cq, budget,
+ ionic_tx_service, NULL, NULL);
+
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ flags |= IONIC_INTR_CRED_UNMASK;
+ DEBUG_STATS_INTR_REARM(cq->bound_intr);
+ }
+
+ if (work_done || flags) {
+ flags |= IONIC_INTR_CRED_RESET_COALESCE;
+ ionic_intr_credits(idev->intr_ctrl,
+ cq->bound_intr->index,
+ work_done, flags);
+ }
+
+ DEBUG_STATS_NAPI_POLL(qcq, work_done);
+
+ return work_done;
+}
+
int ionic_rx_napi(struct napi_struct *napi, int budget)
{
struct ionic_qcq *qcq = napi_to_qcq(napi);
+ struct ionic_cq *cq = napi_to_cq(napi);
+ struct ionic_dev *idev;
+ struct ionic_lif *lif;
+ u32 work_done = 0;
+ u32 flags = 0;
+
+ lif = cq->bound_q->lif;
+ idev = &lif->ionic->idev;
+
+ work_done = ionic_cq_service(cq, budget,
+ ionic_rx_service, NULL, NULL);
+
+ if (work_done)
+ ionic_rx_fill(cq->bound_q);
+
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ flags |= IONIC_INTR_CRED_UNMASK;
+ DEBUG_STATS_INTR_REARM(cq->bound_intr);
+ }
+
+ if (work_done || flags) {
+ flags |= IONIC_INTR_CRED_RESET_COALESCE;
+ ionic_intr_credits(idev->intr_ctrl,
+ cq->bound_intr->index,
+ work_done, flags);
+ }
+
+ DEBUG_STATS_NAPI_POLL(qcq, work_done);
+
+ return work_done;
+}
+
+int ionic_txrx_napi(struct napi_struct *napi, int budget)
+{
+ struct ionic_qcq *qcq = napi_to_qcq(napi);
struct ionic_cq *rxcq = napi_to_cq(napi);
unsigned int qi = rxcq->bound_q->index;
struct ionic_dev *idev;
struct ionic_lif *lif;
struct ionic_cq *txcq;
+ u32 rx_work_done = 0;
+ u32 tx_work_done = 0;
u32 work_done = 0;
u32 flags = 0;
+ bool unmask;
lif = rxcq->bound_q->lif;
idev = &lif->ionic->idev;
txcq = &lif->txqcqs[qi].qcq->cq;
- ionic_tx_flush(txcq);
+ tx_work_done = ionic_cq_service(txcq, lif->tx_budget,
+ ionic_tx_service, NULL, NULL);
- work_done = ionic_rx_walk_cq(rxcq, budget);
-
- if (work_done)
+ rx_work_done = ionic_cq_service(rxcq, budget,
+ ionic_rx_service, NULL, NULL);
+ if (rx_work_done)
ionic_rx_fill_cb(rxcq->bound_q);
- if (work_done < budget && napi_complete_done(napi, work_done)) {
+ unmask = (rx_work_done < budget) && (tx_work_done < lif->tx_budget);
+
+ if (unmask && napi_complete_done(napi, rx_work_done)) {
flags |= IONIC_INTR_CRED_UNMASK;
DEBUG_STATS_INTR_REARM(rxcq->bound_intr);
+ work_done = rx_work_done;
+ } else {
+ work_done = budget;
}
if (work_done || flags) {
flags |= IONIC_INTR_CRED_RESET_COALESCE;
ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index,
- work_done, flags);
+ tx_work_done + rx_work_done, flags);
}
- DEBUG_STATS_NAPI_POLL(qcq, work_done);
+ DEBUG_STATS_NAPI_POLL(qcq, rx_work_done);
+ DEBUG_STATS_NAPI_POLL(qcq, tx_work_done);
return work_done;
}
@@ -557,43 +617,39 @@ static void ionic_tx_clean(struct ionic_queue *q,
}
}
-void ionic_tx_flush(struct ionic_cq *cq)
+static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
{
- struct ionic_txq_comp *comp = cq->tail->cq_desc;
- struct ionic_dev *idev = &cq->lif->ionic->idev;
+ struct ionic_txq_comp *comp = cq_info->cq_desc;
struct ionic_queue *q = cq->bound_q;
struct ionic_desc_info *desc_info;
- unsigned int work_done = 0;
-
- /* walk the completed cq entries */
- while (work_done < cq->num_descs &&
- color_match(comp->color, cq->done_color)) {
-
- /* clean the related q entries, there could be
- * several q entries completed for each cq completion
- */
- do {
- desc_info = q->tail;
- q->tail = desc_info->next;
- ionic_tx_clean(q, desc_info, cq->tail,
- desc_info->cb_arg);
- desc_info->cb = NULL;
- desc_info->cb_arg = NULL;
- } while (desc_info->index != le16_to_cpu(comp->comp_index));
-
- if (cq->tail->last)
- cq->done_color = !cq->done_color;
-
- cq->tail = cq->tail->next;
- comp = cq->tail->cq_desc;
- DEBUG_STATS_CQE_CNT(cq);
-
- work_done++;
- }
+ if (!color_match(comp->color, cq->done_color))
+ return false;
+
+ /* Clean the related q entries; there may be several q entries
+ * completed for each cq completion.
+ */
+ do {
+ desc_info = q->tail;
+ q->tail = desc_info->next;
+ ionic_tx_clean(q, desc_info, cq->tail, desc_info->cb_arg);
+ desc_info->cb = NULL;
+ desc_info->cb_arg = NULL;
+ } while (desc_info->index != le16_to_cpu(comp->comp_index));
+
+ return true;
+}
+
+void ionic_tx_flush(struct ionic_cq *cq)
+{
+ struct ionic_dev *idev = &cq->lif->ionic->idev;
+ u32 work_done;
+
+ work_done = ionic_cq_service(cq, cq->num_descs,
+ ionic_tx_service, NULL, NULL);
if (work_done)
ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
- work_done, 0);
+ work_done, IONIC_INTR_CRED_RESET_COALESCE);
}
void ionic_tx_empty(struct ionic_queue *q)
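
Both removed walkers (ionic_rx_walk_cq() and the loop inside the old ionic_tx_flush()) collapse into one generic completion-queue walk parameterized by a service callback. A hedged sketch of what ionic_cq_service() amounts to, using the cq field names visible in this file:

typedef bool (*ionic_svc_cb)(struct ionic_cq *cq,
			     struct ionic_cq_info *cq_info);

static unsigned int example_cq_service(struct ionic_cq *cq,
				       unsigned int limit,
				       ionic_svc_cb cb)
{
	unsigned int work_done = 0;

	/* The callback returns false once the completion color no
	 * longer matches, i.e. nothing is left to service.
	 */
	while (cb(cq, cq->tail)) {
		if (cq->tail->last)
			cq->done_color = !cq->done_color;
		cq->tail = cq->tail->next;
		if (++work_done >= limit)
			break;
	}
	return work_done;
}
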
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
index 71973e3c35a6..a5883be0413f 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
@@ -11,6 +11,8 @@ void ionic_rx_fill(struct ionic_queue *q);
void ionic_rx_empty(struct ionic_queue *q);
void ionic_tx_empty(struct ionic_queue *q);
int ionic_rx_napi(struct napi_struct *napi, int budget);
+int ionic_tx_napi(struct napi_struct *napi, int budget);
+int ionic_txrx_napi(struct napi_struct *napi, int budget);
netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev);
#endif /* _IONIC_TXRX_H_ */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_chain.c b/drivers/net/ethernet/qlogic/qed/qed_chain.c
index f8efd36d66e0..b83d17b14e85 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_chain.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_chain.c
@@ -268,8 +268,10 @@ static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain)
chain->pbl.pp_addr_tbl = addr_tbl;
- if (chain->b_external_pbl)
+ if (chain->b_external_pbl) {
+ pbl_virt = chain->pbl_sp.table_virt;
goto alloc_pages;
+ }
size = array_size(page_cnt, sizeof(*pbl_virt));
if (unlikely(size == SIZE_MAX))
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 988d84564849..5be08f83e0aa 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -2518,11 +2518,10 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
}
DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
- "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x wol %02x\n",
+ "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %pM wwn port %llx node %llx ovlan %04x wol %02x\n",
info->pause_on_host, info->protocol,
info->bandwidth_min, info->bandwidth_max,
- info->mac[0], info->mac[1], info->mac[2],
- info->mac[3], info->mac[4], info->mac[5],
+ info->mac,
info->wwn_port, info->wwn_node,
info->ovlan, (u8)p_hwfn->hw_info.b_wol_support);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index aa215eeeb4df..f1f75b6d0421 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -3276,14 +3276,12 @@ static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
- "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
+ "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %pM, vlan 0x%04x\n",
vf->abs_vf_id, params.opcode, params.type,
params.is_rx_filter ? "RX" : "",
params.is_tx_filter ? "TX" : "",
params.vport_to_add_to,
- params.mac[0], params.mac[1],
- params.mac[2], params.mac[3],
- params.mac[4], params.mac[5], params.vlan);
+ params.mac, params.vlan);
if (!vf->vport_instance) {
DP_VERBOSE(p_hwfn,
@@ -5040,8 +5038,7 @@ static void qed_update_mac_for_vf_trust_change(struct qed_hwfn *hwfn, int vf_id)
for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
if (ether_addr_equal(vf->shadow_config.macs[i],
vf_info->mac)) {
- memset(vf->shadow_config.macs[i], 0,
- ETH_ALEN);
+ eth_zero_addr(vf->shadow_config.macs[i]);
DP_VERBOSE(hwfn, QED_MSG_IOV,
"Shadow MAC %pM removed for VF 0x%02x, VF trust mode is ON\n",
vf_info->mac, vf_id);
@@ -5050,7 +5047,7 @@ static void qed_update_mac_for_vf_trust_change(struct qed_hwfn *hwfn, int vf_id)
}
ether_addr_copy(vf_info->mac, force_mac);
- memset(vf_info->forced_mac, 0, ETH_ALEN);
+ eth_zero_addr(vf_info->forced_mac);
vf->bulletin.p_virt->valid_bitmap &=
~BIT(MAC_ADDR_FORCED);
qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
@@ -5061,7 +5058,7 @@ static void qed_update_mac_for_vf_trust_change(struct qed_hwfn *hwfn, int vf_id)
if (!vf_info->is_trusted_configured) {
u8 empty_mac[ETH_ALEN];
- memset(empty_mac, 0, ETH_ALEN);
+ eth_zero_addr(empty_mac);
for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
if (ether_addr_equal(vf->shadow_config.macs[i],
empty_mac)) {
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 1aaae3203f5a..140a392a81bb 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -144,9 +144,7 @@ static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
{
struct qede_dev *edev = netdev_priv(ndev);
- DP_VERBOSE(edev, QED_MSG_IOV,
- "Setting MAC %02x:%02x:%02x:%02x:%02x:%02x to VF [%d]\n",
- mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], vfidx);
+ DP_VERBOSE(edev, QED_MSG_IOV, "Setting MAC %pM to VF [%d]\n", mac, vfidx);
if (!is_valid_ether_addr(mac)) {
DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
@@ -2676,8 +2674,8 @@ static void qede_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
data->feat_flags |= QED_TLV_LSO;
ether_addr_copy(data->mac[0], edev->ndev->dev_addr);
- memset(data->mac[1], 0, ETH_ALEN);
- memset(data->mac[2], 0, ETH_ALEN);
+ eth_zero_addr(data->mac[1]);
+ eth_zero_addr(data->mac[2]);
/* Copy the first two UC macs */
netif_addr_lock_bh(edev->ndev);
i = 1;
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 81b0f7d3a025..5e37c8313725 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -17,7 +17,7 @@ config NET_VENDOR_SOLARFLARE
if NET_VENDOR_SOLARFLARE
config SFC
- tristate "Solarflare SFC9000/SFC9100-family support"
+ tristate "Solarflare SFC9000/SFC9100/EF100-family support"
depends on PCI
select MDIO
select CRC32
@@ -26,6 +26,9 @@ config SFC
This driver supports 10/40-gigabit Ethernet cards based on
the Solarflare SFC9000-family and SFC9100-family controllers.
+ It also supports 10/25/40/100-gigabit Ethernet cards based
+ on the Solarflare EF100 networking IP in Xilinx FPGAs.
+
To compile this driver as a module, choose M here. The module
will be called sfc.
config SFC_MTD
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index 87d093da22ca..8bd01c429f91 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -4,7 +4,9 @@ sfc-y += efx.o efx_common.o efx_channels.o nic.o \
tx.o tx_common.o tx_tso.o rx.o rx_common.o \
selftest.o ethtool.o ethtool_common.o ptp.o \
mcdi.o mcdi_port.o mcdi_port_common.o \
- mcdi_functions.o mcdi_filters.o mcdi_mon.o
+ mcdi_functions.o mcdi_filters.o mcdi_mon.o \
+ ef100.o ef100_nic.o ef100_netdev.o \
+ ef100_ethtool.o ef100_rx.o ef100_tx.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
sfc-$(CONFIG_SFC_SRIOV) += sriov.o siena_sriov.o ef10_sriov.o
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index fa7229fff2ff..4b0b2cf026a5 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -6,6 +6,7 @@
#include "net_driver.h"
#include "rx_common.h"
+#include "tx_common.h"
#include "ef10_regs.h"
#include "io.h"
#include "mcdi.h"
@@ -3977,6 +3978,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.tx_remove = efx_mcdi_tx_remove,
.tx_write = efx_ef10_tx_write,
.tx_limit_len = efx_ef10_tx_limit_len,
+ .tx_enqueue = __efx_enqueue_skb,
.rx_push_rss_config = efx_mcdi_vf_rx_push_rss_config,
.rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
.rx_probe = efx_mcdi_rx_probe,
@@ -3984,6 +3986,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.rx_remove = efx_mcdi_rx_remove,
.rx_write = efx_ef10_rx_write,
.rx_defer_refill = efx_ef10_rx_defer_refill,
+ .rx_packet = __efx_rx_packet,
.ev_probe = efx_mcdi_ev_probe,
.ev_init = efx_ef10_ev_init,
.ev_fini = efx_mcdi_ev_fini,
@@ -4038,6 +4041,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.rx_hash_key_size = 40,
.check_caps = ef10_check_caps,
.print_additional_fwver = efx_ef10_print_additional_fwver,
+ .sensor_event = efx_mcdi_sensor_event,
};
const struct efx_nic_type efx_hunt_a0_nic_type = {
@@ -4087,6 +4091,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.tx_remove = efx_mcdi_tx_remove,
.tx_write = efx_ef10_tx_write,
.tx_limit_len = efx_ef10_tx_limit_len,
+ .tx_enqueue = __efx_enqueue_skb,
.rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config,
.rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
.rx_push_rss_context_config = efx_mcdi_rx_push_rss_context_config,
@@ -4097,6 +4102,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.rx_remove = efx_mcdi_rx_remove,
.rx_write = efx_ef10_rx_write,
.rx_defer_refill = efx_ef10_rx_defer_refill,
+ .rx_packet = __efx_rx_packet,
.ev_probe = efx_mcdi_ev_probe,
.ev_init = efx_ef10_ev_init,
.ev_fini = efx_mcdi_ev_fini,
@@ -4172,4 +4178,5 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.rx_hash_key_size = 40,
.check_caps = ef10_check_caps,
.print_additional_fwver = efx_ef10_print_additional_fwver,
+ .sensor_event = efx_mcdi_sensor_event,
};
diff --git a/drivers/net/ethernet/sfc/ef100.c b/drivers/net/ethernet/sfc/ef100.c
new file mode 100644
index 000000000000..9729983f4840
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef100.c
@@ -0,0 +1,543 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2018 Solarflare Communications Inc.
+ * Copyright 2019-2020 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include <linux/module.h>
+#include <linux/aer.h>
+#include "efx_common.h"
+#include "efx_channels.h"
+#include "io.h"
+#include "ef100_nic.h"
+#include "ef100_netdev.h"
+#include "ef100_regs.h"
+#include "ef100.h"
+
+#define EFX_EF100_PCI_DEFAULT_BAR 2
+
+/* Number of bytes at the start of a vendor-specific extended capability that
+ * indicate that the capability is vendor specific, i.e. the offset from the
+ * value returned by pci_find_next_ext_capability() to the beginning of the
+ * vendor-specific capability header.
+ */
+#define PCI_EXT_CAP_HDR_LENGTH 4
+
+/* Expected size of a Xilinx continuation address table entry. */
+#define ESE_GZ_CFGBAR_CONT_CAP_MIN_LENGTH 16
+
+struct ef100_func_ctl_window {
+ bool valid;
+ unsigned int bar;
+ u64 offset;
+};
+
+static int ef100_pci_walk_xilinx_table(struct efx_nic *efx, u64 offset,
+ struct ef100_func_ctl_window *result);
+
+/* Number of bytes to offset when reading bit position x with dword accessors. */
+#define ROUND_DOWN_TO_DWORD(x) (((x) & (~31)) >> 3)
+
+#define EXTRACT_BITS(x, lbn, width) \
+ (((x) >> ((lbn) & 31)) & ((1ull << (width)) - 1))
+
+static u32 _ef100_pci_get_bar_bits_with_width(struct efx_nic *efx,
+ int structure_start,
+ int lbn, int width)
+{
+ efx_dword_t dword;
+
+ efx_readd(efx, &dword, structure_start + ROUND_DOWN_TO_DWORD(lbn));
+
+ return EXTRACT_BITS(le32_to_cpu(dword.u32[0]), lbn, width);
+}
+
+#define ef100_pci_get_bar_bits(efx, entry_location, bitdef) \
+ _ef100_pci_get_bar_bits_with_width(efx, entry_location, \
+ ESF_GZ_CFGBAR_ ## bitdef ## _LBN, \
+ ESF_GZ_CFGBAR_ ## bitdef ## _WIDTH)
+
+static int ef100_pci_parse_ef100_entry(struct efx_nic *efx, int entry_location,
+ struct ef100_func_ctl_window *result)
+{
+ u64 offset = ef100_pci_get_bar_bits(efx, entry_location, EF100_FUNC_CTL_WIN_OFF) <<
+ ESE_GZ_EF100_FUNC_CTL_WIN_OFF_SHIFT;
+ u32 bar = ef100_pci_get_bar_bits(efx, entry_location, EF100_BAR);
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "Found EF100 function control window bar=%d offset=0x%llx\n",
+ bar, offset);
+
+ if (result->valid) {
+ netif_err(efx, probe, efx->net_dev,
+ "Duplicated EF100 table entry.\n");
+ return -EINVAL;
+ }
+
+ if (bar == ESE_GZ_CFGBAR_EF100_BAR_NUM_EXPANSION_ROM ||
+ bar == ESE_GZ_CFGBAR_EF100_BAR_NUM_INVALID) {
+ netif_err(efx, probe, efx->net_dev,
+ "Bad BAR value of %d in Xilinx capabilities EF100 entry.\n",
+ bar);
+ return -EINVAL;
+ }
+
+ result->bar = bar;
+ result->offset = offset;
+ result->valid = true;
+ return 0;
+}
+
+static bool ef100_pci_does_bar_overflow(struct efx_nic *efx, int bar,
+ u64 next_entry)
+{
+ return next_entry + ESE_GZ_CFGBAR_ENTRY_HEADER_SIZE >
+ pci_resource_len(efx->pci_dev, bar);
+}
+
+/* Parse a Xilinx capabilities table entry describing a continuation to a new
+ * sub-table.
+ */
+static int ef100_pci_parse_continue_entry(struct efx_nic *efx, int entry_location,
+ struct ef100_func_ctl_window *result)
+{
+ unsigned int previous_bar;
+ efx_oword_t entry;
+ u64 offset;
+ int rc = 0;
+ u32 bar;
+
+ efx_reado(efx, &entry, entry_location);
+
+ bar = EFX_OWORD_FIELD32(entry, ESF_GZ_CFGBAR_CONT_CAP_BAR);
+
+ offset = EFX_OWORD_FIELD64(entry, ESF_GZ_CFGBAR_CONT_CAP_OFFSET) <<
+ ESE_GZ_CONT_CAP_OFFSET_BYTES_SHIFT;
+
+ previous_bar = efx->mem_bar;
+
+ if (bar == ESE_GZ_VSEC_BAR_NUM_EXPANSION_ROM ||
+ bar == ESE_GZ_VSEC_BAR_NUM_INVALID) {
+ netif_err(efx, probe, efx->net_dev,
+ "Bad BAR value of %d in Xilinx capabilities sub-table.\n",
+ bar);
+ return -EINVAL;
+ }
+
+ if (bar != previous_bar) {
+ efx_fini_io(efx);
+
+ if (ef100_pci_does_bar_overflow(efx, bar, offset)) {
+ netif_err(efx, probe, efx->net_dev,
+ "Xilinx table will overrun BAR[%d] offset=0x%llx\n",
+ bar, offset);
+ return -EINVAL;
+ }
+
+ /* Temporarily map new BAR. */
+ rc = efx_init_io(efx, bar,
+ DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH),
+ pci_resource_len(efx->pci_dev, bar));
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "Mapping new BAR for Xilinx table failed, rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ rc = ef100_pci_walk_xilinx_table(efx, offset, result);
+ if (rc)
+ return rc;
+
+ if (bar != previous_bar) {
+ efx_fini_io(efx);
+
+ /* Put old BAR back. */
+ rc = efx_init_io(efx, previous_bar,
+ DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH),
+ pci_resource_len(efx->pci_dev, previous_bar));
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "Putting old BAR back failed, rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+/* Iterate over the Xilinx capabilities table in the currently mapped BAR and
+ * call ef100_pci_parse_ef100_entry() on any EF100 entries and
+ * ef100_pci_parse_continue_entry() on any table continuations.
+ */
+static int ef100_pci_walk_xilinx_table(struct efx_nic *efx, u64 offset,
+ struct ef100_func_ctl_window *result)
+{
+ u64 current_entry = offset;
+ int rc = 0;
+
+ while (true) {
+ u32 id = ef100_pci_get_bar_bits(efx, current_entry, ENTRY_FORMAT);
+ u32 last = ef100_pci_get_bar_bits(efx, current_entry, ENTRY_LAST);
+ u32 rev = ef100_pci_get_bar_bits(efx, current_entry, ENTRY_REV);
+ u32 entry_size;
+
+ if (id == ESE_GZ_CFGBAR_ENTRY_LAST)
+ return 0;
+
+ entry_size = ef100_pci_get_bar_bits(efx, current_entry, ENTRY_SIZE);
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "Seen Xilinx table entry 0x%x size 0x%x at 0x%llx in BAR[%d]\n",
+ id, entry_size, current_entry, efx->mem_bar);
+
+ if (entry_size < sizeof(u32) * 2) {
+ netif_err(efx, probe, efx->net_dev,
+ "Xilinx table entry too short len=0x%x\n", entry_size);
+ return -EINVAL;
+ }
+
+ switch (id) {
+ case ESE_GZ_CFGBAR_ENTRY_EF100:
+ if (rev != ESE_GZ_CFGBAR_ENTRY_REV_EF100 ||
+ entry_size < ESE_GZ_CFGBAR_ENTRY_SIZE_EF100) {
+ netif_err(efx, probe, efx->net_dev,
+ "Bad length or rev for EF100 entry in Xilinx capabilities table. entry_size=%d rev=%d.\n",
+ entry_size, rev);
+ return -EINVAL;
+ }
+
+ rc = ef100_pci_parse_ef100_entry(efx, current_entry,
+ result);
+ if (rc)
+ return rc;
+ break;
+ case ESE_GZ_CFGBAR_ENTRY_CONT_CAP_ADDR:
+ if (rev != 0 || entry_size < ESE_GZ_CFGBAR_CONT_CAP_MIN_LENGTH) {
+ netif_err(efx, probe, efx->net_dev,
+ "Bad length or rev for continue entry in Xilinx capabilities table. entry_size=%d rev=%d.\n",
+ entry_size, rev);
+ return -EINVAL;
+ }
+
+ rc = ef100_pci_parse_continue_entry(efx, current_entry, result);
+ if (rc)
+ return rc;
+ break;
+ default:
+ /* Ignore unknown table entries. */
+ break;
+ }
+
+ if (last)
+ return 0;
+
+ current_entry += entry_size;
+
+ if (ef100_pci_does_bar_overflow(efx, efx->mem_bar, current_entry)) {
+ netif_err(efx, probe, efx->net_dev,
+ "Xilinx table overrun at position=0x%llx.\n",
+ current_entry);
+ return -EINVAL;
+ }
+ }
+}
+
+static int _ef100_pci_get_config_bits_with_width(struct efx_nic *efx,
+ int structure_start, int lbn,
+ int width, u32 *result)
+{
+ int rc, pos = structure_start + ROUND_DOWN_TO_DWORD(lbn);
+ u32 temp;
+
+ rc = pci_read_config_dword(efx->pci_dev, pos, &temp);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "Failed to read PCI config dword at %d\n",
+ pos);
+ return rc;
+ }
+
+ *result = EXTRACT_BITS(temp, lbn, width);
+
+ return 0;
+}
+
+#define ef100_pci_get_config_bits(efx, entry_location, bitdef, result) \
+ _ef100_pci_get_config_bits_with_width(efx, entry_location, \
+ ESF_GZ_VSEC_ ## bitdef ## _LBN, \
+ ESF_GZ_VSEC_ ## bitdef ## _WIDTH, result)
+
+/* Call ef100_pci_walk_xilinx_table() for the Xilinx capabilities table pointed
+ * to by this PCI_EXT_CAP_ID_VNDR.
+ */
+static int ef100_pci_parse_xilinx_cap(struct efx_nic *efx, int vndr_cap,
+ bool has_offset_hi,
+ struct ef100_func_ctl_window *result)
+{
+ u32 offset_high = 0;
+ u32 offset_lo = 0;
+ u64 offset = 0;
+ u32 bar = 0;
+ int rc = 0;
+
+ rc = ef100_pci_get_config_bits(efx, vndr_cap, TBL_BAR, &bar);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "Failed to read ESF_GZ_VSEC_TBL_BAR, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ if (bar == ESE_GZ_CFGBAR_CONT_CAP_BAR_NUM_EXPANSION_ROM ||
+ bar == ESE_GZ_CFGBAR_CONT_CAP_BAR_NUM_INVALID) {
+ netif_err(efx, probe, efx->net_dev,
+ "Bad BAR value of %d in Xilinx capabilities sub-table.\n",
+ bar);
+ return -EINVAL;
+ }
+
+ rc = ef100_pci_get_config_bits(efx, vndr_cap, TBL_OFF_LO, &offset_lo);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "Failed to read ESF_GZ_VSEC_TBL_OFF_LO, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ /* Get optional extension to 64bit offset. */
+ if (has_offset_hi) {
+ rc = ef100_pci_get_config_bits(efx, vndr_cap, TBL_OFF_HI, &offset_high);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "Failed to read ESF_GZ_VSEC_TBL_OFF_HI, rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ offset = (((u64)offset_lo) << ESE_GZ_VSEC_TBL_OFF_LO_BYTES_SHIFT) |
+ (((u64)offset_high) << ESE_GZ_VSEC_TBL_OFF_HI_BYTES_SHIFT);
+
+ if (offset > pci_resource_len(efx->pci_dev, bar) - sizeof(u32) * 2) {
+ netif_err(efx, probe, efx->net_dev,
+ "Xilinx table will overrun BAR[%d] offset=0x%llx\n",
+ bar, offset);
+ return -EINVAL;
+ }
+
+ /* Temporarily map BAR. */
+ rc = efx_init_io(efx, bar,
+ DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH),
+ pci_resource_len(efx->pci_dev, bar));
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "efx_init_io failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = ef100_pci_walk_xilinx_table(efx, offset, result);
+
+ /* Unmap temporarily mapped BAR. */
+ efx_fini_io(efx);
+ return rc;
+}
+
+/* Call ef100_pci_parse_ef100_entry() for each Xilinx PCI_EXT_CAP_ID_VNDR
+ * capability.
+ */
+static int ef100_pci_find_func_ctrl_window(struct efx_nic *efx,
+ struct ef100_func_ctl_window *result)
+{
+ int num_xilinx_caps = 0;
+ int cap = 0;
+
+ result->valid = false;
+
+ while ((cap = pci_find_next_ext_capability(efx->pci_dev, cap, PCI_EXT_CAP_ID_VNDR)) != 0) {
+ int vndr_cap = cap + PCI_EXT_CAP_HDR_LENGTH;
+ u32 vsec_ver = 0;
+ u32 vsec_len = 0;
+ u32 vsec_id = 0;
+ int rc = 0;
+
+ num_xilinx_caps++;
+
+ rc = ef100_pci_get_config_bits(efx, vndr_cap, ID, &vsec_id);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "Failed to read ESF_GZ_VSEC_ID, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = ef100_pci_get_config_bits(efx, vndr_cap, VER, &vsec_ver);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "Failed to read ESF_GZ_VSEC_VER, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ /* Get length of whole capability - i.e. starting at cap */
+ rc = ef100_pci_get_config_bits(efx, vndr_cap, LEN, &vsec_len);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "Failed to read ESF_GZ_VSEC_LEN, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ if (vsec_id == ESE_GZ_XILINX_VSEC_ID &&
+ vsec_ver == ESE_GZ_VSEC_VER_XIL_CFGBAR &&
+ vsec_len >= ESE_GZ_VSEC_LEN_MIN) {
+ bool has_offset_hi = (vsec_len >= ESE_GZ_VSEC_LEN_HIGH_OFFT);
+
+ rc = ef100_pci_parse_xilinx_cap(efx, vndr_cap,
+ has_offset_hi, result);
+ if (rc)
+ return rc;
+ }
+ }
+
+ if (num_xilinx_caps && !result->valid) {
+ netif_err(efx, probe, efx->net_dev,
+ "Seen %d Xilinx tables, but no EF100 entry.\n",
+ num_xilinx_caps);
+ return -EINVAL;
+ }
+
+ return 0;
+}
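Annotation: the discovery loop above is ordinary PCIe extended-capability iteration; a self-contained sketch of the same walk, reduced to counting matching capabilities (pci_find_next_ext_capability() and PCI_EXT_CAP_ID_VNDR are the real kernel interfaces; error handling and VSEC parsing are omitted):

/* Sketch: count vendor-specific extended capabilities the way the loop
 * above iterates them.
 */
static int count_vndr_ext_caps(struct pci_dev *pdev)
{
	int cap = 0, count = 0;

	while ((cap = pci_find_next_ext_capability(pdev, cap,
						   PCI_EXT_CAP_ID_VNDR)) != 0)
		count++;

	return count;
}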
+
+/* Final NIC shutdown
+ * This is called only at module unload (or hotplug removal). A PF can call
+ * this on its VFs to ensure they are unbound first.
+ */
+static void ef100_pci_remove(struct pci_dev *pci_dev)
+{
+ struct efx_nic *efx;
+
+ efx = pci_get_drvdata(pci_dev);
+ if (!efx)
+ return;
+
+ rtnl_lock();
+ dev_close(efx->net_dev);
+ rtnl_unlock();
+
+ /* Unregistering our netdev notifier triggers unbinding of TC indirect
+ * blocks, so we have to do it before PCI removal.
+ */
+ unregister_netdevice_notifier(&efx->netdev_notifier);
+ ef100_remove(efx);
+ efx_fini_io(efx);
+ netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
+
+ pci_set_drvdata(pci_dev, NULL);
+ efx_fini_struct(efx);
+ free_netdev(efx->net_dev);
+
+ pci_disable_pcie_error_reporting(pci_dev);
+}
+
+static int ef100_pci_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *entry)
+{
+ struct ef100_func_ctl_window fcw = { 0 };
+ struct net_device *net_dev;
+ struct efx_nic *efx;
+ int rc;
+
+ /* Allocate and initialise a struct net_device and struct efx_nic */
+ net_dev = alloc_etherdev_mq(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES);
+ if (!net_dev)
+ return -ENOMEM;
+ efx = netdev_priv(net_dev);
+ efx->type = (const struct efx_nic_type *)entry->driver_data;
+
+ pci_set_drvdata(pci_dev, efx);
+ SET_NETDEV_DEV(net_dev, &pci_dev->dev);
+ rc = efx_init_struct(efx, pci_dev, net_dev);
+ if (rc)
+ goto fail;
+
+ efx->vi_stride = EF100_DEFAULT_VI_STRIDE;
+ netif_info(efx, probe, efx->net_dev,
+ "Solarflare EF100 NIC detected\n");
+
+ rc = ef100_pci_find_func_ctrl_window(efx, &fcw);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "Error looking for ef100 function control window, rc=%d\n",
+ rc);
+ goto fail;
+ }
+
+ if (!fcw.valid) {
+ /* Extended capability not found - use defaults. */
+ fcw.bar = EFX_EF100_PCI_DEFAULT_BAR;
+ fcw.offset = 0;
+ fcw.valid = true;
+ }
+
+	if (fcw.offset > pci_resource_len(efx->pci_dev, fcw.bar) - ESE_GZ_FCW_LEN) {
+		netif_err(efx, probe, efx->net_dev,
+			  "Func control window overruns BAR\n");
+		rc = -EIO;	/* otherwise probe would "fail" with rc == 0 */
+		goto fail;
+	}
+
+ /* Set up basic I/O (BAR mappings etc) */
+ rc = efx_init_io(efx, fcw.bar,
+ DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH),
+ pci_resource_len(efx->pci_dev, fcw.bar));
+ if (rc)
+ goto fail;
+
+ efx->reg_base = fcw.offset;
+
+ efx->netdev_notifier.notifier_call = ef100_netdev_event;
+ rc = register_netdevice_notifier(&efx->netdev_notifier);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "Failed to register netdevice notifier, rc=%d\n", rc);
+ goto fail;
+ }
+
+ rc = efx->type->probe(efx);
+ if (rc)
+ goto fail;
+
+ netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
+
+ return 0;
+
+fail:
+ ef100_pci_remove(pci_dev);
+ return rc;
+}
+
+/* PCI device ID table */
+static const struct pci_device_id ef100_pci_table[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_XILINX, 0x0100), /* Riverhead PF */
+ .driver_data = (unsigned long) &ef100_pf_nic_type },
+ {PCI_DEVICE(PCI_VENDOR_ID_XILINX, 0x1100), /* Riverhead VF */
+ .driver_data = (unsigned long) &ef100_vf_nic_type },
+ {0} /* end of list */
+};
+
+struct pci_driver ef100_pci_driver = {
+ .name = "sfc_ef100",
+ .id_table = ef100_pci_table,
+ .probe = ef100_pci_probe,
+ .remove = ef100_pci_remove,
+ .err_handler = &efx_err_handlers,
+};
+
+MODULE_DEVICE_TABLE(pci, ef100_pci_table);
diff --git a/drivers/net/ethernet/sfc/ef100.h b/drivers/net/ethernet/sfc/ef100.h
new file mode 100644
index 000000000000..a63f0ff6641b
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef100.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ * Copyright 2019-2020 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+extern struct pci_driver ef100_pci_driver;
diff --git a/drivers/net/ethernet/sfc/ef100_ethtool.c b/drivers/net/ethernet/sfc/ef100_ethtool.c
new file mode 100644
index 000000000000..729c425d0f78
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef100_ethtool.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ * Copyright 2019-2020 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include "net_driver.h"
+#include "efx.h"
+#include "mcdi_port_common.h"
+#include "ethtool_common.h"
+#include "ef100_ethtool.h"
+#include "mcdi_functions.h"
+
+/* Ethtool options available
+ */
+const struct ethtool_ops ef100_ethtool_ops = {
+ .get_drvinfo = efx_ethtool_get_drvinfo,
+};
diff --git a/drivers/net/ethernet/sfc/ef100_ethtool.h b/drivers/net/ethernet/sfc/ef100_ethtool.h
new file mode 100644
index 000000000000..6efda72dfc6c
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef100_ethtool.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ * Copyright 2019-2020 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+extern const struct ethtool_ops ef100_ethtool_ops;
diff --git a/drivers/net/ethernet/sfc/ef100_netdev.c b/drivers/net/ethernet/sfc/ef100_netdev.c
new file mode 100644
index 000000000000..63c311ba28b9
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef100_netdev.c
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ * Copyright 2019-2020 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#include "net_driver.h"
+#include "mcdi_port_common.h"
+#include "mcdi_functions.h"
+#include "efx_common.h"
+#include "efx_channels.h"
+#include "tx_common.h"
+#include "ef100_netdev.h"
+#include "ef100_ethtool.h"
+#include "nic_common.h"
+#include "ef100_nic.h"
+#include "ef100_tx.h"
+#include "ef100_regs.h"
+#include "mcdi_filters.h"
+#include "rx_common.h"
+
+static void ef100_update_name(struct efx_nic *efx)
+{
+ strcpy(efx->name, efx->net_dev->name);
+}
+
+static int ef100_alloc_vis(struct efx_nic *efx, unsigned int *allocated_vis)
+{
+ /* EF100 uses a single TXQ per channel, as all checksum offloading
+ * is configured in the TX descriptor, and there is no TX Pacer for
+ * HIGHPRI queues.
+ */
+ unsigned int tx_vis = efx->n_tx_channels + efx->n_extra_tx_channels;
+ unsigned int rx_vis = efx->n_rx_channels;
+ unsigned int min_vis, max_vis;
+
+ EFX_WARN_ON_PARANOID(efx->tx_queues_per_channel != 1);
+
+ tx_vis += efx->n_xdp_channels * efx->xdp_tx_per_channel;
+
+ max_vis = max(rx_vis, tx_vis);
+ /* Currently don't handle resource starvation and only accept
+ * our maximum needs and no less.
+ */
+ min_vis = max_vis;
+
+ return efx_mcdi_alloc_vis(efx, min_vis, max_vis,
+ NULL, allocated_vis);
+}
+
+static int ef100_remap_bar(struct efx_nic *efx, int max_vis)
+{
+ unsigned int uc_mem_map_size;
+ void __iomem *membase;
+
+ efx->max_vis = max_vis;
+ uc_mem_map_size = PAGE_ALIGN(max_vis * efx->vi_stride);
+
+ /* Extend the original UC mapping of the memory BAR */
+ membase = ioremap(efx->membase_phys, uc_mem_map_size);
+ if (!membase) {
+ netif_err(efx, probe, efx->net_dev,
+ "could not extend memory BAR to %x\n",
+ uc_mem_map_size);
+ return -ENOMEM;
+ }
+ iounmap(efx->membase);
+ efx->membase = membase;
+ return 0;
+}
+
+/* Context: process, rtnl_lock() held.
+ * Note that the kernel will ignore our return code; this method
+ * should really be a void.
+ */
+static int ef100_net_stop(struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
+ raw_smp_processor_id());
+
+ netif_stop_queue(net_dev);
+ efx_stop_all(efx);
+ efx_mcdi_mac_fini_stats(efx);
+ efx_disable_interrupts(efx);
+ efx_clear_interrupt_affinity(efx);
+ efx_nic_fini_interrupt(efx);
+ efx_remove_filters(efx);
+ efx_fini_napi(efx);
+ efx_remove_channels(efx);
+ efx_mcdi_free_vis(efx);
+ efx_remove_interrupts(efx);
+
+ return 0;
+}
+
+/* Context: process, rtnl_lock() held. */
+static int ef100_net_open(struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ unsigned int allocated_vis;
+ int rc;
+
+ ef100_update_name(efx);
+ netif_dbg(efx, ifup, net_dev, "opening device on CPU %d\n",
+ raw_smp_processor_id());
+
+ rc = efx_check_disabled(efx);
+ if (rc)
+ goto fail;
+
+ rc = efx_probe_interrupts(efx);
+ if (rc)
+ goto fail;
+
+ rc = efx_set_channels(efx);
+ if (rc)
+ goto fail;
+
+ rc = efx_mcdi_free_vis(efx);
+ if (rc)
+ goto fail;
+
+ rc = ef100_alloc_vis(efx, &allocated_vis);
+ if (rc)
+ goto fail;
+
+	rc = efx_probe_channels(efx);
+	if (rc)
+		goto fail;	/* unwind via ef100_net_stop(), like the other error paths */
+
+ rc = ef100_remap_bar(efx, allocated_vis);
+ if (rc)
+ goto fail;
+
+ efx_init_napi(efx);
+
+ rc = efx_probe_filters(efx);
+ if (rc)
+ goto fail;
+
+ rc = efx_nic_init_interrupt(efx);
+ if (rc)
+ goto fail;
+ efx_set_interrupt_affinity(efx);
+
+ rc = efx_enable_interrupts(efx);
+ if (rc)
+ goto fail;
+
+ /* in case the MC rebooted while we were stopped, consume the change
+ * to the warm reboot count
+ */
+ (void) efx_mcdi_poll_reboot(efx);
+
+ rc = efx_mcdi_mac_init_stats(efx);
+ if (rc)
+ goto fail;
+
+ efx_start_all(efx);
+
+ /* Link state detection is normally event-driven; we have
+ * to poll now because we could have missed a change
+ */
+ mutex_lock(&efx->mac_lock);
+ if (efx_mcdi_phy_poll(efx))
+ efx_link_status_changed(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ return 0;
+
+fail:
+ ef100_net_stop(net_dev);
+ return rc;
+}
+
+/* Initiate a packet transmission. We use one channel per CPU
+ * (sharing when we have more CPUs than channels).
+ *
+ * Context: non-blocking.
+ * Note that returning anything other than NETDEV_TX_OK will cause the
+ * OS to free the skb.
+ */
+static netdev_tx_t ef100_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_tx_queue *tx_queue;
+ struct efx_channel *channel;
+ int rc;
+
+	channel = efx_get_tx_channel(efx, skb_get_queue_mapping(skb));
+	if (!efx->n_channels || !efx->n_tx_channels || !channel) {
+		netif_stop_queue(net_dev);
+		goto err;
+	}
+
+	netif_vdbg(efx, tx_queued, efx->net_dev,
+		   "%s len %d data %d channel %d\n", __func__,
+		   skb->len, skb->data_len, channel->channel);
+
+ tx_queue = &channel->tx_queue[0];
+ rc = ef100_enqueue_skb(tx_queue, skb);
+ if (rc == 0)
+ return NETDEV_TX_OK;
+
+err:
+ net_dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops ef100_netdev_ops = {
+ .ndo_open = ef100_net_open,
+ .ndo_stop = ef100_net_stop,
+ .ndo_start_xmit = ef100_hard_start_xmit,
+ .ndo_get_stats64 = efx_net_stats,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_rx_mode = efx_set_rx_mode, /* Lookout */
+ .ndo_get_phys_port_id = efx_get_phys_port_id,
+ .ndo_get_phys_port_name = efx_get_phys_port_name,
+#ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = efx_filter_rfs,
+#endif
+};
+
+/* Netdev registration
+ */
+int ef100_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct efx_nic *efx = container_of(this, struct efx_nic, netdev_notifier);
+ struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);
+
+ if (netdev_priv(net_dev) == efx && event == NETDEV_CHANGENAME)
+ ef100_update_name(efx);
+
+ return NOTIFY_DONE;
+}
+
+int ef100_register_netdev(struct efx_nic *efx)
+{
+ struct net_device *net_dev = efx->net_dev;
+ int rc;
+
+ net_dev->watchdog_timeo = 5 * HZ;
+ net_dev->irq = efx->pci_dev->irq;
+ net_dev->netdev_ops = &ef100_netdev_ops;
+ net_dev->min_mtu = EFX_MIN_MTU;
+ net_dev->max_mtu = EFX_MAX_MTU;
+ net_dev->ethtool_ops = &ef100_ethtool_ops;
+
+ rtnl_lock();
+
+ rc = dev_alloc_name(net_dev, net_dev->name);
+ if (rc < 0)
+ goto fail_locked;
+ ef100_update_name(efx);
+
+ rc = register_netdevice(net_dev);
+ if (rc)
+ goto fail_locked;
+
+ /* Always start with carrier off; PHY events will detect the link */
+ netif_carrier_off(net_dev);
+
+ efx->state = STATE_READY;
+ rtnl_unlock();
+ efx_init_mcdi_logging(efx);
+
+ return 0;
+
+fail_locked:
+ rtnl_unlock();
+ netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
+ return rc;
+}
+
+void ef100_unregister_netdev(struct efx_nic *efx)
+{
+ if (efx_dev_registered(efx)) {
+ efx_fini_mcdi_logging(efx);
+ efx->state = STATE_UNINIT;
+ unregister_netdev(efx->net_dev);
+ }
+}
diff --git a/drivers/net/ethernet/sfc/ef100_netdev.h b/drivers/net/ethernet/sfc/ef100_netdev.h
new file mode 100644
index 000000000000..d40abb7cc086
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef100_netdev.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ * Copyright 2019-2020 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/netdevice.h>
+
+int ef100_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr);
+int ef100_register_netdev(struct efx_nic *efx);
+void ef100_unregister_netdev(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c
new file mode 100644
index 000000000000..8a2126fec078
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef100_nic.c
@@ -0,0 +1,1276 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ * Copyright 2019-2020 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "ef100_nic.h"
+#include "efx_common.h"
+#include "efx_channels.h"
+#include "io.h"
+#include "selftest.h"
+#include "ef100_regs.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+#include "mcdi_port_common.h"
+#include "mcdi_functions.h"
+#include "mcdi_filters.h"
+#include "ef100_rx.h"
+#include "ef100_tx.h"
+#include "ef100_netdev.h"
+
+#define EF100_MAX_VIS 4096
+#define EF100_NUM_MCDI_BUFFERS 1
+#define MCDI_BUF_LEN (8 + MCDI_CTL_SDU_LEN_MAX)
+
+#define EF100_RESET_PORT ((ETH_RESET_MAC | ETH_RESET_PHY) << ETH_RESET_SHARED_SHIFT)
+
+/* MCDI
+ */
+static u8 *ef100_mcdi_buf(struct efx_nic *efx, u8 bufid, dma_addr_t *dma_addr)
+{
+ struct ef100_nic_data *nic_data = efx->nic_data;
+
+ if (dma_addr)
+ *dma_addr = nic_data->mcdi_buf.dma_addr +
+ bufid * ALIGN(MCDI_BUF_LEN, 256);
+ return nic_data->mcdi_buf.addr + bufid * ALIGN(MCDI_BUF_LEN, 256);
+}
+
+static int ef100_get_warm_boot_count(struct efx_nic *efx)
+{
+ efx_dword_t reg;
+
+ efx_readd(efx, &reg, efx_reg(efx, ER_GZ_MC_SFT_STATUS));
+
+ if (EFX_DWORD_FIELD(reg, EFX_DWORD_0) == 0xffffffff) {
+ netif_err(efx, hw, efx->net_dev, "Hardware unavailable\n");
+ efx->state = STATE_DISABLED;
+ return -ENETDOWN;
+ } else {
+ return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ?
+ EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO;
+ }
+}
+
+static void ef100_mcdi_request(struct efx_nic *efx,
+ const efx_dword_t *hdr, size_t hdr_len,
+ const efx_dword_t *sdu, size_t sdu_len)
+{
+ dma_addr_t dma_addr;
+ u8 *pdu = ef100_mcdi_buf(efx, 0, &dma_addr);
+
+ memcpy(pdu, hdr, hdr_len);
+ memcpy(pdu + hdr_len, sdu, sdu_len);
+ wmb();
+
+ /* The hardware provides 'low' and 'high' (doorbell) registers
+ * for passing the 64-bit address of an MCDI request to
+ * firmware. However the dwords are swapped by firmware. The
+ * least significant bits of the doorbell are then 0 for all
+ * MCDI requests due to alignment.
+ */
+ _efx_writed(efx, cpu_to_le32((u64)dma_addr >> 32), efx_reg(efx, ER_GZ_MC_DB_LWRD));
+ _efx_writed(efx, cpu_to_le32((u32)dma_addr), efx_reg(efx, ER_GZ_MC_DB_HWRD));
+}
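Annotation: because firmware swaps the two dwords, the high half of the DMA address goes to the LWRD register and the low half to HWRD, as the writes above show. A concrete example (address chosen for illustration only):

/* For dma_addr = 0x0000001234567800ULL:
 *   ER_GZ_MC_DB_LWRD receives the high dword, 0x00000012;
 *   ER_GZ_MC_DB_HWRD receives the low dword,  0x34567800.
 * The low bits of the HWRD write are 0 because the MCDI buffer is
 * 256-byte aligned (see ALIGN(MCDI_BUF_LEN, 256) in ef100_mcdi_buf()).
 */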
+
+static bool ef100_mcdi_poll_response(struct efx_nic *efx)
+{
+ const efx_dword_t hdr =
+ *(const efx_dword_t *)(ef100_mcdi_buf(efx, 0, NULL));
+
+ rmb();
+ return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
+}
+
+static void ef100_mcdi_read_response(struct efx_nic *efx,
+ efx_dword_t *outbuf, size_t offset,
+ size_t outlen)
+{
+ const u8 *pdu = ef100_mcdi_buf(efx, 0, NULL);
+
+ memcpy(outbuf, pdu + offset, outlen);
+}
+
+static int ef100_mcdi_poll_reboot(struct efx_nic *efx)
+{
+ struct ef100_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ rc = ef100_get_warm_boot_count(efx);
+ if (rc < 0) {
+ /* The firmware is presumably in the process of
+ * rebooting. However, we are supposed to report each
+ * reboot just once, so we must only do that once we
+ * can read and store the updated warm boot count.
+ */
+ return 0;
+ }
+
+ if (rc == nic_data->warm_boot_count)
+ return 0;
+
+ nic_data->warm_boot_count = rc;
+
+ return -EIO;
+}
+
+static void ef100_mcdi_reboot_detected(struct efx_nic *efx)
+{
+}
+
+/* MCDI calls
+ */
+static int ef100_get_mac_address(struct efx_nic *efx, u8 *mac_address)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
+ return -EIO;
+
+ ether_addr_copy(mac_address,
+ MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE));
+ return 0;
+}
+
+static int efx_ef100_init_datapath_caps(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V7_OUT_LEN);
+ struct ef100_nic_data *nic_data = efx->nic_data;
+ u8 vi_window_mode;
+ size_t outlen;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) {
+ netif_err(efx, drv, efx->net_dev,
+ "unable to read datapath firmware capabilities\n");
+ return -EIO;
+ }
+
+ nic_data->datapath_caps = MCDI_DWORD(outbuf,
+ GET_CAPABILITIES_OUT_FLAGS1);
+ nic_data->datapath_caps2 = MCDI_DWORD(outbuf,
+ GET_CAPABILITIES_V2_OUT_FLAGS2);
+ if (outlen < MC_CMD_GET_CAPABILITIES_V7_OUT_LEN)
+ nic_data->datapath_caps3 = 0;
+ else
+ nic_data->datapath_caps3 = MCDI_DWORD(outbuf,
+ GET_CAPABILITIES_V7_OUT_FLAGS3);
+
+ vi_window_mode = MCDI_BYTE(outbuf,
+ GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE);
+ rc = efx_mcdi_window_mode_to_stride(efx, vi_window_mode);
+ if (rc)
+ return rc;
+
+ if (efx_ef100_has_cap(nic_data->datapath_caps2, TX_TSO_V3))
+ efx->net_dev->features |= NETIF_F_TSO | NETIF_F_TSO6;
+ efx->num_mac_stats = MCDI_WORD(outbuf,
+ GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS);
+ netif_dbg(efx, probe, efx->net_dev,
+ "firmware reports num_mac_stats = %u\n",
+ efx->num_mac_stats);
+ return 0;
+}
+
+/* Event handling
+ */
+static int ef100_ev_probe(struct efx_channel *channel)
+{
+ /* Allocate an extra descriptor for the QMDA status completion entry */
+ return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
+ (channel->eventq_mask + 2) *
+ sizeof(efx_qword_t),
+ GFP_KERNEL);
+}
+
+static int ef100_ev_init(struct efx_channel *channel)
+{
+ struct ef100_nic_data *nic_data = channel->efx->nic_data;
+
+ /* initial phase is 0 */
+ clear_bit(channel->channel, nic_data->evq_phases);
+
+ return efx_mcdi_ev_init(channel, false, false);
+}
+
+static void ef100_ev_read_ack(struct efx_channel *channel)
+{
+ efx_dword_t evq_prime;
+
+ EFX_POPULATE_DWORD_2(evq_prime,
+ ERF_GZ_EVQ_ID, channel->channel,
+ ERF_GZ_IDX, channel->eventq_read_ptr &
+ channel->eventq_mask);
+
+ efx_writed(channel->efx, &evq_prime,
+ efx_reg(channel->efx, ER_GZ_EVQ_INT_PRIME));
+}
+
+static int ef100_ev_process(struct efx_channel *channel, int quota)
+{
+ struct efx_nic *efx = channel->efx;
+ struct ef100_nic_data *nic_data;
+ bool evq_phase, old_evq_phase;
+ unsigned int read_ptr;
+ efx_qword_t *p_event;
+ int spent = 0;
+ bool ev_phase;
+ int ev_type;
+
+ if (unlikely(!channel->enabled))
+ return 0;
+
+ nic_data = efx->nic_data;
+ evq_phase = test_bit(channel->channel, nic_data->evq_phases);
+ old_evq_phase = evq_phase;
+ read_ptr = channel->eventq_read_ptr;
+ BUILD_BUG_ON(ESF_GZ_EV_RXPKTS_PHASE_LBN != ESF_GZ_EV_TXCMPL_PHASE_LBN);
+
+ while (spent < quota) {
+ p_event = efx_event(channel, read_ptr);
+
+ ev_phase = !!EFX_QWORD_FIELD(*p_event, ESF_GZ_EV_RXPKTS_PHASE);
+ if (ev_phase != evq_phase)
+ break;
+
+ netif_vdbg(efx, drv, efx->net_dev,
+ "processing event on %d " EFX_QWORD_FMT "\n",
+ channel->channel, EFX_QWORD_VAL(*p_event));
+
+ ev_type = EFX_QWORD_FIELD(*p_event, ESF_GZ_E_TYPE);
+
+ switch (ev_type) {
+ case ESE_GZ_EF100_EV_RX_PKTS:
+ efx_ef100_ev_rx(channel, p_event);
+ ++spent;
+ break;
+ case ESE_GZ_EF100_EV_MCDI:
+ efx_mcdi_process_event(channel, p_event);
+ break;
+ case ESE_GZ_EF100_EV_TX_COMPLETION:
+ ef100_ev_tx(channel, p_event);
+ break;
+ case ESE_GZ_EF100_EV_DRIVER:
+ netif_info(efx, drv, efx->net_dev,
+ "Driver initiated event " EFX_QWORD_FMT "\n",
+ EFX_QWORD_VAL(*p_event));
+ break;
+ default:
+ netif_info(efx, drv, efx->net_dev,
+ "Unhandled event " EFX_QWORD_FMT "\n",
+ EFX_QWORD_VAL(*p_event));
+ }
+
+ ++read_ptr;
+ if ((read_ptr & channel->eventq_mask) == 0)
+ evq_phase = !evq_phase;
+ }
+
+ channel->eventq_read_ptr = read_ptr;
+ if (evq_phase != old_evq_phase)
+ change_bit(channel->channel, nic_data->evq_phases);
+
+ return spent;
+}
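Annotation: the phase-bit scheme in ef100_ev_process() is the standard way to detect valid entries in a ring that is never explicitly cleared: the producer writes each event with the current phase, which flips every time the ring wraps, and the consumer stops at the first entry whose phase does not match its expectation. A minimal sketch of the consumer side, with simplified types and the phase-bit position assumed for illustration:

/* Sketch of phase-bit consumption; the real loop above also dispatches
 * on event type and honours a quota.
 */
static unsigned int consume_events_sketch(const u64 *ring, unsigned int mask,
					  unsigned int *read_ptr, bool *phase)
{
	unsigned int spent = 0;

	for (;;) {
		/* phase bit position (63) assumed for illustration */
		bool ev_phase = (ring[*read_ptr & mask] >> 63) & 1;

		if (ev_phase != *phase)
			break;		/* entry not yet written this lap */
		/* ... dispatch the event here ... */
		spent++;
		if ((++(*read_ptr) & mask) == 0)
			*phase = !*phase;	/* ring wrapped: expect flipped phase */
	}

	return spent;
}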
+
+static irqreturn_t ef100_msi_interrupt(int irq, void *dev_id)
+{
+ struct efx_msi_context *context = dev_id;
+ struct efx_nic *efx = context->efx;
+
+ netif_vdbg(efx, intr, efx->net_dev,
+ "IRQ %d on CPU %d\n", irq, raw_smp_processor_id());
+
+ if (likely(READ_ONCE(efx->irq_soft_enabled))) {
+ /* Note test interrupts */
+ if (context->index == efx->irq_level)
+ efx->last_irq_cpu = raw_smp_processor_id();
+
+ /* Schedule processing of the channel */
+ efx_schedule_channel_irq(efx->channel[context->index]);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int ef100_phy_probe(struct efx_nic *efx)
+{
+ struct efx_mcdi_phy_data *phy_data;
+ int rc;
+
+ /* Probe for the PHY */
+ efx->phy_data = kzalloc(sizeof(struct efx_mcdi_phy_data), GFP_KERNEL);
+ if (!efx->phy_data)
+ return -ENOMEM;
+
+ rc = efx_mcdi_get_phy_cfg(efx, efx->phy_data);
+ if (rc)
+ return rc;
+
+ /* Populate driver and ethtool settings */
+ phy_data = efx->phy_data;
+ mcdi_to_ethtool_linkset(phy_data->media, phy_data->supported_cap,
+ efx->link_advertising);
+ efx->fec_config = mcdi_fec_caps_to_ethtool(phy_data->supported_cap,
+ false);
+
+ /* Default to Autonegotiated flow control if the PHY supports it */
+ efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
+ if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ efx->wanted_fc |= EFX_FC_AUTO;
+ efx_link_set_wanted_fc(efx, efx->wanted_fc);
+
+ /* Push settings to the PHY. Failure is not fatal, the user can try to
+ * fix it using ethtool.
+ */
+ rc = efx_mcdi_port_reconfigure(efx);
+ if (rc && rc != -EPERM)
+ netif_warn(efx, drv, efx->net_dev,
+ "could not initialise PHY settings\n");
+
+ return 0;
+}
+
+static int ef100_filter_table_probe(struct efx_nic *efx)
+{
+ return efx_mcdi_filter_table_probe(efx, true);
+}
+
+static int ef100_filter_table_up(struct efx_nic *efx)
+{
+ int rc;
+
+ rc = efx_mcdi_filter_add_vlan(efx, EFX_FILTER_VID_UNSPEC);
+ if (rc) {
+ efx_mcdi_filter_table_down(efx);
+ return rc;
+ }
+
+ rc = efx_mcdi_filter_add_vlan(efx, 0);
+ if (rc) {
+ efx_mcdi_filter_del_vlan(efx, EFX_FILTER_VID_UNSPEC);
+ efx_mcdi_filter_table_down(efx);
+ }
+
+ return rc;
+}
+
+static void ef100_filter_table_down(struct efx_nic *efx)
+{
+ efx_mcdi_filter_del_vlan(efx, 0);
+ efx_mcdi_filter_del_vlan(efx, EFX_FILTER_VID_UNSPEC);
+ efx_mcdi_filter_table_down(efx);
+}
+
+/* Other
+ */
+static int ef100_reconfigure_mac(struct efx_nic *efx, bool mtu_only)
+{
+ WARN_ON(!mutex_is_locked(&efx->mac_lock));
+
+ efx_mcdi_filter_sync_rx_mode(efx);
+
+ if (mtu_only && efx_has_cap(efx, SET_MAC_ENHANCED))
+ return efx_mcdi_set_mtu(efx);
+ return efx_mcdi_set_mac(efx);
+}
+
+static enum reset_type ef100_map_reset_reason(enum reset_type reason)
+{
+ if (reason == RESET_TYPE_TX_WATCHDOG)
+ return reason;
+ return RESET_TYPE_DISABLE;
+}
+
+static int ef100_map_reset_flags(u32 *flags)
+{
+ /* Only perform a RESET_TYPE_ALL because we don't support MC_REBOOTs */
+ if ((*flags & EF100_RESET_PORT)) {
+ *flags &= ~EF100_RESET_PORT;
+ return RESET_TYPE_ALL;
+ }
+ if (*flags & ETH_RESET_MGMT) {
+ *flags &= ~ETH_RESET_MGMT;
+ return RESET_TYPE_DISABLE;
+ }
+
+ return -EINVAL;
+}
+
+static int ef100_reset(struct efx_nic *efx, enum reset_type reset_type)
+{
+ int rc;
+
+ dev_close(efx->net_dev);
+
+ if (reset_type == RESET_TYPE_TX_WATCHDOG) {
+ netif_device_attach(efx->net_dev);
+ __clear_bit(reset_type, &efx->reset_pending);
+ rc = dev_open(efx->net_dev, NULL);
+ } else if (reset_type == RESET_TYPE_ALL) {
+ /* A RESET_TYPE_ALL will cause filters to be removed, so we remove filters
+ * and reprobe after reset to avoid removing filters twice
+ */
+ down_read(&efx->filter_sem);
+ ef100_filter_table_down(efx);
+ up_read(&efx->filter_sem);
+ rc = efx_mcdi_reset(efx, reset_type);
+ if (rc)
+ return rc;
+
+ netif_device_attach(efx->net_dev);
+
+ down_read(&efx->filter_sem);
+ rc = ef100_filter_table_up(efx);
+ up_read(&efx->filter_sem);
+ if (rc)
+ return rc;
+
+ rc = dev_open(efx->net_dev, NULL);
+ } else {
+ rc = 1; /* Leave the device closed */
+ }
+ return rc;
+}
+
+static void ef100_common_stat_mask(unsigned long *mask)
+{
+ __set_bit(EF100_STAT_port_rx_packets, mask);
+ __set_bit(EF100_STAT_port_tx_packets, mask);
+ __set_bit(EF100_STAT_port_rx_bytes, mask);
+ __set_bit(EF100_STAT_port_tx_bytes, mask);
+ __set_bit(EF100_STAT_port_rx_multicast, mask);
+ __set_bit(EF100_STAT_port_rx_bad, mask);
+ __set_bit(EF100_STAT_port_rx_align_error, mask);
+ __set_bit(EF100_STAT_port_rx_overflow, mask);
+}
+
+static void ef100_ethtool_stat_mask(unsigned long *mask)
+{
+ __set_bit(EF100_STAT_port_tx_pause, mask);
+ __set_bit(EF100_STAT_port_tx_unicast, mask);
+ __set_bit(EF100_STAT_port_tx_multicast, mask);
+ __set_bit(EF100_STAT_port_tx_broadcast, mask);
+ __set_bit(EF100_STAT_port_tx_lt64, mask);
+ __set_bit(EF100_STAT_port_tx_64, mask);
+ __set_bit(EF100_STAT_port_tx_65_to_127, mask);
+ __set_bit(EF100_STAT_port_tx_128_to_255, mask);
+ __set_bit(EF100_STAT_port_tx_256_to_511, mask);
+ __set_bit(EF100_STAT_port_tx_512_to_1023, mask);
+ __set_bit(EF100_STAT_port_tx_1024_to_15xx, mask);
+ __set_bit(EF100_STAT_port_tx_15xx_to_jumbo, mask);
+ __set_bit(EF100_STAT_port_rx_good, mask);
+ __set_bit(EF100_STAT_port_rx_pause, mask);
+ __set_bit(EF100_STAT_port_rx_unicast, mask);
+ __set_bit(EF100_STAT_port_rx_broadcast, mask);
+ __set_bit(EF100_STAT_port_rx_lt64, mask);
+ __set_bit(EF100_STAT_port_rx_64, mask);
+ __set_bit(EF100_STAT_port_rx_65_to_127, mask);
+ __set_bit(EF100_STAT_port_rx_128_to_255, mask);
+ __set_bit(EF100_STAT_port_rx_256_to_511, mask);
+ __set_bit(EF100_STAT_port_rx_512_to_1023, mask);
+ __set_bit(EF100_STAT_port_rx_1024_to_15xx, mask);
+ __set_bit(EF100_STAT_port_rx_15xx_to_jumbo, mask);
+ __set_bit(EF100_STAT_port_rx_gtjumbo, mask);
+ __set_bit(EF100_STAT_port_rx_bad_gtjumbo, mask);
+ __set_bit(EF100_STAT_port_rx_length_error, mask);
+ __set_bit(EF100_STAT_port_rx_nodesc_drops, mask);
+ __set_bit(GENERIC_STAT_rx_nodesc_trunc, mask);
+ __set_bit(GENERIC_STAT_rx_noskb_drops, mask);
+}
+
+#define EF100_DMA_STAT(ext_name, mcdi_name) \
+ [EF100_STAT_ ## ext_name] = \
+ { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
+
+static const struct efx_hw_stat_desc ef100_stat_desc[EF100_STAT_COUNT] = {
+ EF100_DMA_STAT(port_tx_bytes, TX_BYTES),
+ EF100_DMA_STAT(port_tx_packets, TX_PKTS),
+ EF100_DMA_STAT(port_tx_pause, TX_PAUSE_PKTS),
+ EF100_DMA_STAT(port_tx_unicast, TX_UNICAST_PKTS),
+ EF100_DMA_STAT(port_tx_multicast, TX_MULTICAST_PKTS),
+ EF100_DMA_STAT(port_tx_broadcast, TX_BROADCAST_PKTS),
+ EF100_DMA_STAT(port_tx_lt64, TX_LT64_PKTS),
+ EF100_DMA_STAT(port_tx_64, TX_64_PKTS),
+ EF100_DMA_STAT(port_tx_65_to_127, TX_65_TO_127_PKTS),
+ EF100_DMA_STAT(port_tx_128_to_255, TX_128_TO_255_PKTS),
+ EF100_DMA_STAT(port_tx_256_to_511, TX_256_TO_511_PKTS),
+ EF100_DMA_STAT(port_tx_512_to_1023, TX_512_TO_1023_PKTS),
+ EF100_DMA_STAT(port_tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
+ EF100_DMA_STAT(port_tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
+ EF100_DMA_STAT(port_rx_bytes, RX_BYTES),
+ EF100_DMA_STAT(port_rx_packets, RX_PKTS),
+ EF100_DMA_STAT(port_rx_good, RX_GOOD_PKTS),
+ EF100_DMA_STAT(port_rx_bad, RX_BAD_FCS_PKTS),
+ EF100_DMA_STAT(port_rx_pause, RX_PAUSE_PKTS),
+ EF100_DMA_STAT(port_rx_unicast, RX_UNICAST_PKTS),
+ EF100_DMA_STAT(port_rx_multicast, RX_MULTICAST_PKTS),
+ EF100_DMA_STAT(port_rx_broadcast, RX_BROADCAST_PKTS),
+ EF100_DMA_STAT(port_rx_lt64, RX_UNDERSIZE_PKTS),
+ EF100_DMA_STAT(port_rx_64, RX_64_PKTS),
+ EF100_DMA_STAT(port_rx_65_to_127, RX_65_TO_127_PKTS),
+ EF100_DMA_STAT(port_rx_128_to_255, RX_128_TO_255_PKTS),
+ EF100_DMA_STAT(port_rx_256_to_511, RX_256_TO_511_PKTS),
+ EF100_DMA_STAT(port_rx_512_to_1023, RX_512_TO_1023_PKTS),
+ EF100_DMA_STAT(port_rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
+ EF100_DMA_STAT(port_rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
+ EF100_DMA_STAT(port_rx_gtjumbo, RX_GTJUMBO_PKTS),
+ EF100_DMA_STAT(port_rx_bad_gtjumbo, RX_JABBER_PKTS),
+ EF100_DMA_STAT(port_rx_align_error, RX_ALIGN_ERROR_PKTS),
+ EF100_DMA_STAT(port_rx_length_error, RX_LENGTH_ERROR_PKTS),
+ EF100_DMA_STAT(port_rx_overflow, RX_OVERFLOW_PKTS),
+ EF100_DMA_STAT(port_rx_nodesc_drops, RX_NODESC_DROPS),
+ EFX_GENERIC_SW_STAT(rx_nodesc_trunc),
+ EFX_GENERIC_SW_STAT(rx_noskb_drops),
+};
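Annotation: for reference, the EF100_DMA_STAT() macro above is plain token pasting; its first table row expands as follows (the third field is the stat's byte offset in the DMA stats buffer, 8 bytes per 64-bit MC stat, as the 8 * scaling suggests):

/* EF100_DMA_STAT(port_tx_bytes, TX_BYTES) expands to:
 *   [EF100_STAT_port_tx_bytes] =
 *       { "port_tx_bytes", 64, 8 * MC_CMD_MAC_TX_BYTES },
 */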
+
+static size_t ef100_describe_stats(struct efx_nic *efx, u8 *names)
+{
+ DECLARE_BITMAP(mask, EF100_STAT_COUNT) = {};
+
+ ef100_ethtool_stat_mask(mask);
+ return efx_nic_describe_stats(ef100_stat_desc, EF100_STAT_COUNT,
+ mask, names);
+}
+
+static size_t ef100_update_stats_common(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats)
+{
+ struct ef100_nic_data *nic_data = efx->nic_data;
+ DECLARE_BITMAP(mask, EF100_STAT_COUNT) = {};
+ size_t stats_count = 0, index;
+ u64 *stats = nic_data->stats;
+
+ ef100_ethtool_stat_mask(mask);
+
+ if (full_stats) {
+ for_each_set_bit(index, mask, EF100_STAT_COUNT) {
+ if (ef100_stat_desc[index].name) {
+ *full_stats++ = stats[index];
+ ++stats_count;
+ }
+ }
+ }
+
+ if (!core_stats)
+ return stats_count;
+
+ core_stats->rx_packets = stats[EF100_STAT_port_rx_packets];
+ core_stats->tx_packets = stats[EF100_STAT_port_tx_packets];
+ core_stats->rx_bytes = stats[EF100_STAT_port_rx_bytes];
+ core_stats->tx_bytes = stats[EF100_STAT_port_tx_bytes];
+ core_stats->rx_dropped = stats[EF100_STAT_port_rx_nodesc_drops] +
+ stats[GENERIC_STAT_rx_nodesc_trunc] +
+ stats[GENERIC_STAT_rx_noskb_drops];
+ core_stats->multicast = stats[EF100_STAT_port_rx_multicast];
+ core_stats->rx_length_errors =
+ stats[EF100_STAT_port_rx_gtjumbo] +
+ stats[EF100_STAT_port_rx_length_error];
+ core_stats->rx_crc_errors = stats[EF100_STAT_port_rx_bad];
+ core_stats->rx_frame_errors =
+ stats[EF100_STAT_port_rx_align_error];
+ core_stats->rx_fifo_errors = stats[EF100_STAT_port_rx_overflow];
+ core_stats->rx_errors = (core_stats->rx_length_errors +
+ core_stats->rx_crc_errors +
+ core_stats->rx_frame_errors);
+
+ return stats_count;
+}
+
+static size_t ef100_update_stats(struct efx_nic *efx,
+ u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats)
+{
+ __le64 *mc_stats = kmalloc(array_size(efx->num_mac_stats, sizeof(__le64)), GFP_ATOMIC);
+ struct ef100_nic_data *nic_data = efx->nic_data;
+ DECLARE_BITMAP(mask, EF100_STAT_COUNT) = {};
+ u64 *stats = nic_data->stats;
+
+ ef100_common_stat_mask(mask);
+ ef100_ethtool_stat_mask(mask);
+
+	if (!mc_stats)
+		return 0;	/* GFP_ATOMIC allocation failed: report no stats this time */
+
+	efx_nic_copy_stats(efx, mc_stats);
+ efx_nic_update_stats(ef100_stat_desc, EF100_STAT_COUNT, mask,
+ stats, mc_stats, false);
+
+ kfree(mc_stats);
+
+ return ef100_update_stats_common(efx, full_stats, core_stats);
+}
+
+static int efx_ef100_get_phys_port_id(struct efx_nic *efx,
+ struct netdev_phys_item_id *ppid)
+{
+ struct ef100_nic_data *nic_data = efx->nic_data;
+
+ if (!is_valid_ether_addr(nic_data->port_id))
+ return -EOPNOTSUPP;
+
+ ppid->id_len = ETH_ALEN;
+ memcpy(ppid->id, nic_data->port_id, ppid->id_len);
+
+ return 0;
+}
+
+static int efx_ef100_irq_test_generate(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN);
+
+ BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0);
+
+ MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level);
+ return efx_mcdi_rpc_quiet(efx, MC_CMD_TRIGGER_INTERRUPT,
+ inbuf, sizeof(inbuf), NULL, 0, NULL);
+}
+
+#define EFX_EF100_TEST 1
+
+static void efx_ef100_ev_test_generate(struct efx_channel *channel)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
+ struct efx_nic *efx = channel->efx;
+ efx_qword_t event;
+ int rc;
+
+ EFX_POPULATE_QWORD_2(event,
+ ESF_GZ_E_TYPE, ESE_GZ_EF100_EV_DRIVER,
+ ESF_GZ_DRIVER_DATA, EFX_EF100_TEST);
+
+ MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
+
+ /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
+ * already swapped the data to little-endian order.
+ */
+ memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
+ sizeof(efx_qword_t));
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ if (rc && (rc != -ENETDOWN))
+ goto fail;
+
+ return;
+
+fail:
+ WARN_ON(true);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+}
+
+static unsigned int ef100_check_caps(const struct efx_nic *efx,
+ u8 flag, u32 offset)
+{
+ const struct ef100_nic_data *nic_data = efx->nic_data;
+
+ switch (offset) {
+ case MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS1_OFST:
+ return nic_data->datapath_caps & BIT_ULL(flag);
+ case MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS2_OFST:
+ return nic_data->datapath_caps2 & BIT_ULL(flag);
+ case MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS3_OFST:
+ return nic_data->datapath_caps3 & BIT_ULL(flag);
+ default:
+ return 0;
+ }
+}
+
+/* NIC level access functions
+ */
+#define EF100_OFFLOAD_FEATURES (NETIF_F_HW_CSUM | NETIF_F_RXCSUM | \
+ NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_FRAGLIST | \
+ NETIF_F_RXHASH | NETIF_F_RXFCS | NETIF_F_TSO_ECN | NETIF_F_RXALL | \
+ NETIF_F_TSO_MANGLEID | NETIF_F_HW_VLAN_CTAG_TX)
+
+const struct efx_nic_type ef100_pf_nic_type = {
+ .revision = EFX_REV_EF100,
+ .is_vf = false,
+ .probe = ef100_probe_pf,
+ .offload_features = EF100_OFFLOAD_FEATURES,
+ .mcdi_max_ver = 2,
+ .mcdi_request = ef100_mcdi_request,
+ .mcdi_poll_response = ef100_mcdi_poll_response,
+ .mcdi_read_response = ef100_mcdi_read_response,
+ .mcdi_poll_reboot = ef100_mcdi_poll_reboot,
+ .mcdi_reboot_detected = ef100_mcdi_reboot_detected,
+ .irq_enable_master = efx_port_dummy_op_void,
+ .irq_test_generate = efx_ef100_irq_test_generate,
+ .irq_disable_non_ev = efx_port_dummy_op_void,
+ .push_irq_moderation = efx_channel_dummy_op_void,
+ .min_interrupt_mode = EFX_INT_MODE_MSIX,
+ .map_reset_reason = ef100_map_reset_reason,
+ .map_reset_flags = ef100_map_reset_flags,
+ .reset = ef100_reset,
+
+ .check_caps = ef100_check_caps,
+
+ .ev_probe = ef100_ev_probe,
+ .ev_init = ef100_ev_init,
+ .ev_fini = efx_mcdi_ev_fini,
+ .ev_remove = efx_mcdi_ev_remove,
+ .irq_handle_msi = ef100_msi_interrupt,
+ .ev_process = ef100_ev_process,
+ .ev_read_ack = ef100_ev_read_ack,
+ .ev_test_generate = efx_ef100_ev_test_generate,
+ .tx_probe = ef100_tx_probe,
+ .tx_init = ef100_tx_init,
+ .tx_write = ef100_tx_write,
+ .tx_enqueue = ef100_enqueue_skb,
+ .rx_probe = efx_mcdi_rx_probe,
+ .rx_init = efx_mcdi_rx_init,
+ .rx_remove = efx_mcdi_rx_remove,
+ .rx_write = ef100_rx_write,
+ .rx_packet = __ef100_rx_packet,
+ .fini_dmaq = efx_fini_dmaq,
+ .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
+ .filter_table_probe = ef100_filter_table_up,
+ .filter_table_restore = efx_mcdi_filter_table_restore,
+ .filter_table_remove = ef100_filter_table_down,
+ .filter_insert = efx_mcdi_filter_insert,
+ .filter_remove_safe = efx_mcdi_filter_remove_safe,
+ .filter_get_safe = efx_mcdi_filter_get_safe,
+ .filter_clear_rx = efx_mcdi_filter_clear_rx,
+ .filter_count_rx_used = efx_mcdi_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids,
+#ifdef CONFIG_RFS_ACCEL
+ .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one,
+#endif
+
+ .get_phys_port_id = efx_ef100_get_phys_port_id,
+
+ .rx_prefix_size = ESE_GZ_RX_PKT_PREFIX_LEN,
+ .rx_hash_offset = ESF_GZ_RX_PREFIX_RSS_HASH_LBN / 8,
+ .rx_ts_offset = ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN / 8,
+ .rx_hash_key_size = 40,
+ .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
+ .rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config,
+ .rx_push_rss_context_config = efx_mcdi_rx_push_rss_context_config,
+ .rx_pull_rss_context_config = efx_mcdi_rx_pull_rss_context_config,
+ .rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts,
+
+ .reconfigure_mac = ef100_reconfigure_mac,
+ .test_nvram = efx_new_mcdi_nvram_test_all,
+ .describe_stats = ef100_describe_stats,
+ .start_stats = efx_mcdi_mac_start_stats,
+ .update_stats = ef100_update_stats,
+ .pull_stats = efx_mcdi_mac_pull_stats,
+ .stop_stats = efx_mcdi_mac_stop_stats,
+
+ /* Per-type bar/size configuration not used on ef100. Location of
+ * registers is defined by extended capabilities.
+ */
+ .mem_bar = NULL,
+ .mem_map_size = NULL,
+
+};
+
+const struct efx_nic_type ef100_vf_nic_type = {
+ .revision = EFX_REV_EF100,
+ .is_vf = true,
+ .probe = ef100_probe_vf,
+ .offload_features = EF100_OFFLOAD_FEATURES,
+ .mcdi_max_ver = 2,
+ .mcdi_request = ef100_mcdi_request,
+ .mcdi_poll_response = ef100_mcdi_poll_response,
+ .mcdi_read_response = ef100_mcdi_read_response,
+ .mcdi_poll_reboot = ef100_mcdi_poll_reboot,
+ .mcdi_reboot_detected = ef100_mcdi_reboot_detected,
+ .irq_enable_master = efx_port_dummy_op_void,
+ .irq_test_generate = efx_ef100_irq_test_generate,
+ .irq_disable_non_ev = efx_port_dummy_op_void,
+ .push_irq_moderation = efx_channel_dummy_op_void,
+ .min_interrupt_mode = EFX_INT_MODE_MSIX,
+ .map_reset_reason = ef100_map_reset_reason,
+ .map_reset_flags = ef100_map_reset_flags,
+ .reset = ef100_reset,
+ .check_caps = ef100_check_caps,
+ .ev_probe = ef100_ev_probe,
+ .ev_init = ef100_ev_init,
+ .ev_fini = efx_mcdi_ev_fini,
+ .ev_remove = efx_mcdi_ev_remove,
+ .irq_handle_msi = ef100_msi_interrupt,
+ .ev_process = ef100_ev_process,
+ .ev_read_ack = ef100_ev_read_ack,
+ .ev_test_generate = efx_ef100_ev_test_generate,
+ .tx_probe = ef100_tx_probe,
+ .tx_init = ef100_tx_init,
+ .tx_write = ef100_tx_write,
+ .tx_enqueue = ef100_enqueue_skb,
+ .rx_probe = efx_mcdi_rx_probe,
+ .rx_init = efx_mcdi_rx_init,
+ .rx_remove = efx_mcdi_rx_remove,
+ .rx_write = ef100_rx_write,
+ .rx_packet = __ef100_rx_packet,
+ .fini_dmaq = efx_fini_dmaq,
+ .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
+ .filter_table_probe = ef100_filter_table_up,
+ .filter_table_restore = efx_mcdi_filter_table_restore,
+ .filter_table_remove = ef100_filter_table_down,
+ .filter_insert = efx_mcdi_filter_insert,
+ .filter_remove_safe = efx_mcdi_filter_remove_safe,
+ .filter_get_safe = efx_mcdi_filter_get_safe,
+ .filter_clear_rx = efx_mcdi_filter_clear_rx,
+ .filter_count_rx_used = efx_mcdi_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids,
+ .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one,
+
+ .rx_prefix_size = ESE_GZ_RX_PKT_PREFIX_LEN,
+ .rx_hash_offset = ESF_GZ_RX_PREFIX_RSS_HASH_LBN / 8,
+ .rx_ts_offset = ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN / 8,
+ .rx_hash_key_size = 40,
+ .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
+ .rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config,
+ .rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts,
+
+ .reconfigure_mac = ef100_reconfigure_mac,
+ .test_nvram = efx_new_mcdi_nvram_test_all,
+ .describe_stats = ef100_describe_stats,
+ .start_stats = efx_mcdi_mac_start_stats,
+ .update_stats = ef100_update_stats,
+ .pull_stats = efx_mcdi_mac_pull_stats,
+ .stop_stats = efx_mcdi_mac_stop_stats,
+
+ .mem_bar = NULL,
+ .mem_map_size = NULL,
+
+};
+
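+/* Compare two "major.minor.point.patch" version strings, strcmp()-style:
+ * negative if a < b, zero if equal, positive if a > b.  A string that does
+ * not parse as four dot-separated integers compares below one that does,
+ * and two unparseable strings compare equal.  For example,
+ * compare_versions("1.1.0.1000", "1.0.2.0") returns a positive value.
+ */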
+static int compare_versions(const char *a, const char *b)
+{
+ int a_major, a_minor, a_point, a_patch;
+ int b_major, b_minor, b_point, b_patch;
+ int a_matched, b_matched;
+
+ a_matched = sscanf(a, "%d.%d.%d.%d", &a_major, &a_minor, &a_point, &a_patch);
+ b_matched = sscanf(b, "%d.%d.%d.%d", &b_major, &b_minor, &b_point, &b_patch);
+
+ if (a_matched == 4 && b_matched != 4)
+ return +1;
+
+ if (a_matched != 4 && b_matched == 4)
+ return -1;
+
+ if (a_matched != 4 && b_matched != 4)
+ return 0;
+
+ if (a_major != b_major)
+ return a_major - b_major;
+
+ if (a_minor != b_minor)
+ return a_minor - b_minor;
+
+ if (a_point != b_point)
+ return a_point - b_point;
+
+ return a_patch - b_patch;
+}
+
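+/* The design parameters area is a TLV byte stream: a type byte (bit 7 set
+ * means a second type byte follows, contributing bits 7..14 of the type),
+ * a one-byte length, then 'length' bytes of little-endian value.  A zero
+ * length encodes a value of zero.
+ */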
+enum ef100_tlv_state_machine {
+ EF100_TLV_TYPE,
+ EF100_TLV_TYPE_CONT,
+ EF100_TLV_LENGTH,
+ EF100_TLV_VALUE
+};
+
+struct ef100_tlv_state {
+ enum ef100_tlv_state_machine state;
+ u64 value;
+ u32 value_offset;
+ u16 type;
+ u8 len;
+};
+
+static int ef100_tlv_feed(struct ef100_tlv_state *state, u8 byte)
+{
+ switch (state->state) {
+ case EF100_TLV_TYPE:
+ state->type = byte & 0x7f;
+ state->state = (byte & 0x80) ? EF100_TLV_TYPE_CONT
+ : EF100_TLV_LENGTH;
+ /* Clear the value accumulator, ready to read in a new entry */
+ state->value = 0;
+ state->value_offset = 0;
+ return 0;
+ case EF100_TLV_TYPE_CONT:
+ state->type |= byte << 7;
+ state->state = EF100_TLV_LENGTH;
+ return 0;
+ case EF100_TLV_LENGTH:
+ state->len = byte;
+ /* We only handle TLVs that fit in a u64 */
+ if (state->len > sizeof(state->value))
+ return -EOPNOTSUPP;
+ /* len may be zero, implying a value of zero */
+ state->state = state->len ? EF100_TLV_VALUE : EF100_TLV_TYPE;
+ return 0;
+ case EF100_TLV_VALUE:
+ state->value |= ((u64)byte) << (state->value_offset * 8);
+ state->value_offset++;
+ if (state->value_offset >= state->len)
+ state->state = EF100_TLV_TYPE;
+ return 0;
+ default: /* state machine error, can't happen */
+ WARN_ON_ONCE(1);
+ return -EIO;
+ }
+}
+
+static int ef100_process_design_param(struct efx_nic *efx,
+ const struct ef100_tlv_state *reader)
+{
+ struct ef100_nic_data *nic_data = efx->nic_data;
+
+ switch (reader->type) {
+ case ESE_EF100_DP_GZ_PAD: /* padding, skip it */
+ return 0;
+ case ESE_EF100_DP_GZ_PARTIAL_TSTAMP_SUB_NANO_BITS:
+ /* Driver doesn't support timestamping yet, so we don't care */
+ return 0;
+ case ESE_EF100_DP_GZ_EVQ_UNSOL_CREDIT_SEQ_BITS:
+ /* Driver doesn't support unsolicited-event credits yet, so
+ * we don't care
+ */
+ return 0;
+ case ESE_EF100_DP_GZ_NMMU_GROUP_SIZE:
+ /* Driver doesn't manage the NMMU (so we don't care) */
+ return 0;
+ case ESE_EF100_DP_GZ_RX_L4_CSUM_PROTOCOLS:
+ /* Driver uses CHECKSUM_COMPLETE, so we don't care about
+ * protocol checksum validation
+ */
+ return 0;
+ case ESE_EF100_DP_GZ_TSO_MAX_HDR_LEN:
+ nic_data->tso_max_hdr_len = min_t(u64, reader->value, 0xffff);
+ return 0;
+ case ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS:
+ /* We always put HDR_NUM_SEGS=1 in our TSO descriptors */
+ if (!reader->value) {
+ netif_err(efx, probe, efx->net_dev,
+ "TSO_MAX_HDR_NUM_SEGS < 1\n");
+ return -EOPNOTSUPP;
+ }
+ return 0;
+ case ESE_EF100_DP_GZ_RXQ_SIZE_GRANULARITY:
+ case ESE_EF100_DP_GZ_TXQ_SIZE_GRANULARITY:
+ /* Our TXQ and RXQ sizes are always power-of-two and thus divisible by
+ * EFX_MIN_DMAQ_SIZE, so we just need to check that
+ * EFX_MIN_DMAQ_SIZE is divisible by GRANULARITY.
+ * This is very unlikely to fail.
+ */
+ if (EFX_MIN_DMAQ_SIZE % reader->value) {
+ netif_err(efx, probe, efx->net_dev,
+ "%s size granularity is %llu, can't guarantee safety\n",
+ reader->type == ESE_EF100_DP_GZ_RXQ_SIZE_GRANULARITY ? "RXQ" : "TXQ",
+ reader->value);
+ return -EOPNOTSUPP;
+ }
+ return 0;
+ case ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN:
+ nic_data->tso_max_payload_len = min_t(u64, reader->value, GSO_MAX_SIZE);
+ efx->net_dev->gso_max_size = nic_data->tso_max_payload_len;
+ return 0;
+ case ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_NUM_SEGS:
+ nic_data->tso_max_payload_num_segs = min_t(u64, reader->value, 0xffff);
+ efx->net_dev->gso_max_segs = nic_data->tso_max_payload_num_segs;
+ return 0;
+ case ESE_EF100_DP_GZ_TSO_MAX_NUM_FRAMES:
+ nic_data->tso_max_frames = min_t(u64, reader->value, 0xffff);
+ return 0;
+ case ESE_EF100_DP_GZ_COMPAT:
+ if (reader->value) {
+ netif_err(efx, probe, efx->net_dev,
+ "DP_COMPAT has unknown bits %#llx, driver not compatible with this hw\n",
+ reader->value);
+ return -EOPNOTSUPP;
+ }
+ return 0;
+ case ESE_EF100_DP_GZ_MEM2MEM_MAX_LEN:
+ /* Driver doesn't use mem2mem transfers */
+ return 0;
+ case ESE_EF100_DP_GZ_EVQ_TIMER_TICK_NANOS:
+ /* Driver doesn't currently use EVQ_TIMER */
+ return 0;
+ case ESE_EF100_DP_GZ_NMMU_PAGE_SIZES:
+ /* Driver doesn't manage the NMMU (so we don't care) */
+ return 0;
+ case ESE_EF100_DP_GZ_VI_STRIDES:
+ /* We never try to set the VI stride, and we don't rely on
+ * being able to find VIs past VI 0 until after we've learned
+ * the current stride from MC_CMD_GET_CAPABILITIES.
+ * So the value of this shouldn't matter.
+ */
+ if (reader->value != ESE_EF100_DP_GZ_VI_STRIDES_DEFAULT)
+ netif_dbg(efx, probe, efx->net_dev,
+ "NIC has non-default VI_STRIDES (mask "
+ "%#llx), early probing might use the wrong one\n",
+ reader->value);
+ return 0;
+ case ESE_EF100_DP_GZ_RX_MAX_RUNT:
+ /* Driver doesn't look at L2_STATUS:LEN_ERR bit, so we don't
+ * care whether it indicates runt or overlength for any given
+ * packet, so we don't care about this parameter.
+ */
+ return 0;
+ default:
+ /* Host interface says "Drivers should ignore design parameters
+ * that they do not recognise."
+ */
+ netif_dbg(efx, probe, efx->net_dev,
+ "Ignoring unrecognised design parameter %u\n",
+ reader->type);
+ return 0;
+ }
+}
+
+static int ef100_check_design_params(struct efx_nic *efx)
+{
+ struct ef100_tlv_state reader = {};
+ u32 total_len, offset = 0;
+ efx_dword_t reg;
+ int rc = 0, i;
+ u32 data;
+
+ efx_readd(efx, &reg, ER_GZ_PARAMS_TLV_LEN);
+ total_len = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
+ netif_dbg(efx, probe, efx->net_dev, "%u bytes of design parameters\n",
+ total_len);
+ while (offset < total_len) {
+ efx_readd(efx, &reg, ER_GZ_PARAMS_TLV + offset);
+ data = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
+ for (i = 0; i < sizeof(data); i++) {
+ rc = ef100_tlv_feed(&reader, data);
+ /* Got a complete value? */
+ if (!rc && reader.state == EF100_TLV_TYPE)
+ rc = ef100_process_design_param(efx, &reader);
+ if (rc)
+ goto out;
+ data >>= 8;
+ offset++;
+ }
+ }
+ /* Check we didn't end halfway through a TLV entry, which could either
+ * mean that the TLV stream is truncated or just that it's corrupted
+ * and our state machine is out of sync.
+ */
+ if (reader.state != EF100_TLV_TYPE) {
+ if (reader.state == EF100_TLV_TYPE_CONT)
+ netif_err(efx, probe, efx->net_dev,
+ "truncated design parameter (incomplete type %u)\n",
+ reader.type);
+ else
+ netif_err(efx, probe, efx->net_dev,
+ "truncated design parameter %u\n",
+ reader.type);
+ rc = -EIO;
+ }
+out:
+ return rc;
+}
+
+/* NIC probe and remove
+ */
+static int ef100_probe_main(struct efx_nic *efx)
+{
+ unsigned int bar_size = resource_size(&efx->pci_dev->resource[efx->mem_bar]);
+ struct net_device *net_dev = efx->net_dev;
+ struct ef100_nic_data *nic_data;
+ char fw_version[32];
+ int i, rc;
+
+ if (WARN_ON(bar_size == 0))
+ return -EIO;
+
+ nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
+ if (!nic_data)
+ return -ENOMEM;
+ efx->nic_data = nic_data;
+ nic_data->efx = efx;
+ net_dev->features |= efx->type->offload_features;
+ net_dev->hw_features |= efx->type->offload_features;
+
+ /* Populate design-parameter defaults */
+ nic_data->tso_max_hdr_len = ESE_EF100_DP_GZ_TSO_MAX_HDR_LEN_DEFAULT;
+ nic_data->tso_max_frames = ESE_EF100_DP_GZ_TSO_MAX_NUM_FRAMES_DEFAULT;
+ nic_data->tso_max_payload_num_segs = ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_NUM_SEGS_DEFAULT;
+ nic_data->tso_max_payload_len = ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN_DEFAULT;
+ net_dev->gso_max_segs = ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT;
+ /* Read design parameters */
+ rc = ef100_check_design_params(efx);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "Unsupported design parameters\n");
+ goto fail;
+ }
+
+ /* we assume later that we can copy from this buffer in dwords */
+ BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4);
+
+ /* MCDI buffers must be 256 byte aligned. */
+ rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf, MCDI_BUF_LEN,
+ GFP_KERNEL);
+ if (rc)
+ goto fail;
+
+ /* Get the MC's warm boot count. In case it's rebooting right
+ * now, be prepared to retry.
+ */
+ i = 0;
+ for (;;) {
+ rc = ef100_get_warm_boot_count(efx);
+ if (rc >= 0)
+ break;
+ if (++i == 5)
+ goto fail;
+ ssleep(1);
+ }
+ nic_data->warm_boot_count = rc;
+
+ /* In case we're recovering from a crash (kexec), we want to
+ * cancel any outstanding request by the previous user of this
+ * function. We send a special message using the least
+ * significant bits of the 'high' (doorbell) register.
+ */
+ _efx_writed(efx, cpu_to_le32(1), efx_reg(efx, ER_GZ_MC_DB_HWRD));
+
+ /* Post-IO section. */
+
+ rc = efx_mcdi_init(efx);
+ if (!rc && efx->mcdi->fn_flags &
+ (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT)) {
+ netif_info(efx, probe, efx->net_dev,
+ "No network port on this PCI function\n");
+ rc = -ENODEV;
+ }
+ if (rc)
+ goto fail;
+ /* Reset (most) configuration for this function */
+ rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
+ if (rc)
+ goto fail;
+
+ rc = efx_get_pf_index(efx, &nic_data->pf_index);
+ if (rc)
+ goto fail;
+
+ rc = efx_ef100_init_datapath_caps(efx);
+ if (rc < 0)
+ goto fail;
+
+ efx->max_vis = EF100_MAX_VIS;
+
+ rc = efx_mcdi_port_get_number(efx);
+ if (rc < 0)
+ goto fail;
+ efx->port_num = rc;
+
+ efx_mcdi_print_fwver(efx, fw_version, sizeof(fw_version));
+ netif_dbg(efx, drv, efx->net_dev, "Firmware version %s\n", fw_version);
+
+ if (compare_versions(fw_version, "1.1.0.1000") < 0) {
+ netif_info(efx, drv, efx->net_dev, "Firmware uses old event descriptors\n");
+ rc = -EINVAL;
+ goto fail;
+ }
+
+ if (efx_has_cap(efx, UNSOL_EV_CREDIT_SUPPORTED)) {
+ netif_info(efx, drv, efx->net_dev, "Firmware uses unsolicited-event credits\n");
+ rc = -EINVAL;
+ goto fail;
+ }
+
+ rc = ef100_phy_probe(efx);
+ if (rc)
+ goto fail;
+
+ rc = efx_init_channels(efx);
+ if (rc)
+ goto fail;
+
+ down_write(&efx->filter_sem);
+ rc = ef100_filter_table_probe(efx);
+ up_write(&efx->filter_sem);
+ if (rc)
+ goto fail;
+
+ netdev_rss_key_fill(efx->rss_context.rx_hash_key,
+ sizeof(efx->rss_context.rx_hash_key));
+
+ /* Don't fail init if RSS setup doesn't work. */
+ efx_mcdi_push_default_indir_table(efx, efx->n_rx_channels);
+
+ rc = ef100_register_netdev(efx);
+ if (rc)
+ goto fail;
+
+ return 0;
+fail:
+ return rc;
+}
+
+int ef100_probe_pf(struct efx_nic *efx)
+{
+ struct net_device *net_dev = efx->net_dev;
+ struct ef100_nic_data *nic_data;
+ int rc = ef100_probe_main(efx);
+
+ if (rc)
+ goto fail;
+
+ nic_data = efx->nic_data;
+ rc = ef100_get_mac_address(efx, net_dev->perm_addr);
+ if (rc)
+ goto fail;
+ /* Assign MAC address */
+ memcpy(net_dev->dev_addr, net_dev->perm_addr, ETH_ALEN);
+ memcpy(nic_data->port_id, net_dev->perm_addr, ETH_ALEN);
+
+ return 0;
+
+fail:
+ return rc;
+}
+
+int ef100_probe_vf(struct efx_nic *efx)
+{
+ return ef100_probe_main(efx);
+}
+
+void ef100_remove(struct efx_nic *efx)
+{
+ struct ef100_nic_data *nic_data = efx->nic_data;
+
+ ef100_unregister_netdev(efx);
+
+ down_write(&efx->filter_sem);
+ efx_mcdi_filter_table_remove(efx);
+ up_write(&efx->filter_sem);
+ efx_fini_channels(efx);
+ kfree(efx->phy_data);
+ efx->phy_data = NULL;
+ efx_mcdi_detach(efx);
+ efx_mcdi_fini(efx);
+ if (nic_data)
+ efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
+ kfree(nic_data);
+ efx->nic_data = NULL;
+}
diff --git a/drivers/net/ethernet/sfc/ef100_nic.h b/drivers/net/ethernet/sfc/ef100_nic.h
new file mode 100644
index 000000000000..e799688d5264
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef100_nic.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ * Copyright 2019-2020 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include "nic_common.h"
+
+extern const struct efx_nic_type ef100_pf_nic_type;
+extern const struct efx_nic_type ef100_vf_nic_type;
+
+int ef100_probe_pf(struct efx_nic *efx);
+int ef100_probe_vf(struct efx_nic *efx);
+void ef100_remove(struct efx_nic *efx);
+
+enum {
+ EF100_STAT_port_tx_bytes = GENERIC_STAT_COUNT,
+ EF100_STAT_port_tx_packets,
+ EF100_STAT_port_tx_pause,
+ EF100_STAT_port_tx_unicast,
+ EF100_STAT_port_tx_multicast,
+ EF100_STAT_port_tx_broadcast,
+ EF100_STAT_port_tx_lt64,
+ EF100_STAT_port_tx_64,
+ EF100_STAT_port_tx_65_to_127,
+ EF100_STAT_port_tx_128_to_255,
+ EF100_STAT_port_tx_256_to_511,
+ EF100_STAT_port_tx_512_to_1023,
+ EF100_STAT_port_tx_1024_to_15xx,
+ EF100_STAT_port_tx_15xx_to_jumbo,
+ EF100_STAT_port_rx_bytes,
+ EF100_STAT_port_rx_packets,
+ EF100_STAT_port_rx_good,
+ EF100_STAT_port_rx_bad,
+ EF100_STAT_port_rx_pause,
+ EF100_STAT_port_rx_unicast,
+ EF100_STAT_port_rx_multicast,
+ EF100_STAT_port_rx_broadcast,
+ EF100_STAT_port_rx_lt64,
+ EF100_STAT_port_rx_64,
+ EF100_STAT_port_rx_65_to_127,
+ EF100_STAT_port_rx_128_to_255,
+ EF100_STAT_port_rx_256_to_511,
+ EF100_STAT_port_rx_512_to_1023,
+ EF100_STAT_port_rx_1024_to_15xx,
+ EF100_STAT_port_rx_15xx_to_jumbo,
+ EF100_STAT_port_rx_gtjumbo,
+ EF100_STAT_port_rx_bad_gtjumbo,
+ EF100_STAT_port_rx_align_error,
+ EF100_STAT_port_rx_length_error,
+ EF100_STAT_port_rx_overflow,
+ EF100_STAT_port_rx_nodesc_drops,
+ EF100_STAT_COUNT
+};
+
+struct ef100_nic_data {
+ struct efx_nic *efx;
+ struct efx_buffer mcdi_buf;
+ u32 datapath_caps;
+ u32 datapath_caps2;
+ u32 datapath_caps3;
+ unsigned int pf_index;
+ u16 warm_boot_count;
+ u8 port_id[ETH_ALEN];
+ DECLARE_BITMAP(evq_phases, EFX_MAX_CHANNELS);
+ u64 stats[EF100_STAT_COUNT];
+ u16 tso_max_hdr_len;
+ u16 tso_max_payload_num_segs;
+ u16 tso_max_frames;
+ unsigned int tso_max_payload_len;
+};
+
+#define efx_ef100_has_cap(caps, flag) \
+ (!!((caps) & BIT_ULL(MC_CMD_GET_CAPABILITIES_V4_OUT_ ## flag ## _LBN)))
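+/* For example, efx_ef100_has_cap(nic_data->datapath_caps2, TX_TSO_V3)
+ * (flag name illustrative) tests one capability bit by its _LBN name.
+ */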
diff --git a/drivers/net/ethernet/sfc/ef100_regs.h b/drivers/net/ethernet/sfc/ef100_regs.h
new file mode 100644
index 000000000000..710bbdb19885
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef100_regs.h
@@ -0,0 +1,693 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ * Copyright 2019-2020 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_EF100_REGS_H
+#define EFX_EF100_REGS_H
+
+/* EF100 hardware architecture definitions have a name prefix following
+ * the format:
+ *
+ * E<type>_<min-rev><max-rev>_
+ *
+ * The following <type> strings are used:
+ *
+ * MMIO register Host memory structure
+ * -------------------------------------------------------------
+ * Address R
+ * Bitfield RF SF
+ * Enumerator FE SE
+ *
+ * <min-rev> is the first revision to which the definition applies:
+ *
+ * G: Riverhead
+ *
+ * If the definition has been changed or removed in later revisions
+ * then <max-rev> is the last revision to which the definition applies;
+ * otherwise it is "Z".
+ */
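+
+/* For example, ERF_GZ_EVQ_ID names a register bitfield ("RF") that applies
+ * from Riverhead ("G") onwards ("Z").
+ */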
+
+/**************************************************************************
+ *
+ * EF100 registers and descriptors
+ *
+ **************************************************************************
+ */
+
+/* HW_REV_ID_REG: Hardware revision info register */
+#define ER_GZ_HW_REV_ID 0x00000000
+
+/* NIC_REV_ID: SoftNIC revision info register */
+#define ER_GZ_NIC_REV_ID 0x00000004
+
+/* NIC_MAGIC: Signature register that should contain a well-known value */
+#define ER_GZ_NIC_MAGIC 0x00000008
+#define ERF_GZ_NIC_MAGIC_LBN 0
+#define ERF_GZ_NIC_MAGIC_WIDTH 32
+#define EFE_GZ_NIC_MAGIC_EXPECTED 0xEF100FCB
+
+/* MC_SFT_STATUS: MC soft status */
+#define ER_GZ_MC_SFT_STATUS 0x00000010
+#define ER_GZ_MC_SFT_STATUS_STEP 4
+#define ER_GZ_MC_SFT_STATUS_ROWS 2
+
+/* MC_DB_LWRD_REG: MC doorbell register, low word */
+#define ER_GZ_MC_DB_LWRD 0x00000020
+
+/* MC_DB_HWRD_REG: MC doorbell register, high word */
+#define ER_GZ_MC_DB_HWRD 0x00000024
+
+/* EVQ_INT_PRIME: Prime EVQ */
+#define ER_GZ_EVQ_INT_PRIME 0x00000040
+#define ERF_GZ_IDX_LBN 16
+#define ERF_GZ_IDX_WIDTH 16
+#define ERF_GZ_EVQ_ID_LBN 0
+#define ERF_GZ_EVQ_ID_WIDTH 16
+
+/* INT_AGG_RING_PRIME: Prime interrupt aggregation ring. */
+#define ER_GZ_INT_AGG_RING_PRIME 0x00000048
+/* defined as ERF_GZ_IDX_LBN 16; access=WO reset=0x0 */
+/* defined as ERF_GZ_IDX_WIDTH 16 */
+#define ERF_GZ_RING_ID_LBN 0
+#define ERF_GZ_RING_ID_WIDTH 16
+
+/* EVQ_TMR: EVQ timer control */
+#define ER_GZ_EVQ_TMR 0x00000104
+#define ER_GZ_EVQ_TMR_STEP 65536
+#define ER_GZ_EVQ_TMR_ROWS 1024
+
+/* EVQ_UNSOL_CREDIT_GRANT_SEQ: Grant credits for unsolicited events. */
+#define ER_GZ_EVQ_UNSOL_CREDIT_GRANT_SEQ 0x00000108
+#define ER_GZ_EVQ_UNSOL_CREDIT_GRANT_SEQ_STEP 65536
+#define ER_GZ_EVQ_UNSOL_CREDIT_GRANT_SEQ_ROWS 1024
+
+/* EVQ_DESC_CREDIT_GRANT_SEQ: Grant credits for descriptor proxy events. */
+#define ER_GZ_EVQ_DESC_CREDIT_GRANT_SEQ 0x00000110
+#define ER_GZ_EVQ_DESC_CREDIT_GRANT_SEQ_STEP 65536
+#define ER_GZ_EVQ_DESC_CREDIT_GRANT_SEQ_ROWS 1024
+
+/* RX_RING_DOORBELL: Ring Rx doorbell. */
+#define ER_GZ_RX_RING_DOORBELL 0x00000180
+#define ER_GZ_RX_RING_DOORBELL_STEP 65536
+#define ER_GZ_RX_RING_DOORBELL_ROWS 1024
+#define ERF_GZ_RX_RING_PIDX_LBN 16
+#define ERF_GZ_RX_RING_PIDX_WIDTH 16
+
+/* TX_RING_DOORBELL: Ring Tx doorbell. */
+#define ER_GZ_TX_RING_DOORBELL 0x00000200
+#define ER_GZ_TX_RING_DOORBELL_STEP 65536
+#define ER_GZ_TX_RING_DOORBELL_ROWS 1024
+#define ERF_GZ_TX_RING_PIDX_LBN 16
+#define ERF_GZ_TX_RING_PIDX_WIDTH 16
+
+/* TX_DESC_PUSH: Tx ring descriptor push. Reserved for future use. */
+#define ER_GZ_TX_DESC_PUSH 0x00000210
+#define ER_GZ_TX_DESC_PUSH_STEP 65536
+#define ER_GZ_TX_DESC_PUSH_ROWS 1024
+
+/* THE_TIME: NIC hardware time */
+#define ER_GZ_THE_TIME 0x00000280
+#define ER_GZ_THE_TIME_STEP 65536
+#define ER_GZ_THE_TIME_ROWS 1024
+#define ERF_GZ_THE_TIME_SECS_LBN 32
+#define ERF_GZ_THE_TIME_SECS_WIDTH 32
+#define ERF_GZ_THE_TIME_NANOS_LBN 2
+#define ERF_GZ_THE_TIME_NANOS_WIDTH 30
+#define ERF_GZ_THE_TIME_CLOCK_IN_SYNC_LBN 1
+#define ERF_GZ_THE_TIME_CLOCK_IN_SYNC_WIDTH 1
+#define ERF_GZ_THE_TIME_CLOCK_IS_SET_LBN 0
+#define ERF_GZ_THE_TIME_CLOCK_IS_SET_WIDTH 1
+
+/* PARAMS_TLV_LEN: Size of design parameters area in bytes */
+#define ER_GZ_PARAMS_TLV_LEN 0x00000c00
+#define ER_GZ_PARAMS_TLV_LEN_STEP 65536
+#define ER_GZ_PARAMS_TLV_LEN_ROWS 1024
+
+/* PARAMS_TLV: Design parameters */
+#define ER_GZ_PARAMS_TLV 0x00000c04
+#define ER_GZ_PARAMS_TLV_STEP 65536
+#define ER_GZ_PARAMS_TLV_ROWS 1024
+
+/* EW_EMBEDDED_EVENT */
+#define ESF_GZ_EV_256_EVENT_LBN 0
+#define ESF_GZ_EV_256_EVENT_WIDTH 64
+#define ESE_GZ_EW_EMBEDDED_EVENT_STRUCT_SIZE 64
+
+/* NMMU_PAGESZ_2M_ADDR */
+#define ESF_GZ_NMMU_2M_PAGE_SIZE_ID_LBN 59
+#define ESF_GZ_NMMU_2M_PAGE_SIZE_ID_WIDTH 5
+#define ESE_GZ_NMMU_PAGE_SIZE_2M 9
+#define ESF_GZ_NMMU_2M_PAGE_ID_LBN 21
+#define ESF_GZ_NMMU_2M_PAGE_ID_WIDTH 38
+#define ESF_GZ_NMMU_2M_PAGE_OFFSET_LBN 0
+#define ESF_GZ_NMMU_2M_PAGE_OFFSET_WIDTH 21
+#define ESE_GZ_NMMU_PAGESZ_2M_ADDR_STRUCT_SIZE 64
+
+/* PARAM_TLV */
+#define ESF_GZ_TLV_VALUE_LBN 16
+#define ESF_GZ_TLV_VALUE_WIDTH 8
+#define ESE_GZ_TLV_VALUE_LENMIN 8
+#define ESE_GZ_TLV_VALUE_LENMAX 2040
+#define ESF_GZ_TLV_LEN_LBN 8
+#define ESF_GZ_TLV_LEN_WIDTH 8
+#define ESF_GZ_TLV_TYPE_LBN 0
+#define ESF_GZ_TLV_TYPE_WIDTH 8
+#define ESE_GZ_DP_NMMU_GROUP_SIZE 5
+#define ESE_GZ_DP_EVQ_UNSOL_CREDIT_SEQ_BITS 4
+#define ESE_GZ_DP_TX_EV_NUM_DESCS_BITS 3
+#define ESE_GZ_DP_RX_EV_NUM_PACKETS_BITS 2
+#define ESE_GZ_DP_PARTIAL_TSTAMP_SUB_NANO_BITS 1
+#define ESE_GZ_DP_PAD 0
+#define ESE_GZ_PARAM_TLV_STRUCT_SIZE 24
+
+/* PCI_EXPRESS_XCAP_HDR */
+#define ESF_GZ_PCI_EXPRESS_XCAP_NEXT_LBN 20
+#define ESF_GZ_PCI_EXPRESS_XCAP_NEXT_WIDTH 12
+#define ESF_GZ_PCI_EXPRESS_XCAP_VER_LBN 16
+#define ESF_GZ_PCI_EXPRESS_XCAP_VER_WIDTH 4
+#define ESE_GZ_PCI_EXPRESS_XCAP_VER_VSEC 1
+#define ESF_GZ_PCI_EXPRESS_XCAP_ID_LBN 0
+#define ESF_GZ_PCI_EXPRESS_XCAP_ID_WIDTH 16
+#define ESE_GZ_PCI_EXPRESS_XCAP_ID_VNDR 0xb
+#define ESE_GZ_PCI_EXPRESS_XCAP_HDR_STRUCT_SIZE 32
+
+/* RHEAD_BASE_EVENT */
+#define ESF_GZ_E_TYPE_LBN 60
+#define ESF_GZ_E_TYPE_WIDTH 4
+#define ESE_GZ_EF100_EV_DRIVER 5
+#define ESE_GZ_EF100_EV_MCDI 4
+#define ESE_GZ_EF100_EV_CONTROL 3
+#define ESE_GZ_EF100_EV_TX_TIMESTAMP 2
+#define ESE_GZ_EF100_EV_TX_COMPLETION 1
+#define ESE_GZ_EF100_EV_RX_PKTS 0
+#define ESF_GZ_EV_EVQ_PHASE_LBN 59
+#define ESF_GZ_EV_EVQ_PHASE_WIDTH 1
+#define ESE_GZ_RHEAD_BASE_EVENT_STRUCT_SIZE 64
+
+/* RHEAD_EW_EVENT */
+#define ESF_GZ_EV_256_EV32_PHASE_LBN 255
+#define ESF_GZ_EV_256_EV32_PHASE_WIDTH 1
+#define ESF_GZ_EV_256_EV32_TYPE_LBN 251
+#define ESF_GZ_EV_256_EV32_TYPE_WIDTH 4
+#define ESE_GZ_EF100_EVEW_VIRTQ_DESC 2
+#define ESE_GZ_EF100_EVEW_TXQ_DESC 1
+#define ESE_GZ_EF100_EVEW_64BIT 0
+#define ESE_GZ_RHEAD_EW_EVENT_STRUCT_SIZE 256
+
+/* RX_DESC */
+#define ESF_GZ_RX_BUF_ADDR_LBN 0
+#define ESF_GZ_RX_BUF_ADDR_WIDTH 64
+#define ESE_GZ_RX_DESC_STRUCT_SIZE 64
+
+/* TXQ_DESC_PROXY_EVENT */
+#define ESF_GZ_EV_TXQ_DP_VI_ID_LBN 128
+#define ESF_GZ_EV_TXQ_DP_VI_ID_WIDTH 16
+#define ESF_GZ_EV_TXQ_DP_TXQ_DESC_LBN 0
+#define ESF_GZ_EV_TXQ_DP_TXQ_DESC_WIDTH 128
+#define ESE_GZ_TXQ_DESC_PROXY_EVENT_STRUCT_SIZE 144
+
+/* TX_DESC_TYPE */
+#define ESF_GZ_TX_DESC_TYPE_LBN 124
+#define ESF_GZ_TX_DESC_TYPE_WIDTH 4
+#define ESE_GZ_TX_DESC_TYPE_DESC2CMPT 7
+#define ESE_GZ_TX_DESC_TYPE_MEM2MEM 4
+#define ESE_GZ_TX_DESC_TYPE_SEG 3
+#define ESE_GZ_TX_DESC_TYPE_TSO 2
+#define ESE_GZ_TX_DESC_TYPE_PREFIX 1
+#define ESE_GZ_TX_DESC_TYPE_SEND 0
+#define ESE_GZ_TX_DESC_TYPE_STRUCT_SIZE 128
+
+/* VIRTQ_DESC_PROXY_EVENT */
+#define ESF_GZ_EV_VQ_DP_AVAIL_ENTRY_LBN 144
+#define ESF_GZ_EV_VQ_DP_AVAIL_ENTRY_WIDTH 16
+#define ESF_GZ_EV_VQ_DP_VI_ID_LBN 128
+#define ESF_GZ_EV_VQ_DP_VI_ID_WIDTH 16
+#define ESF_GZ_EV_VQ_DP_VIRTQ_DESC_LBN 0
+#define ESF_GZ_EV_VQ_DP_VIRTQ_DESC_WIDTH 128
+#define ESE_GZ_VIRTQ_DESC_PROXY_EVENT_STRUCT_SIZE 160
+
+/* XIL_CFGBAR_TBL_ENTRY */
+#define ESF_GZ_CFGBAR_CONT_CAP_OFF_HI_LBN 96
+#define ESF_GZ_CFGBAR_CONT_CAP_OFF_HI_WIDTH 32
+#define ESF_GZ_CFGBAR_CONT_CAP_OFFSET_LBN 68
+#define ESF_GZ_CFGBAR_CONT_CAP_OFFSET_WIDTH 60
+#define ESE_GZ_CONT_CAP_OFFSET_BYTES_SHIFT 4
+#define ESF_GZ_CFGBAR_EF100_FUNC_CTL_WIN_OFF_LBN 67
+#define ESF_GZ_CFGBAR_EF100_FUNC_CTL_WIN_OFF_WIDTH 29
+#define ESE_GZ_EF100_FUNC_CTL_WIN_OFF_SHIFT 4
+#define ESF_GZ_CFGBAR_CONT_CAP_OFF_LO_LBN 68
+#define ESF_GZ_CFGBAR_CONT_CAP_OFF_LO_WIDTH 28
+#define ESF_GZ_CFGBAR_CONT_CAP_RSV_LBN 67
+#define ESF_GZ_CFGBAR_CONT_CAP_RSV_WIDTH 1
+#define ESF_GZ_CFGBAR_EF100_BAR_LBN 64
+#define ESF_GZ_CFGBAR_EF100_BAR_WIDTH 3
+#define ESE_GZ_CFGBAR_EF100_BAR_NUM_INVALID 7
+#define ESE_GZ_CFGBAR_EF100_BAR_NUM_EXPANSION_ROM 6
+#define ESF_GZ_CFGBAR_CONT_CAP_BAR_LBN 64
+#define ESF_GZ_CFGBAR_CONT_CAP_BAR_WIDTH 3
+#define ESE_GZ_CFGBAR_CONT_CAP_BAR_NUM_INVALID 7
+#define ESE_GZ_CFGBAR_CONT_CAP_BAR_NUM_EXPANSION_ROM 6
+#define ESF_GZ_CFGBAR_ENTRY_SIZE_LBN 32
+#define ESF_GZ_CFGBAR_ENTRY_SIZE_WIDTH 32
+#define ESE_GZ_CFGBAR_ENTRY_SIZE_EF100 12
+#define ESE_GZ_CFGBAR_ENTRY_HEADER_SIZE 8
+#define ESF_GZ_CFGBAR_ENTRY_LAST_LBN 28
+#define ESF_GZ_CFGBAR_ENTRY_LAST_WIDTH 1
+#define ESF_GZ_CFGBAR_ENTRY_REV_LBN 20
+#define ESF_GZ_CFGBAR_ENTRY_REV_WIDTH 8
+#define ESE_GZ_CFGBAR_ENTRY_REV_EF100 0
+#define ESF_GZ_CFGBAR_ENTRY_FORMAT_LBN 0
+#define ESF_GZ_CFGBAR_ENTRY_FORMAT_WIDTH 20
+#define ESE_GZ_CFGBAR_ENTRY_LAST 0xfffff
+#define ESE_GZ_CFGBAR_ENTRY_CONT_CAP_ADDR 0xffffe
+#define ESE_GZ_CFGBAR_ENTRY_EF100 0xef100
+#define ESE_GZ_XIL_CFGBAR_TBL_ENTRY_STRUCT_SIZE 128
+
+/* XIL_CFGBAR_VSEC */
+#define ESF_GZ_VSEC_TBL_OFF_HI_LBN 64
+#define ESF_GZ_VSEC_TBL_OFF_HI_WIDTH 32
+#define ESE_GZ_VSEC_TBL_OFF_HI_BYTES_SHIFT 32
+#define ESF_GZ_VSEC_TBL_OFF_LO_LBN 36
+#define ESF_GZ_VSEC_TBL_OFF_LO_WIDTH 28
+#define ESE_GZ_VSEC_TBL_OFF_LO_BYTES_SHIFT 4
+#define ESF_GZ_VSEC_TBL_BAR_LBN 32
+#define ESF_GZ_VSEC_TBL_BAR_WIDTH 4
+#define ESE_GZ_VSEC_BAR_NUM_INVALID 7
+#define ESE_GZ_VSEC_BAR_NUM_EXPANSION_ROM 6
+#define ESF_GZ_VSEC_LEN_LBN 20
+#define ESF_GZ_VSEC_LEN_WIDTH 12
+#define ESE_GZ_VSEC_LEN_HIGH_OFFT 16
+#define ESE_GZ_VSEC_LEN_MIN 12
+#define ESF_GZ_VSEC_VER_LBN 16
+#define ESF_GZ_VSEC_VER_WIDTH 4
+#define ESE_GZ_VSEC_VER_XIL_CFGBAR 0
+#define ESF_GZ_VSEC_ID_LBN 0
+#define ESF_GZ_VSEC_ID_WIDTH 16
+#define ESE_GZ_XILINX_VSEC_ID 0x20
+#define ESE_GZ_XIL_CFGBAR_VSEC_STRUCT_SIZE 96
+
+/* rh_egres_hclass */
+#define ESF_GZ_RX_PREFIX_HCLASS_TUN_OUTER_L4_CSUM_LBN 15
+#define ESF_GZ_RX_PREFIX_HCLASS_TUN_OUTER_L4_CSUM_WIDTH 1
+#define ESF_GZ_RX_PREFIX_HCLASS_TUN_OUTER_L3_CLASS_LBN 13
+#define ESF_GZ_RX_PREFIX_HCLASS_TUN_OUTER_L3_CLASS_WIDTH 2
+#define ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L4_CSUM_LBN 12
+#define ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L4_CSUM_WIDTH 1
+#define ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L4_CLASS_LBN 10
+#define ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L4_CLASS_WIDTH 2
+#define ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L3_CLASS_LBN 8
+#define ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L3_CLASS_WIDTH 2
+#define ESF_GZ_RX_PREFIX_HCLASS_TUNNEL_CLASS_LBN 5
+#define ESF_GZ_RX_PREFIX_HCLASS_TUNNEL_CLASS_WIDTH 3
+#define ESF_GZ_RX_PREFIX_HCLASS_L2_N_VLAN_LBN 3
+#define ESF_GZ_RX_PREFIX_HCLASS_L2_N_VLAN_WIDTH 2
+#define ESF_GZ_RX_PREFIX_HCLASS_L2_CLASS_LBN 2
+#define ESF_GZ_RX_PREFIX_HCLASS_L2_CLASS_WIDTH 1
+#define ESF_GZ_RX_PREFIX_HCLASS_L2_STATUS_LBN 0
+#define ESF_GZ_RX_PREFIX_HCLASS_L2_STATUS_WIDTH 2
+#define ESE_GZ_RH_EGRES_HCLASS_STRUCT_SIZE 16
+
+/* sf_driver */
+#define ESF_GZ_DRIVER_E_TYPE_LBN 60
+#define ESF_GZ_DRIVER_E_TYPE_WIDTH 4
+#define ESF_GZ_DRIVER_PHASE_LBN 59
+#define ESF_GZ_DRIVER_PHASE_WIDTH 1
+#define ESF_GZ_DRIVER_DATA_LBN 0
+#define ESF_GZ_DRIVER_DATA_WIDTH 59
+#define ESE_GZ_SF_DRIVER_STRUCT_SIZE 64
+
+/* sf_ev_rsvd */
+#define ESF_GZ_EV_RSVD_TBD_NEXT_LBN 34
+#define ESF_GZ_EV_RSVD_TBD_NEXT_WIDTH 3
+#define ESF_GZ_EV_RSVD_EVENT_GEN_FLAGS_LBN 30
+#define ESF_GZ_EV_RSVD_EVENT_GEN_FLAGS_WIDTH 4
+#define ESF_GZ_EV_RSVD_SRC_QID_LBN 18
+#define ESF_GZ_EV_RSVD_SRC_QID_WIDTH 12
+#define ESF_GZ_EV_RSVD_SEQ_NUM_LBN 2
+#define ESF_GZ_EV_RSVD_SEQ_NUM_WIDTH 16
+#define ESF_GZ_EV_RSVD_TBD_LBN 0
+#define ESF_GZ_EV_RSVD_TBD_WIDTH 2
+#define ESE_GZ_SF_EV_RSVD_STRUCT_SIZE 37
+
+/* sf_flush_evnt */
+#define ESF_GZ_EV_FLSH_E_TYPE_LBN 60
+#define ESF_GZ_EV_FLSH_E_TYPE_WIDTH 4
+#define ESF_GZ_EV_FLSH_PHASE_LBN 59
+#define ESF_GZ_EV_FLSH_PHASE_WIDTH 1
+#define ESF_GZ_EV_FLSH_SUB_TYPE_LBN 53
+#define ESF_GZ_EV_FLSH_SUB_TYPE_WIDTH 6
+#define ESF_GZ_EV_FLSH_RSVD_LBN 10
+#define ESF_GZ_EV_FLSH_RSVD_WIDTH 43
+#define ESF_GZ_EV_FLSH_LABEL_LBN 4
+#define ESF_GZ_EV_FLSH_LABEL_WIDTH 6
+#define ESF_GZ_EV_FLSH_FLUSH_TYPE_LBN 0
+#define ESF_GZ_EV_FLSH_FLUSH_TYPE_WIDTH 4
+#define ESE_GZ_SF_FLUSH_EVNT_STRUCT_SIZE 64
+
+/* sf_rx_pkts */
+#define ESF_GZ_EV_RXPKTS_E_TYPE_LBN 60
+#define ESF_GZ_EV_RXPKTS_E_TYPE_WIDTH 4
+#define ESF_GZ_EV_RXPKTS_PHASE_LBN 59
+#define ESF_GZ_EV_RXPKTS_PHASE_WIDTH 1
+#define ESF_GZ_EV_RXPKTS_RSVD_LBN 22
+#define ESF_GZ_EV_RXPKTS_RSVD_WIDTH 37
+#define ESF_GZ_EV_RXPKTS_Q_LABEL_LBN 16
+#define ESF_GZ_EV_RXPKTS_Q_LABEL_WIDTH 6
+#define ESF_GZ_EV_RXPKTS_NUM_PKT_LBN 0
+#define ESF_GZ_EV_RXPKTS_NUM_PKT_WIDTH 16
+#define ESE_GZ_SF_RX_PKTS_STRUCT_SIZE 64
+
+/* sf_rx_prefix */
+#define ESF_GZ_RX_PREFIX_VLAN_STRIP_TCI_LBN 160
+#define ESF_GZ_RX_PREFIX_VLAN_STRIP_TCI_WIDTH 16
+#define ESF_GZ_RX_PREFIX_CSUM_FRAME_LBN 144
+#define ESF_GZ_RX_PREFIX_CSUM_FRAME_WIDTH 16
+#define ESF_GZ_RX_PREFIX_INGRESS_VPORT_LBN 128
+#define ESF_GZ_RX_PREFIX_INGRESS_VPORT_WIDTH 16
+#define ESF_GZ_RX_PREFIX_USER_MARK_LBN 96
+#define ESF_GZ_RX_PREFIX_USER_MARK_WIDTH 32
+#define ESF_GZ_RX_PREFIX_RSS_HASH_LBN 64
+#define ESF_GZ_RX_PREFIX_RSS_HASH_WIDTH 32
+#define ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN 32
+#define ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_WIDTH 32
+#define ESF_GZ_RX_PREFIX_CLASS_LBN 16
+#define ESF_GZ_RX_PREFIX_CLASS_WIDTH 16
+#define ESF_GZ_RX_PREFIX_USER_FLAG_LBN 15
+#define ESF_GZ_RX_PREFIX_USER_FLAG_WIDTH 1
+#define ESF_GZ_RX_PREFIX_RSS_HASH_VALID_LBN 14
+#define ESF_GZ_RX_PREFIX_RSS_HASH_VALID_WIDTH 1
+#define ESF_GZ_RX_PREFIX_LENGTH_LBN 0
+#define ESF_GZ_RX_PREFIX_LENGTH_WIDTH 14
+#define ESE_GZ_SF_RX_PREFIX_STRUCT_SIZE 176
+
+/* sf_rxtx_generic */
+#define ESF_GZ_EV_BARRIER_LBN 167
+#define ESF_GZ_EV_BARRIER_WIDTH 1
+#define ESF_GZ_EV_RSVD_LBN 130
+#define ESF_GZ_EV_RSVD_WIDTH 37
+#define ESF_GZ_EV_DPRXY_LBN 129
+#define ESF_GZ_EV_DPRXY_WIDTH 1
+#define ESF_GZ_EV_VIRTIO_LBN 128
+#define ESF_GZ_EV_VIRTIO_WIDTH 1
+#define ESF_GZ_EV_COUNT_LBN 0
+#define ESF_GZ_EV_COUNT_WIDTH 128
+#define ESE_GZ_SF_RXTX_GENERIC_STRUCT_SIZE 168
+
+/* sf_ts_stamp */
+#define ESF_GZ_EV_TS_E_TYPE_LBN 60
+#define ESF_GZ_EV_TS_E_TYPE_WIDTH 4
+#define ESF_GZ_EV_TS_PHASE_LBN 59
+#define ESF_GZ_EV_TS_PHASE_WIDTH 1
+#define ESF_GZ_EV_TS_RSVD_LBN 56
+#define ESF_GZ_EV_TS_RSVD_WIDTH 3
+#define ESF_GZ_EV_TS_STATUS_LBN 54
+#define ESF_GZ_EV_TS_STATUS_WIDTH 2
+#define ESF_GZ_EV_TS_Q_LABEL_LBN 48
+#define ESF_GZ_EV_TS_Q_LABEL_WIDTH 6
+#define ESF_GZ_EV_TS_DESC_ID_LBN 32
+#define ESF_GZ_EV_TS_DESC_ID_WIDTH 16
+#define ESF_GZ_EV_TS_PARTIAL_STAMP_LBN 0
+#define ESF_GZ_EV_TS_PARTIAL_STAMP_WIDTH 32
+#define ESE_GZ_SF_TS_STAMP_STRUCT_SIZE 64
+
+/* sf_tx_cmplt */
+#define ESF_GZ_EV_TXCMPL_E_TYPE_LBN 60
+#define ESF_GZ_EV_TXCMPL_E_TYPE_WIDTH 4
+#define ESF_GZ_EV_TXCMPL_PHASE_LBN 59
+#define ESF_GZ_EV_TXCMPL_PHASE_WIDTH 1
+#define ESF_GZ_EV_TXCMPL_RSVD_LBN 22
+#define ESF_GZ_EV_TXCMPL_RSVD_WIDTH 37
+#define ESF_GZ_EV_TXCMPL_Q_LABEL_LBN 16
+#define ESF_GZ_EV_TXCMPL_Q_LABEL_WIDTH 6
+#define ESF_GZ_EV_TXCMPL_NUM_DESC_LBN 0
+#define ESF_GZ_EV_TXCMPL_NUM_DESC_WIDTH 16
+#define ESE_GZ_SF_TX_CMPLT_STRUCT_SIZE 64
+
+/* sf_tx_desc2cmpt_dsc_fmt */
+#define ESF_GZ_D2C_TGT_VI_ID_LBN 108
+#define ESF_GZ_D2C_TGT_VI_ID_WIDTH 16
+#define ESF_GZ_D2C_CMPT2_LBN 107
+#define ESF_GZ_D2C_CMPT2_WIDTH 1
+#define ESF_GZ_D2C_ABS_VI_ID_LBN 106
+#define ESF_GZ_D2C_ABS_VI_ID_WIDTH 1
+#define ESF_GZ_D2C_ORDERED_LBN 105
+#define ESF_GZ_D2C_ORDERED_WIDTH 1
+#define ESF_GZ_D2C_SKIP_N_LBN 97
+#define ESF_GZ_D2C_SKIP_N_WIDTH 8
+#define ESF_GZ_D2C_RSVD_LBN 64
+#define ESF_GZ_D2C_RSVD_WIDTH 33
+#define ESF_GZ_D2C_COMPLETION_LBN 0
+#define ESF_GZ_D2C_COMPLETION_WIDTH 64
+#define ESE_GZ_SF_TX_DESC2CMPT_DSC_FMT_STRUCT_SIZE 124
+
+/* sf_tx_mem2mem_dsc_fmt */
+#define ESF_GZ_M2M_ADDR_SPC_EN_LBN 123
+#define ESF_GZ_M2M_ADDR_SPC_EN_WIDTH 1
+#define ESF_GZ_M2M_TRANSLATE_ADDR_LBN 122
+#define ESF_GZ_M2M_TRANSLATE_ADDR_WIDTH 1
+#define ESF_GZ_M2M_RSVD_LBN 120
+#define ESF_GZ_M2M_RSVD_WIDTH 2
+#define ESF_GZ_M2M_ADDR_SPC_LBN 108
+#define ESF_GZ_M2M_ADDR_SPC_WIDTH 12
+#define ESF_GZ_M2M_ADDR_SPC_PASID_LBN 86
+#define ESF_GZ_M2M_ADDR_SPC_PASID_WIDTH 22
+#define ESF_GZ_M2M_ADDR_SPC_MODE_LBN 84
+#define ESF_GZ_M2M_ADDR_SPC_MODE_WIDTH 2
+#define ESF_GZ_M2M_LEN_MINUS_1_LBN 64
+#define ESF_GZ_M2M_LEN_MINUS_1_WIDTH 20
+#define ESF_GZ_M2M_ADDR_LBN 0
+#define ESF_GZ_M2M_ADDR_WIDTH 64
+#define ESE_GZ_SF_TX_MEM2MEM_DSC_FMT_STRUCT_SIZE 124
+
+/* sf_tx_ovr_dsc_fmt */
+#define ESF_GZ_TX_PREFIX_MARK_EN_LBN 123
+#define ESF_GZ_TX_PREFIX_MARK_EN_WIDTH 1
+#define ESF_GZ_TX_PREFIX_INGRESS_MPORT_EN_LBN 122
+#define ESF_GZ_TX_PREFIX_INGRESS_MPORT_EN_WIDTH 1
+#define ESF_GZ_TX_PREFIX_INLINE_CAPSULE_META_LBN 121
+#define ESF_GZ_TX_PREFIX_INLINE_CAPSULE_META_WIDTH 1
+#define ESF_GZ_TX_PREFIX_EGRESS_MPORT_EN_LBN 120
+#define ESF_GZ_TX_PREFIX_EGRESS_MPORT_EN_WIDTH 1
+#define ESF_GZ_TX_PREFIX_RSRVD_LBN 64
+#define ESF_GZ_TX_PREFIX_RSRVD_WIDTH 56
+#define ESF_GZ_TX_PREFIX_EGRESS_MPORT_LBN 48
+#define ESF_GZ_TX_PREFIX_EGRESS_MPORT_WIDTH 16
+#define ESF_GZ_TX_PREFIX_INGRESS_MPORT_LBN 32
+#define ESF_GZ_TX_PREFIX_INGRESS_MPORT_WIDTH 16
+#define ESF_GZ_TX_PREFIX_MARK_LBN 0
+#define ESF_GZ_TX_PREFIX_MARK_WIDTH 32
+#define ESE_GZ_SF_TX_OVR_DSC_FMT_STRUCT_SIZE 124
+
+/* sf_tx_seg_dsc_fmt */
+#define ESF_GZ_TX_SEG_ADDR_SPC_EN_LBN 123
+#define ESF_GZ_TX_SEG_ADDR_SPC_EN_WIDTH 1
+#define ESF_GZ_TX_SEG_TRANSLATE_ADDR_LBN 122
+#define ESF_GZ_TX_SEG_TRANSLATE_ADDR_WIDTH 1
+#define ESF_GZ_TX_SEG_RSVD2_LBN 120
+#define ESF_GZ_TX_SEG_RSVD2_WIDTH 2
+#define ESF_GZ_TX_SEG_ADDR_SPC_LBN 108
+#define ESF_GZ_TX_SEG_ADDR_SPC_WIDTH 12
+#define ESF_GZ_TX_SEG_ADDR_SPC_PASID_LBN 86
+#define ESF_GZ_TX_SEG_ADDR_SPC_PASID_WIDTH 22
+#define ESF_GZ_TX_SEG_ADDR_SPC_MODE_LBN 84
+#define ESF_GZ_TX_SEG_ADDR_SPC_MODE_WIDTH 2
+#define ESF_GZ_TX_SEG_RSVD_LBN 80
+#define ESF_GZ_TX_SEG_RSVD_WIDTH 4
+#define ESF_GZ_TX_SEG_LEN_LBN 64
+#define ESF_GZ_TX_SEG_LEN_WIDTH 16
+#define ESF_GZ_TX_SEG_ADDR_LBN 0
+#define ESF_GZ_TX_SEG_ADDR_WIDTH 64
+#define ESE_GZ_SF_TX_SEG_DSC_FMT_STRUCT_SIZE 124
+
+/* sf_tx_std_dsc_fmt */
+#define ESF_GZ_TX_SEND_VLAN_INSERT_TCI_LBN 108
+#define ESF_GZ_TX_SEND_VLAN_INSERT_TCI_WIDTH 16
+#define ESF_GZ_TX_SEND_VLAN_INSERT_EN_LBN 107
+#define ESF_GZ_TX_SEND_VLAN_INSERT_EN_WIDTH 1
+#define ESF_GZ_TX_SEND_TSTAMP_REQ_LBN 106
+#define ESF_GZ_TX_SEND_TSTAMP_REQ_WIDTH 1
+#define ESF_GZ_TX_SEND_CSO_OUTER_L4_LBN 105
+#define ESF_GZ_TX_SEND_CSO_OUTER_L4_WIDTH 1
+#define ESF_GZ_TX_SEND_CSO_OUTER_L3_LBN 104
+#define ESF_GZ_TX_SEND_CSO_OUTER_L3_WIDTH 1
+#define ESF_GZ_TX_SEND_CSO_INNER_L3_LBN 101
+#define ESF_GZ_TX_SEND_CSO_INNER_L3_WIDTH 3
+#define ESF_GZ_TX_SEND_RSVD_LBN 99
+#define ESF_GZ_TX_SEND_RSVD_WIDTH 2
+#define ESF_GZ_TX_SEND_CSO_PARTIAL_EN_LBN 97
+#define ESF_GZ_TX_SEND_CSO_PARTIAL_EN_WIDTH 2
+#define ESF_GZ_TX_SEND_CSO_PARTIAL_CSUM_W_LBN 92
+#define ESF_GZ_TX_SEND_CSO_PARTIAL_CSUM_W_WIDTH 5
+#define ESF_GZ_TX_SEND_CSO_PARTIAL_START_W_LBN 83
+#define ESF_GZ_TX_SEND_CSO_PARTIAL_START_W_WIDTH 9
+#define ESF_GZ_TX_SEND_NUM_SEGS_LBN 78
+#define ESF_GZ_TX_SEND_NUM_SEGS_WIDTH 5
+#define ESF_GZ_TX_SEND_LEN_LBN 64
+#define ESF_GZ_TX_SEND_LEN_WIDTH 14
+#define ESF_GZ_TX_SEND_ADDR_LBN 0
+#define ESF_GZ_TX_SEND_ADDR_WIDTH 64
+#define ESE_GZ_SF_TX_STD_DSC_FMT_STRUCT_SIZE 124
+
+/* sf_tx_tso_dsc_fmt */
+#define ESF_GZ_TX_TSO_VLAN_INSERT_TCI_LBN 108
+#define ESF_GZ_TX_TSO_VLAN_INSERT_TCI_WIDTH 16
+#define ESF_GZ_TX_TSO_VLAN_INSERT_EN_LBN 107
+#define ESF_GZ_TX_TSO_VLAN_INSERT_EN_WIDTH 1
+#define ESF_GZ_TX_TSO_TSTAMP_REQ_LBN 106
+#define ESF_GZ_TX_TSO_TSTAMP_REQ_WIDTH 1
+#define ESF_GZ_TX_TSO_CSO_OUTER_L4_LBN 105
+#define ESF_GZ_TX_TSO_CSO_OUTER_L4_WIDTH 1
+#define ESF_GZ_TX_TSO_CSO_OUTER_L3_LBN 104
+#define ESF_GZ_TX_TSO_CSO_OUTER_L3_WIDTH 1
+#define ESF_GZ_TX_TSO_CSO_INNER_L3_LBN 101
+#define ESF_GZ_TX_TSO_CSO_INNER_L3_WIDTH 3
+#define ESF_GZ_TX_TSO_RSVD_LBN 94
+#define ESF_GZ_TX_TSO_RSVD_WIDTH 7
+#define ESF_GZ_TX_TSO_CSO_INNER_L4_LBN 93
+#define ESF_GZ_TX_TSO_CSO_INNER_L4_WIDTH 1
+#define ESF_GZ_TX_TSO_INNER_L4_OFF_W_LBN 85
+#define ESF_GZ_TX_TSO_INNER_L4_OFF_W_WIDTH 8
+#define ESF_GZ_TX_TSO_INNER_L3_OFF_W_LBN 77
+#define ESF_GZ_TX_TSO_INNER_L3_OFF_W_WIDTH 8
+#define ESF_GZ_TX_TSO_OUTER_L4_OFF_W_LBN 69
+#define ESF_GZ_TX_TSO_OUTER_L4_OFF_W_WIDTH 8
+#define ESF_GZ_TX_TSO_OUTER_L3_OFF_W_LBN 64
+#define ESF_GZ_TX_TSO_OUTER_L3_OFF_W_WIDTH 5
+#define ESF_GZ_TX_TSO_PAYLOAD_LEN_LBN 42
+#define ESF_GZ_TX_TSO_PAYLOAD_LEN_WIDTH 22
+#define ESF_GZ_TX_TSO_HDR_LEN_W_LBN 34
+#define ESF_GZ_TX_TSO_HDR_LEN_W_WIDTH 8
+#define ESF_GZ_TX_TSO_ED_OUTER_UDP_LEN_LBN 33
+#define ESF_GZ_TX_TSO_ED_OUTER_UDP_LEN_WIDTH 1
+#define ESF_GZ_TX_TSO_ED_INNER_IP_LEN_LBN 32
+#define ESF_GZ_TX_TSO_ED_INNER_IP_LEN_WIDTH 1
+#define ESF_GZ_TX_TSO_ED_OUTER_IP_LEN_LBN 31
+#define ESF_GZ_TX_TSO_ED_OUTER_IP_LEN_WIDTH 1
+#define ESF_GZ_TX_TSO_ED_INNER_IP4_ID_LBN 29
+#define ESF_GZ_TX_TSO_ED_INNER_IP4_ID_WIDTH 2
+#define ESF_GZ_TX_TSO_ED_OUTER_IP4_ID_LBN 27
+#define ESF_GZ_TX_TSO_ED_OUTER_IP4_ID_WIDTH 2
+#define ESF_GZ_TX_TSO_PAYLOAD_NUM_SEGS_LBN 17
+#define ESF_GZ_TX_TSO_PAYLOAD_NUM_SEGS_WIDTH 10
+#define ESF_GZ_TX_TSO_HDR_NUM_SEGS_LBN 14
+#define ESF_GZ_TX_TSO_HDR_NUM_SEGS_WIDTH 3
+#define ESF_GZ_TX_TSO_MSS_LBN 0
+#define ESF_GZ_TX_TSO_MSS_WIDTH 14
+#define ESE_GZ_SF_TX_TSO_DSC_FMT_STRUCT_SIZE 124
+
+/* Enum DESIGN_PARAMS */
+#define ESE_EF100_DP_GZ_RX_MAX_RUNT 17
+#define ESE_EF100_DP_GZ_VI_STRIDES 16
+#define ESE_EF100_DP_GZ_NMMU_PAGE_SIZES 15
+#define ESE_EF100_DP_GZ_EVQ_TIMER_TICK_NANOS 14
+#define ESE_EF100_DP_GZ_MEM2MEM_MAX_LEN 13
+#define ESE_EF100_DP_GZ_COMPAT 12
+#define ESE_EF100_DP_GZ_TSO_MAX_NUM_FRAMES 11
+#define ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_NUM_SEGS 10
+#define ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN 9
+#define ESE_EF100_DP_GZ_TXQ_SIZE_GRANULARITY 8
+#define ESE_EF100_DP_GZ_RXQ_SIZE_GRANULARITY 7
+#define ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS 6
+#define ESE_EF100_DP_GZ_TSO_MAX_HDR_LEN 5
+#define ESE_EF100_DP_GZ_RX_L4_CSUM_PROTOCOLS 4
+#define ESE_EF100_DP_GZ_NMMU_GROUP_SIZE 3
+#define ESE_EF100_DP_GZ_EVQ_UNSOL_CREDIT_SEQ_BITS 2
+#define ESE_EF100_DP_GZ_PARTIAL_TSTAMP_SUB_NANO_BITS 1
+#define ESE_EF100_DP_GZ_PAD 0
+
+/* Enum DESIGN_PARAM_DEFAULTS */
+#define ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN_DEFAULT 0x3fffff
+#define ESE_EF100_DP_GZ_TSO_MAX_NUM_FRAMES_DEFAULT 8192
+#define ESE_EF100_DP_GZ_MEM2MEM_MAX_LEN_DEFAULT 8192
+#define ESE_EF100_DP_GZ_RX_L4_CSUM_PROTOCOLS_DEFAULT 0x1106
+#define ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_NUM_SEGS_DEFAULT 0x3ff
+#define ESE_EF100_DP_GZ_RX_MAX_RUNT_DEFAULT 640
+#define ESE_EF100_DP_GZ_EVQ_TIMER_TICK_NANOS_DEFAULT 512
+#define ESE_EF100_DP_GZ_NMMU_PAGE_SIZES_DEFAULT 512
+#define ESE_EF100_DP_GZ_TSO_MAX_HDR_LEN_DEFAULT 192
+#define ESE_EF100_DP_GZ_RXQ_SIZE_GRANULARITY_DEFAULT 64
+#define ESE_EF100_DP_GZ_TXQ_SIZE_GRANULARITY_DEFAULT 64
+#define ESE_EF100_DP_GZ_NMMU_GROUP_SIZE_DEFAULT 32
+#define ESE_EF100_DP_GZ_VI_STRIDES_DEFAULT 16
+#define ESE_EF100_DP_GZ_EVQ_UNSOL_CREDIT_SEQ_BITS_DEFAULT 7
+#define ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT 4
+#define ESE_EF100_DP_GZ_PARTIAL_TSTAMP_SUB_NANO_BITS_DEFAULT 2
+#define ESE_EF100_DP_GZ_COMPAT_DEFAULT 0
+
+/* Enum HOST_IF_CONSTANTS */
+#define ESE_GZ_FCW_LEN 0x4C
+#define ESE_GZ_RX_PKT_PREFIX_LEN 22
+
+/* Enum PCI_CONSTANTS */
+#define ESE_GZ_PCI_BASE_CONFIG_SPACE_SIZE 256
+#define ESE_GZ_PCI_EXPRESS_XCAP_HDR_SIZE 4
+
+/* Enum RH_HCLASS_L2_CLASS */
+#define ESE_GZ_RH_HCLASS_L2_CLASS_E2_0123VLAN 1
+#define ESE_GZ_RH_HCLASS_L2_CLASS_OTHER 0
+
+/* Enum RH_HCLASS_L2_STATUS */
+#define ESE_GZ_RH_HCLASS_L2_STATUS_RESERVED 3
+#define ESE_GZ_RH_HCLASS_L2_STATUS_FCS_ERR 2
+#define ESE_GZ_RH_HCLASS_L2_STATUS_LEN_ERR 1
+#define ESE_GZ_RH_HCLASS_L2_STATUS_OK 0
+
+/* Enum RH_HCLASS_L3_CLASS */
+#define ESE_GZ_RH_HCLASS_L3_CLASS_OTHER 3
+#define ESE_GZ_RH_HCLASS_L3_CLASS_IP6 2
+#define ESE_GZ_RH_HCLASS_L3_CLASS_IP4BAD 1
+#define ESE_GZ_RH_HCLASS_L3_CLASS_IP4GOOD 0
+
+/* Enum RH_HCLASS_L4_CLASS */
+#define ESE_GZ_RH_HCLASS_L4_CLASS_OTHER 3
+#define ESE_GZ_RH_HCLASS_L4_CLASS_FRAG 2
+#define ESE_GZ_RH_HCLASS_L4_CLASS_UDP 1
+#define ESE_GZ_RH_HCLASS_L4_CLASS_TCP 0
+
+/* Enum RH_HCLASS_L4_CSUM */
+#define ESE_GZ_RH_HCLASS_L4_CSUM_GOOD 1
+#define ESE_GZ_RH_HCLASS_L4_CSUM_BAD_OR_UNKNOWN 0
+
+/* Enum RH_HCLASS_TUNNEL_CLASS */
+#define ESE_GZ_RH_HCLASS_TUNNEL_CLASS_RESERVED_7 7
+#define ESE_GZ_RH_HCLASS_TUNNEL_CLASS_RESERVED_6 6
+#define ESE_GZ_RH_HCLASS_TUNNEL_CLASS_RESERVED_5 5
+#define ESE_GZ_RH_HCLASS_TUNNEL_CLASS_RESERVED_4 4
+#define ESE_GZ_RH_HCLASS_TUNNEL_CLASS_GENEVE 3
+#define ESE_GZ_RH_HCLASS_TUNNEL_CLASS_NVGRE 2
+#define ESE_GZ_RH_HCLASS_TUNNEL_CLASS_VXLAN 1
+#define ESE_GZ_RH_HCLASS_TUNNEL_CLASS_NONE 0
+
+/* Enum TX_DESC_CSO_PARTIAL_EN */
+#define ESE_GZ_TX_DESC_CSO_PARTIAL_EN_TCP 2
+#define ESE_GZ_TX_DESC_CSO_PARTIAL_EN_UDP 1
+#define ESE_GZ_TX_DESC_CSO_PARTIAL_EN_OFF 0
+
+/* Enum TX_DESC_CS_INNER_L3 */
+#define ESE_GZ_TX_DESC_CS_INNER_L3_GENEVE 3
+#define ESE_GZ_TX_DESC_CS_INNER_L3_NVGRE 2
+#define ESE_GZ_TX_DESC_CS_INNER_L3_VXLAN 1
+#define ESE_GZ_TX_DESC_CS_INNER_L3_OFF 0
+
+/* Enum TX_DESC_IP4_ID */
+#define ESE_GZ_TX_DESC_IP4_ID_INC_MOD16 2
+#define ESE_GZ_TX_DESC_IP4_ID_INC_MOD15 1
+#define ESE_GZ_TX_DESC_IP4_ID_NO_OP 0
+/**************************************************************************/
+
+#define ESF_GZ_EV_DEBUG_EVENT_GEN_FLAGS_LBN 44
+#define ESF_GZ_EV_DEBUG_EVENT_GEN_FLAGS_WIDTH 4
+#define ESF_GZ_EV_DEBUG_SRC_QID_LBN 32
+#define ESF_GZ_EV_DEBUG_SRC_QID_WIDTH 12
+#define ESF_GZ_EV_DEBUG_SEQ_NUM_LBN 16
+#define ESF_GZ_EV_DEBUG_SEQ_NUM_WIDTH 16
+
+#endif /* EFX_EF100_REGS_H */
diff --git a/drivers/net/ethernet/sfc/ef100_rx.c b/drivers/net/ethernet/sfc/ef100_rx.c
new file mode 100644
index 000000000000..13ba1a4f66fc
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef100_rx.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2019 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include "ef100_rx.h"
+#include "rx_common.h"
+#include "efx.h"
+#include "nic_common.h"
+#include "mcdi_functions.h"
+#include "ef100_regs.h"
+#include "ef100_nic.h"
+#include "io.h"
+
+/* Get the value of a field in the RX prefix */
+#define PREFIX_OFFSET_W(_f) (ESF_GZ_RX_PREFIX_ ## _f ## _LBN / 32)
+#define PREFIX_OFFSET_B(_f) (ESF_GZ_RX_PREFIX_ ## _f ## _LBN % 32)
+#define PREFIX_WIDTH_MASK(_f) ((1UL << ESF_GZ_RX_PREFIX_ ## _f ## _WIDTH) - 1)
+#define PREFIX_WORD(_p, _f) le32_to_cpu((__force __le32)(_p)[PREFIX_OFFSET_W(_f)])
+#define PREFIX_FIELD(_p, _f) ((PREFIX_WORD(_p, _f) >> PREFIX_OFFSET_B(_f)) & \
+ PREFIX_WIDTH_MASK(_f))
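+
+/* For example, PREFIX_FIELD(prefix, LENGTH) extracts the 14-bit packet
+ * length, treating the prefix as an array of little-endian 32-bit words.
+ */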
+
+#define ESF_GZ_RX_PREFIX_NT_OR_INNER_L3_CLASS_LBN \
+ (ESF_GZ_RX_PREFIX_CLASS_LBN + ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L3_CLASS_LBN)
+#define ESF_GZ_RX_PREFIX_NT_OR_INNER_L3_CLASS_WIDTH \
+ ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L3_CLASS_WIDTH
+
+static bool check_fcs(struct efx_channel *channel, u32 *prefix)
+{
+ u16 rxclass;
+ u8 l2status;
+
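+ /* The CLASS field is itself the rh_egres_hclass bitfield, so reuse
+ * PREFIX_FIELD() on the extracted 16-bit value to pull out L2_STATUS.
+ */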
+ rxclass = le16_to_cpu((__force __le16)PREFIX_FIELD(prefix, CLASS));
+ l2status = PREFIX_FIELD(&rxclass, HCLASS_L2_STATUS);
+
+ if (likely(l2status == ESE_GZ_RH_HCLASS_L2_STATUS_OK))
+ /* Everything is ok */
+ return false;
+
+ if (l2status == ESE_GZ_RH_HCLASS_L2_STATUS_FCS_ERR)
+ channel->n_rx_eth_crc_err++;
+ return true;
+}
+
+void __ef100_rx_packet(struct efx_channel *channel)
+{
+ struct efx_rx_buffer *rx_buf = efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
+ struct efx_nic *efx = channel->efx;
+ u8 *eh = efx_rx_buf_va(rx_buf);
+ __wsum csum = 0;
+ u32 *prefix;
+
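+ /* The RX prefix (metadata) sits immediately before the packet data;
+ * rx_buf->page_offset was already advanced past it in ef100_rx_packet().
+ */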
+ prefix = (u32 *)(eh - ESE_GZ_RX_PKT_PREFIX_LEN);
+
+ if (check_fcs(channel, prefix) &&
+ unlikely(!(efx->net_dev->features & NETIF_F_RXALL)))
+ goto out;
+
+ rx_buf->len = le16_to_cpu((__force __le16)PREFIX_FIELD(prefix, LENGTH));
+ if (rx_buf->len <= sizeof(struct ethhdr)) {
+ if (net_ratelimit())
+ netif_err(channel->efx, rx_err, channel->efx->net_dev,
+ "RX packet too small (%d)\n", rx_buf->len);
+ ++channel->n_rx_frm_trunc;
+ goto out;
+ }
+
+ if (likely(efx->net_dev->features & NETIF_F_RXCSUM)) {
+ if (PREFIX_FIELD(prefix, NT_OR_INNER_L3_CLASS) ==
+     ESE_GZ_RH_HCLASS_L3_CLASS_IP4BAD) {
+ ++channel->n_rx_ip_hdr_chksum_err;
+ } else {
+ u16 sum = be16_to_cpu((__force __be16)PREFIX_FIELD(prefix, CSUM_FRAME));
+
+ csum = (__force __wsum) sum;
+ }
+ }
+
+ if (channel->type->receive_skb) {
+ struct efx_rx_queue *rx_queue =
+ efx_channel_get_rx_queue(channel);
+
+ /* no support for special channels yet, so just discard */
+ WARN_ON_ONCE(1);
+ efx_free_rx_buffers(rx_queue, rx_buf, 1);
+ goto out;
+ }
+
+ efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh, csum);
+
+out:
+ channel->rx_pkt_n_frags = 0;
+}
+
+static void ef100_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index)
+{
+ struct efx_rx_buffer *rx_buf = efx_rx_buffer(rx_queue, index);
+ struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
+ struct efx_nic *efx = rx_queue->efx;
+
+ ++rx_queue->rx_packets;
+
+ netif_vdbg(efx, rx_status, efx->net_dev,
+ "RX queue %d received id %x\n",
+ efx_rx_queue_index(rx_queue), index);
+
+ efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
+
+ prefetch(efx_rx_buf_va(rx_buf));
+
+ rx_buf->page_offset += efx->rx_prefix_size;
+
+ efx_recycle_rx_pages(channel, rx_buf, 1);
+
+ efx_rx_flush_packet(channel);
+ channel->rx_pkt_n_frags = 1;
+ channel->rx_pkt_index = index;
+}
+
+void efx_ef100_ev_rx(struct efx_channel *channel, const efx_qword_t *p_event)
+{
+ struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
+ unsigned int n_packets =
+ EFX_QWORD_FIELD(*p_event, ESF_GZ_EV_RXPKTS_NUM_PKT);
+ int i;
+
+ WARN_ON_ONCE(!n_packets);
+ if (n_packets > 1)
+ ++channel->n_rx_merge_events;
+
+ channel->irq_mod_score += 2 * n_packets;
+
+ for (i = 0; i < n_packets; ++i) {
+ ef100_rx_packet(rx_queue,
+ rx_queue->removed_count & rx_queue->ptr_mask);
+ ++rx_queue->removed_count;
+ }
+}
+
+void ef100_rx_write(struct efx_rx_queue *rx_queue)
+{
+ struct efx_rx_buffer *rx_buf;
+ unsigned int idx;
+ efx_qword_t *rxd;
+ efx_dword_t rxdb;
+
+ while (rx_queue->notified_count != rx_queue->added_count) {
+ idx = rx_queue->notified_count & rx_queue->ptr_mask;
+ rx_buf = efx_rx_buffer(rx_queue, idx);
+ rxd = efx_rx_desc(rx_queue, idx);
+
+ EFX_POPULATE_QWORD_1(*rxd, ESF_GZ_RX_BUF_ADDR, rx_buf->dma_addr);
+
+ ++rx_queue->notified_count;
+ }
+
+ wmb(); /* Ensure descriptors are written before they are fetched */
+ EFX_POPULATE_DWORD_1(rxdb, ERF_GZ_RX_RING_PIDX,
+ rx_queue->added_count & rx_queue->ptr_mask);
+ efx_writed_page(rx_queue->efx, &rxdb,
+ ER_GZ_RX_RING_DOORBELL, efx_rx_queue_index(rx_queue));
+}
diff --git a/drivers/net/ethernet/sfc/ef100_rx.h b/drivers/net/ethernet/sfc/ef100_rx.h
new file mode 100644
index 000000000000..f2f266863966
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef100_rx.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ * Copyright 2019-2020 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_EF100_RX_H
+#define EFX_EF100_RX_H
+
+#include "net_driver.h"
+
+void efx_ef100_ev_rx(struct efx_channel *channel, const efx_qword_t *p_event);
+void ef100_rx_write(struct efx_rx_queue *rx_queue);
+void __ef100_rx_packet(struct efx_channel *channel);
+
+#endif
diff --git a/drivers/net/ethernet/sfc/ef100_tx.c b/drivers/net/ethernet/sfc/ef100_tx.c
new file mode 100644
index 000000000000..a09546e43408
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef100_tx.c
@@ -0,0 +1,408 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ * Copyright 2019-2020 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <net/ip6_checksum.h>
+
+#include "net_driver.h"
+#include "tx_common.h"
+#include "nic_common.h"
+#include "mcdi_functions.h"
+#include "ef100_regs.h"
+#include "io.h"
+#include "ef100_tx.h"
+#include "ef100_nic.h"
+
+int ef100_tx_probe(struct efx_tx_queue *tx_queue)
+{
+ /* Allocate an extra descriptor for the QMDA status completion entry */
+ return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf,
+ (tx_queue->ptr_mask + 2) *
+ sizeof(efx_oword_t),
+ GFP_KERNEL);
+}
+
+void ef100_tx_init(struct efx_tx_queue *tx_queue)
+{
+ /* must be the inverse of lookup in efx_get_tx_channel */
+ tx_queue->core_txq =
+ netdev_get_tx_queue(tx_queue->efx->net_dev,
+ tx_queue->channel->channel -
+ tx_queue->efx->tx_channel_offset);
+
+ if (efx_mcdi_tx_init(tx_queue, false))
+ netdev_WARN(tx_queue->efx->net_dev,
+ "failed to initialise TXQ %d\n", tx_queue->queue);
+}
+
+static bool ef100_tx_can_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ struct ef100_nic_data *nic_data;
+ struct efx_tx_buffer *buffer;
+ struct tcphdr *tcphdr;
+ struct iphdr *iphdr;
+ size_t header_len;
+ u32 mss;
+
+ nic_data = efx->nic_data;
+
+ if (!skb_is_gso_tcp(skb))
+ return false;
+ if (!(efx->net_dev->features & NETIF_F_TSO))
+ return false;
+
+ mss = skb_shinfo(skb)->gso_size;
+ if (unlikely(mss < 4)) {
+ WARN_ONCE(1, "MSS of %u is too small for TSO\n", mss);
+ return false;
+ }
+
+ header_len = efx_tx_tso_header_length(skb);
+ if (header_len > nic_data->tso_max_hdr_len)
+ return false;
+
+ if (skb_shinfo(skb)->gso_segs > nic_data->tso_max_payload_num_segs) {
+ /* net_dev->gso_max_segs should've caught this */
+ WARN_ON_ONCE(1);
+ return false;
+ }
+
+ if (skb->data_len / mss > nic_data->tso_max_frames)
+ return false;
+
+ /* net_dev->gso_max_size should've caught this */
+ if (WARN_ON_ONCE(skb->data_len > nic_data->tso_max_payload_len))
+ return false;
+
+ /* Reserve an empty buffer for the TSO V3 descriptor.
+ * Convey the length of the header since we already know it.
+ */
+ buffer = efx_tx_queue_get_insert_buffer(tx_queue);
+ buffer->flags = EFX_TX_BUF_TSO_V3 | EFX_TX_BUF_CONT;
+ buffer->len = header_len;
+ buffer->unmap_len = 0;
+ buffer->skb = skb;
+ ++tx_queue->insert_count;
+
+ /* Adjust the TCP checksum to exclude the total length, since we set
+ * ED_INNER_IP_LEN in the descriptor.
+ */
+ tcphdr = tcp_hdr(skb);
+ if (skb_is_gso_v6(skb)) {
+ tcphdr->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ } else {
+ iphdr = ip_hdr(skb);
+ tcphdr->check = ~csum_tcpudp_magic(iphdr->saddr, iphdr->daddr,
+ 0, IPPROTO_TCP, 0);
+ }
+ return true;
+}
+
+static efx_oword_t *ef100_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
+{
+ if (likely(tx_queue->txd.buf.addr))
+ return ((efx_oword_t *)tx_queue->txd.buf.addr) + index;
+ else
+ return NULL;
+}
+
+void ef100_notify_tx_desc(struct efx_tx_queue *tx_queue)
+{
+ unsigned int write_ptr;
+ efx_dword_t reg;
+
+ if (unlikely(tx_queue->notify_count == tx_queue->write_count))
+ return;
+
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ /* The write pointer goes into the high word */
+ EFX_POPULATE_DWORD_1(reg, ERF_GZ_TX_RING_PIDX, write_ptr);
+ efx_writed_page(tx_queue->efx, &reg,
+ ER_GZ_TX_RING_DOORBELL, tx_queue->queue);
+ tx_queue->notify_count = tx_queue->write_count;
+ tx_queue->xmit_more_available = false;
+}
+
+static void ef100_tx_push_buffers(struct efx_tx_queue *tx_queue)
+{
+ ef100_notify_tx_desc(tx_queue);
+ ++tx_queue->pushes;
+}
+
+static void ef100_set_tx_csum_partial(const struct sk_buff *skb,
+ struct efx_tx_buffer *buffer, efx_oword_t *txd)
+{
+ efx_oword_t csum;
+ int csum_start;
+
+ if (!skb || skb->ip_summed != CHECKSUM_PARTIAL)
+ return;
+
+ /* skb->csum_start has the offset from head, but we need the offset
+ * from data.
+ */
+ csum_start = skb_checksum_start_offset(skb);
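+ /* The CSO_PARTIAL_*_W fields are in units of 2-byte words, hence the
+ * shifts below.
+ */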
+ EFX_POPULATE_OWORD_3(csum,
+ ESF_GZ_TX_SEND_CSO_PARTIAL_EN, 1,
+ ESF_GZ_TX_SEND_CSO_PARTIAL_START_W,
+ csum_start >> 1,
+ ESF_GZ_TX_SEND_CSO_PARTIAL_CSUM_W,
+ skb->csum_offset >> 1);
+ EFX_OR_OWORD(*txd, *txd, csum);
+}
+
+static void ef100_set_tx_hw_vlan(const struct sk_buff *skb, efx_oword_t *txd)
+{
+ u16 vlan_tci = skb_vlan_tag_get(skb);
+ efx_oword_t vlan;
+
+ EFX_POPULATE_OWORD_2(vlan,
+ ESF_GZ_TX_SEND_VLAN_INSERT_EN, 1,
+ ESF_GZ_TX_SEND_VLAN_INSERT_TCI, vlan_tci);
+ EFX_OR_OWORD(*txd, *txd, vlan);
+}
+
+static void ef100_make_send_desc(struct efx_nic *efx,
+ const struct sk_buff *skb,
+ struct efx_tx_buffer *buffer, efx_oword_t *txd,
+ unsigned int segment_count)
+{
+ /* TX send descriptor */
+ EFX_POPULATE_OWORD_3(*txd,
+ ESF_GZ_TX_SEND_NUM_SEGS, segment_count,
+ ESF_GZ_TX_SEND_LEN, buffer->len,
+ ESF_GZ_TX_SEND_ADDR, buffer->dma_addr);
+
+ if (likely(efx->net_dev->features & NETIF_F_HW_CSUM))
+ ef100_set_tx_csum_partial(skb, buffer, txd);
+ if (efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_TX &&
+ skb && skb_vlan_tag_present(skb))
+ ef100_set_tx_hw_vlan(skb, txd);
+}
+
+static void ef100_make_tso_desc(struct efx_nic *efx,
+ const struct sk_buff *skb,
+ struct efx_tx_buffer *buffer, efx_oword_t *txd,
+ unsigned int segment_count)
+{
+ u32 mangleid = (efx->net_dev->features & NETIF_F_TSO_MANGLEID) ||
+ skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID ?
+ ESE_GZ_TX_DESC_IP4_ID_NO_OP :
+ ESE_GZ_TX_DESC_IP4_ID_INC_MOD16;
+ u16 vlan_enable = efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_TX ?
+ skb_vlan_tag_present(skb) : 0;
+ unsigned int len, ip_offset, tcp_offset, payload_segs;
+ u16 vlan_tci = skb_vlan_tag_get(skb);
+ u32 mss = skb_shinfo(skb)->gso_size;
+
+ len = skb->len - buffer->len;
+ /* We use 1 for the TSO descriptor and 1 for the header */
+ payload_segs = segment_count - 2;
+ ip_offset = skb_network_offset(skb);
+ tcp_offset = skb_transport_offset(skb);
+
+ EFX_POPULATE_OWORD_13(*txd,
+ ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_TSO,
+ ESF_GZ_TX_TSO_MSS, mss,
+ ESF_GZ_TX_TSO_HDR_NUM_SEGS, 1,
+ ESF_GZ_TX_TSO_PAYLOAD_NUM_SEGS, payload_segs,
+ ESF_GZ_TX_TSO_HDR_LEN_W, buffer->len >> 1,
+ ESF_GZ_TX_TSO_PAYLOAD_LEN, len,
+ ESF_GZ_TX_TSO_CSO_INNER_L4, 1,
+ ESF_GZ_TX_TSO_INNER_L3_OFF_W, ip_offset >> 1,
+ ESF_GZ_TX_TSO_INNER_L4_OFF_W, tcp_offset >> 1,
+ ESF_GZ_TX_TSO_ED_INNER_IP4_ID, mangleid,
+ ESF_GZ_TX_TSO_ED_INNER_IP_LEN, 1,
+ ESF_GZ_TX_TSO_VLAN_INSERT_EN, vlan_enable,
+ ESF_GZ_TX_TSO_VLAN_INSERT_TCI, vlan_tci
+ );
+}
+
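
The "- 2" above accounts for the two non-payload entries in the chain: the
TSO option descriptor reserved in ef100_tx_can_tso() and the header buffer.
A one-liner sketch of that arithmetic (the function name is illustrative):

#include <assert.h>

static unsigned int tso_segment_count(unsigned int payload_frags)
{
	return payload_frags + 2;	/* +1 TSO desc, +1 header buffer */
}

int main(void)
{
	unsigned int segment_count = tso_segment_count(3);

	assert(segment_count - 2 == 3);	/* what lands in PAYLOAD_NUM_SEGS */
	return 0;
}
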
+static void ef100_tx_make_descriptors(struct efx_tx_queue *tx_queue,
+ const struct sk_buff *skb,
+ unsigned int segment_count)
+{
+ unsigned int old_write_count = tx_queue->write_count;
+ unsigned int new_write_count = old_write_count;
+ struct efx_tx_buffer *buffer;
+ unsigned int next_desc_type;
+ unsigned int write_ptr;
+ efx_oword_t *txd;
+ unsigned int nr_descs = tx_queue->insert_count - old_write_count;
+
+ if (unlikely(nr_descs == 0))
+ return;
+
+ if (segment_count)
+ next_desc_type = ESE_GZ_TX_DESC_TYPE_TSO;
+ else
+ next_desc_type = ESE_GZ_TX_DESC_TYPE_SEND;
+
+ /* if it's a raw write (such as XDP) then always SEND single frames */
+ if (!skb)
+ nr_descs = 1;
+
+ do {
+ write_ptr = new_write_count & tx_queue->ptr_mask;
+ buffer = &tx_queue->buffer[write_ptr];
+ txd = ef100_tx_desc(tx_queue, write_ptr);
+ ++new_write_count;
+
+ /* Create TX descriptor ring entry */
+ tx_queue->packet_write_count = new_write_count;
+
+ switch (next_desc_type) {
+ case ESE_GZ_TX_DESC_TYPE_SEND:
+ ef100_make_send_desc(tx_queue->efx, skb,
+ buffer, txd, nr_descs);
+ break;
+ case ESE_GZ_TX_DESC_TYPE_TSO:
+ /* TX TSO descriptor */
+ WARN_ON_ONCE(!(buffer->flags & EFX_TX_BUF_TSO_V3));
+ ef100_make_tso_desc(tx_queue->efx, skb,
+ buffer, txd, nr_descs);
+ break;
+ default:
+ /* TX segment descriptor */
+ EFX_POPULATE_OWORD_3(*txd,
+ ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_SEG,
+ ESF_GZ_TX_SEG_LEN, buffer->len,
+ ESF_GZ_TX_SEG_ADDR, buffer->dma_addr);
+ }
+ /* if it's a raw write (such as XDP) then always SEND */
+ next_desc_type = skb ? ESE_GZ_TX_DESC_TYPE_SEG :
+ ESE_GZ_TX_DESC_TYPE_SEND;
+
+ } while (new_write_count != tx_queue->insert_count);
+
+ wmb(); /* Ensure descriptors are written before they are fetched */
+
+ tx_queue->write_count = new_write_count;
+
+ /* The write_count above must be updated before reading
+ * channel->holdoff_doorbell to avoid a race with the
+ * completion path, so ensure these operations are not
+ * re-ordered. This also flushes the update of write_count
+ * back into the cache.
+ */
+ smp_mb();
+}
+
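
The wmb() before publishing write_count is the classic producer-side
publication barrier. A user-space analogue using C11 release/acquire in
place of the kernel barriers (ring size and names are illustrative):

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

static uint64_t ring[16];		/* descriptor payloads */
static _Atomic uint32_t write_count;	/* producer index */

static void publish(uint32_t idx, uint64_t desc)
{
	ring[idx & 15] = desc;		/* plain store of the descriptor */
	/* release ordering plays the role of wmb(): the store above
	 * cannot be reordered past the index update that publishes it */
	atomic_store_explicit(&write_count, idx + 1, memory_order_release);
}

int main(void)
{
	publish(0, 0xabcd);
	assert(atomic_load_explicit(&write_count,
				    memory_order_acquire) == 1);
	return 0;
}
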
+void ef100_tx_write(struct efx_tx_queue *tx_queue)
+{
+ ef100_tx_make_descriptors(tx_queue, NULL, 0);
+ ef100_tx_push_buffers(tx_queue);
+}
+
+void ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event)
+{
+ unsigned int tx_done =
+ EFX_QWORD_FIELD(*p_event, ESF_GZ_EV_TXCMPL_NUM_DESC);
+ unsigned int qlabel =
+ EFX_QWORD_FIELD(*p_event, ESF_GZ_EV_TXCMPL_Q_LABEL);
+ struct efx_tx_queue *tx_queue =
+ efx_channel_get_tx_queue(channel, qlabel);
+ unsigned int tx_index = (tx_queue->read_count + tx_done - 1) &
+ tx_queue->ptr_mask;
+
+ efx_xmit_done(tx_queue, tx_index);
+}
+
+/* Add a socket buffer to a TX queue
+ *
+ * You must hold netif_tx_lock() to call this function.
+ *
+ * Returns 0 on success, error code otherwise. In case of an error this
+ * function will free the SKB.
+ */
+int ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
+{
+ unsigned int old_insert_count = tx_queue->insert_count;
+ struct efx_nic *efx = tx_queue->efx;
+ bool xmit_more = netdev_xmit_more();
+ unsigned int fill_level;
+ unsigned int segments;
+ int rc;
+
+ if (!tx_queue->buffer || !tx_queue->ptr_mask) {
+ netif_stop_queue(efx->net_dev);
+ dev_kfree_skb_any(skb);
+ return -ENODEV;
+ }
+
+ segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0;
+ if (segments == 1)
+ segments = 0; /* Don't use TSO/GSO for a single segment. */
+ if (segments && !ef100_tx_can_tso(tx_queue, skb)) {
+ rc = efx_tx_tso_fallback(tx_queue, skb);
+ tx_queue->tso_fallbacks++;
+ if (rc)
+ goto err;
+ else
+ return 0;
+ }
+
+ /* Map for DMA and create descriptors */
+ rc = efx_tx_map_data(tx_queue, skb, segments);
+ if (rc)
+ goto err;
+ ef100_tx_make_descriptors(tx_queue, skb, segments);
+
+ fill_level = efx_channel_tx_fill_level(tx_queue->channel);
+ if (fill_level > efx->txq_stop_thresh) {
+ netif_tx_stop_queue(tx_queue->core_txq);
+ /* Re-read after a memory barrier in case we've raced with
+ * the completion path. Otherwise there's a danger we'll never
+ * restart the queue if all completions have just happened.
+ */
+ smp_mb();
+ fill_level = efx_channel_tx_fill_level(tx_queue->channel);
+ if (fill_level < efx->txq_stop_thresh)
+ netif_tx_start_queue(tx_queue->core_txq);
+ }
+
+ if (__netdev_tx_sent_queue(tx_queue->core_txq, skb->len, xmit_more))
+ tx_queue->xmit_more_available = false; /* push doorbell */
+ else if (tx_queue->write_count - tx_queue->notify_count > 255)
+ /* Ensure we never push more than 256 packets at once */
+ tx_queue->xmit_more_available = false; /* push */
+ else
+ tx_queue->xmit_more_available = true; /* don't push yet */
+
+ if (!tx_queue->xmit_more_available)
+ ef100_tx_push_buffers(tx_queue);
+
+ if (segments) {
+ tx_queue->tso_bursts++;
+ tx_queue->tso_packets += segments;
+ tx_queue->tx_packets += segments;
+ } else {
+ tx_queue->tx_packets++;
+ }
+ return 0;
+
+err:
+ efx_enqueue_unwind(tx_queue, old_insert_count);
+ if (!IS_ERR_OR_NULL(skb))
+ dev_kfree_skb_any(skb);
+
+ /* If we're not expecting another transmit and we had something to push
+ * on this queue then we need to push here to get the previous packets
+ * out. We only enter this branch from before the 'Update BQL' section
+ * above, so xmit_more_available still refers to the old state.
+ */
+ if (tx_queue->xmit_more_available && !xmit_more)
+ ef100_tx_push_buffers(tx_queue);
+ return rc;
+}
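
The three-way doorbell decision at the end of ef100_enqueue_skb() condenses
to: push when BQL reports the batch is complete, push when 256 packets are
already pending, otherwise defer while xmit_more promises another packet.
A compilable restatement (the function name is illustrative):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool should_push(bool bql_wants_flush, uint32_t write_count,
			uint32_t notify_count)
{
	if (bql_wants_flush)
		return true;		/* stack is done batching */
	if (write_count - notify_count > 255)
		return true;		/* cap deferred doorbells at 256 */
	return false;			/* xmit_more: hold the doorbell */
}

int main(void)
{
	assert(should_push(true, 10, 10));
	assert(!should_push(false, 100, 0));
	assert(should_push(false, 300, 0));
	return 0;
}
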
diff --git a/drivers/net/ethernet/sfc/ef100_tx.h b/drivers/net/ethernet/sfc/ef100_tx.h
new file mode 100644
index 000000000000..fa23e430bdd7
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef100_tx.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ * Copyright 2019-2020 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_EF100_TX_H
+#define EFX_EF100_TX_H
+
+#include "net_driver.h"
+
+int ef100_tx_probe(struct efx_tx_queue *tx_queue);
+void ef100_tx_init(struct efx_tx_queue *tx_queue);
+void ef100_tx_write(struct efx_tx_queue *tx_queue);
+void ef100_notify_tx_desc(struct efx_tx_queue *tx_queue);
+unsigned int ef100_tx_max_skb_descs(struct efx_nic *efx);
+
+void ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event);
+
+netdev_tx_t ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
+#endif
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index d60acaa3879d..e06fa89f2d72 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -25,6 +25,7 @@
#include "efx.h"
#include "efx_common.h"
#include "efx_channels.h"
+#include "ef100.h"
#include "rx_common.h"
#include "tx_common.h"
#include "nic.h"
@@ -1355,8 +1356,14 @@ static int __init efx_init_module(void)
if (rc < 0)
goto err_pci;
+ rc = pci_register_driver(&ef100_pci_driver);
+ if (rc < 0)
+ goto err_pci_ef100;
+
return 0;
+ err_pci_ef100:
+ pci_unregister_driver(&efx_pci_driver);
err_pci:
efx_destroy_reset_workqueue();
err_reset:
@@ -1373,6 +1380,7 @@ static void __exit efx_exit_module(void)
{
printk(KERN_INFO "Solarflare NET driver unloading\n");
+ pci_unregister_driver(&ef100_pci_driver);
pci_unregister_driver(&efx_pci_driver);
efx_destroy_reset_workqueue();
#ifdef CONFIG_SFC_SRIOV
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index e7e7d8d1a07b..a9808e86068d 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -8,7 +8,10 @@
#ifndef EFX_EFX_H
#define EFX_EFX_H
+#include <linux/indirect_call_wrapper.h>
#include "net_driver.h"
+#include "ef100_rx.h"
+#include "ef100_tx.h"
#include "filter.h"
int efx_net_open(struct net_device *net_dev);
@@ -18,13 +21,18 @@ int efx_net_stop(struct net_device *net_dev);
void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
struct net_device *net_dev);
-netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
+netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
+static inline netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
+{
+ return INDIRECT_CALL_2(tx_queue->efx->type->tx_enqueue,
+ ef100_enqueue_skb, __efx_enqueue_skb,
+ tx_queue, skb);
+}
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
void efx_xmit_done_single(struct efx_tx_queue *tx_queue);
int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
void *type_data);
extern unsigned int efx_piobuf_size;
-extern bool efx_separate_tx_channels;
/* RX */
void __efx_rx_packet(struct efx_channel *channel);
@@ -33,7 +41,9 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
static inline void efx_rx_flush_packet(struct efx_channel *channel)
{
if (channel->rx_pkt_n_frags)
- __efx_rx_packet(channel);
+ INDIRECT_CALL_2(channel->efx->type->rx_packet,
+ __ef100_rx_packet, __efx_rx_packet,
+ channel);
}
/* Maximum number of TCP segments we support for soft-TSO */
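
The INDIRECT_CALL_2() wrappers added above let the compiler emit direct
calls to the two known implementations, avoiding a retpoline-penalised
indirect branch on the hot path. A stand-alone sketch of the expansion
shape (impl_a/impl_b/dispatch are illustrative, not the kernel macro):

#include <assert.h>

static int impl_a(int x) { return x + 1; }
static int impl_b(int x) { return x * 2; }

static int dispatch(int (*fn)(int), int x)
{
	if (fn == impl_a)
		return impl_a(x);	/* direct, predictable call */
	if (fn == impl_b)
		return impl_b(x);
	return fn(x);			/* fallback: true indirect call */
}

int main(void)
{
	assert(dispatch(impl_a, 1) == 2);
	assert(dispatch(impl_b, 3) == 6);
	return 0;
}
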
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
index 5667694c6514..26b05ec4e712 100644
--- a/drivers/net/ethernet/sfc/efx_common.c
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -814,14 +814,18 @@ fail:
*/
int efx_reset(struct efx_nic *efx, enum reset_type method)
{
+ int rc, rc2 = 0;
bool disabled;
- int rc, rc2;
netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
RESET_TYPE(method));
efx_device_detach_sync(efx);
- efx_reset_down(efx, method);
+ /* efx_reset_down() grabs locks that prevent recovery on EF100.
+ * EF100 reset is handled in the efx_nic_type callback below.
+ */
+ if (efx_nic_rev(efx) != EFX_REV_EF100)
+ efx_reset_down(efx, method);
rc = efx->type->reset(efx, method);
if (rc) {
@@ -849,7 +853,8 @@ out:
disabled = rc ||
method == RESET_TYPE_DISABLE ||
method == RESET_TYPE_RECOVER_OR_DISABLE;
- rc2 = efx_reset_up(efx, method, !disabled);
+ if (efx_nic_rev(efx) != EFX_REV_EF100)
+ rc2 = efx_reset_up(efx, method, !disabled);
if (rc2) {
disabled = true;
if (!rc)
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 9828516bd82d..4ffda7782f68 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -221,8 +221,6 @@ static int efx_ethtool_get_ts_info(struct net_device *net_dev,
return 0;
}
-const char *efx_driver_name = KBUILD_MODNAME;
-
const struct ethtool_ops efx_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USECS_IRQ |
@@ -232,7 +230,6 @@ const struct ethtool_ops efx_ethtool_ops = {
.get_regs = efx_ethtool_get_regs,
.get_msglevel = efx_ethtool_get_msglevel,
.set_msglevel = efx_ethtool_set_msglevel,
- .nway_reset = efx_ethtool_nway_reset,
.get_link = ethtool_op_get_link,
.get_coalesce = efx_ethtool_get_coalesce,
.set_coalesce = efx_ethtool_set_coalesce,
diff --git a/drivers/net/ethernet/sfc/ethtool_common.c b/drivers/net/ethernet/sfc/ethtool_common.c
index e9a5a66529bf..05ac87807929 100644
--- a/drivers/net/ethernet/sfc/ethtool_common.c
+++ b/drivers/net/ethernet/sfc/ethtool_common.c
@@ -104,7 +104,7 @@ void efx_ethtool_get_drvinfo(struct net_device *net_dev,
{
struct efx_nic *efx = netdev_priv(net_dev);
- strlcpy(info->driver, efx_driver_name, sizeof(info->driver));
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
efx_mcdi_print_fwver(efx, info->fw_version,
sizeof(info->fw_version));
@@ -173,14 +173,6 @@ fail:
test->flags |= ETH_TEST_FL_FAILED;
}
-/* Restart autonegotiation */
-int efx_ethtool_nway_reset(struct net_device *net_dev)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
-
- return mdio45_nway_restart(&efx->mdio);
-}
-
void efx_ethtool_get_pauseparam(struct net_device *net_dev,
struct ethtool_pauseparam *pause)
{
diff --git a/drivers/net/ethernet/sfc/ethtool_common.h b/drivers/net/ethernet/sfc/ethtool_common.h
index 3f3aaa92fbb5..659491932101 100644
--- a/drivers/net/ethernet/sfc/ethtool_common.h
+++ b/drivers/net/ethernet/sfc/ethtool_common.h
@@ -11,15 +11,12 @@
#ifndef EFX_ETHTOOL_COMMON_H
#define EFX_ETHTOOL_COMMON_H
-extern const char *efx_driver_name;
-
void efx_ethtool_get_drvinfo(struct net_device *net_dev,
struct ethtool_drvinfo *info);
u32 efx_ethtool_get_msglevel(struct net_device *net_dev);
void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable);
void efx_ethtool_self_test(struct net_device *net_dev,
struct ethtool_test *test, u64 *data);
-int efx_ethtool_nway_reset(struct net_device *net_dev);
void efx_ethtool_get_pauseparam(struct net_device *net_dev,
struct ethtool_pauseparam *pause);
int efx_ethtool_set_pauseparam(struct net_device *net_dev,
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
index c3c011bc6a68..30439cc83a89 100644
--- a/drivers/net/ethernet/sfc/io.h
+++ b/drivers/net/ethernet/sfc/io.h
@@ -75,6 +75,11 @@
#endif
#endif
+static inline u32 efx_reg(struct efx_nic *efx, unsigned int reg)
+{
+ return efx->reg_base + reg;
+}
+
#ifdef EFX_USE_QWORD_IO
static inline void _efx_writeq(struct efx_nic *efx, __le64 value,
unsigned int reg)
@@ -217,8 +222,11 @@ static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value,
efx_reado(efx, value, reg + index * sizeof(efx_oword_t));
}
-/* default VI stride (step between per-VI registers) is 8K */
-#define EFX_DEFAULT_VI_STRIDE 0x2000
+/* default VI stride (step between per-VI registers) is 8K on EF10 and
+ * 64K on EF100
+ */
+#define EFX_DEFAULT_VI_STRIDE 0x2000
+#define EF100_DEFAULT_VI_STRIDE 0x10000
/* Calculate offset to page-mapped register */
static inline unsigned int efx_paged_reg(struct efx_nic *efx, unsigned int page,
@@ -265,7 +273,9 @@ _efx_writed_page(struct efx_nic *efx, const efx_dword_t *value,
#define efx_writed_page(efx, value, reg, page) \
_efx_writed_page(efx, value, \
reg + \
- BUILD_BUG_ON_ZERO((reg) != 0x400 && \
+ BUILD_BUG_ON_ZERO((reg) != 0x180 && \
+ (reg) != 0x200 && \
+ (reg) != 0x400 && \
(reg) != 0x420 && \
(reg) != 0x830 && \
(reg) != 0x83c && \
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 6c49740a178e..5467819aef6e 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -1337,7 +1337,7 @@ void efx_mcdi_process_event(struct efx_channel *channel,
efx_mcdi_process_link_change(efx, event);
break;
case MCDI_EVENT_CODE_SENSOREVT:
- efx_mcdi_sensor_event(efx, event);
+ efx_sensor_event(efx, event);
break;
case MCDI_EVENT_CODE_SCHEDERR:
netif_dbg(efx, hw, efx->net_dev,
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index e053adfe82b0..658cf345420d 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -327,10 +327,10 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
#define MCDI_CAPABILITY(field) \
- MC_CMD_GET_CAPABILITIES_V4_OUT_ ## field ## _LBN
+ MC_CMD_GET_CAPABILITIES_V8_OUT_ ## field ## _LBN
#define MCDI_CAPABILITY_OFST(field) \
- MC_CMD_GET_CAPABILITIES_V4_OUT_ ## field ## _OFST
+ MC_CMD_GET_CAPABILITIES_V8_OUT_ ## field ## _OFST
#define efx_has_cap(efx, field) \
efx->type->check_caps(efx, \
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 786fda559976..7bb7ecb480ae 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -173,6 +173,7 @@ struct efx_tx_buffer {
#define EFX_TX_BUF_MAP_SINGLE 8 /* buffer was mapped with dma_map_single() */
#define EFX_TX_BUF_OPTION 0x10 /* empty buffer for option descriptor */
#define EFX_TX_BUF_XDP 0x20 /* buffer was sent with XDP */
+#define EFX_TX_BUF_TSO_V3 0x40 /* empty buffer for a TSO_V3 descriptor */
/**
* struct efx_tx_queue - An Efx TX queue
@@ -245,6 +246,7 @@ struct efx_tx_buffer {
* @pio_packets: Number of times the TX PIO feature has been used
* @xmit_more_available: Are any packets waiting to be pushed to the NIC
* @cb_packets: Number of times the TX copybreak feature has been used
+ * @notify_count: Count of descriptors notified to the NIC
* @empty_read_count: If the completion path has seen the queue as empty
* and the transmission path has not yet checked this, the value of
* @read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0.
@@ -292,6 +294,7 @@ struct efx_tx_queue {
unsigned int pio_packets;
bool xmit_more_available;
unsigned int cb_packets;
+ unsigned int notify_count;
/* Statistics to supplement MAC stats */
unsigned long tx_packets;
@@ -963,7 +966,9 @@ struct efx_async_filter_insertion {
* @vpd_sn: Serial number read from VPD
* @xdp_rxq_info_failed: Have any of the rx queues failed to initialise their
* xdp_rxq_info structures?
+ * @netdev_notifier: Netdevice notifier.
* @mem_bar: The BAR that is mapped into membase.
+ * @reg_base: Offset from the start of the BAR to the function control window.
* @monitor_work: Hardware monitor workitem
* @biu_lock: BIU (bus interface unit) lock
* @last_irq_cpu: Last CPU to handle a possible test interrupt. This
@@ -1141,7 +1146,10 @@ struct efx_nic {
char *vpd_sn;
bool xdp_rxq_info_failed;
+ struct notifier_block netdev_notifier;
+
unsigned int mem_bar;
+ u32 reg_base;
/* The following fields may be written more often */
@@ -1244,6 +1252,7 @@ struct efx_udp_tunnel {
* @tx_init: Initialise TX queue on the NIC
* @tx_remove: Free resources for TX queue
* @tx_write: Write TX descriptors and doorbell
+ * @tx_enqueue: Add an SKB to TX queue
* @rx_push_rss_config: Write RSS hash key and indirection table to the NIC
* @rx_pull_rss_config: Read RSS hash key and indirection table back from the NIC
* @rx_push_rss_context_config: Write RSS hash key and indirection table for
@@ -1255,6 +1264,7 @@ struct efx_udp_tunnel {
* @rx_remove: Free resources for RX queue
* @rx_write: Write RX descriptors and doorbell
* @rx_defer_refill: Generate a refill reminder event
+ * @rx_packet: Receive the queued RX buffer on a channel
* @ev_probe: Allocate resources for event queue
* @ev_init: Initialise event queue on the NIC
* @ev_fini: Deinitialise event queue on the NIC
@@ -1299,6 +1309,7 @@ struct efx_udp_tunnel {
* @udp_tnl_push_ports: Push the list of UDP tunnel ports to the NIC if required.
* @udp_tnl_has_port: Check if a port has been added as UDP tunnel
* @print_additional_fwver: Dump NIC-specific additional FW version info
+ * @sensor_event: Handle a sensor event from MCDI
* @revision: Hardware architecture revision
* @txd_ptr_tbl_base: TX descriptor ring base address
* @rxd_ptr_tbl_base: RX descriptor ring base address
@@ -1379,6 +1390,7 @@ struct efx_nic_type {
void (*tx_init)(struct efx_tx_queue *tx_queue);
void (*tx_remove)(struct efx_tx_queue *tx_queue);
void (*tx_write)(struct efx_tx_queue *tx_queue);
+ netdev_tx_t (*tx_enqueue)(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
unsigned int (*tx_limit_len)(struct efx_tx_queue *tx_queue,
dma_addr_t dma_addr, unsigned int len);
int (*rx_push_rss_config)(struct efx_nic *efx, bool user,
@@ -1396,6 +1408,7 @@ struct efx_nic_type {
void (*rx_remove)(struct efx_rx_queue *rx_queue);
void (*rx_write)(struct efx_rx_queue *rx_queue);
void (*rx_defer_refill)(struct efx_rx_queue *rx_queue);
+ void (*rx_packet)(struct efx_channel *channel);
int (*ev_probe)(struct efx_channel *channel);
int (*ev_init)(struct efx_channel *channel);
void (*ev_fini)(struct efx_channel *channel);
@@ -1470,6 +1483,7 @@ struct efx_nic_type {
bool (*udp_tnl_has_port)(struct efx_nic *efx, __be16 port);
size_t (*print_additional_fwver)(struct efx_nic *efx, char *buf,
size_t len);
+ void (*sensor_event)(struct efx_nic *efx, efx_qword_t *ev);
int revision;
unsigned int txd_ptr_tbl_base;
@@ -1521,6 +1535,13 @@ efx_get_channel(struct efx_nic *efx, unsigned index)
_channel = _channel->channel ? \
(_efx)->channel[_channel->channel - 1] : NULL)
+static inline struct efx_channel *
+efx_get_tx_channel(struct efx_nic *efx, unsigned int index)
+{
+ EFX_WARN_ON_ONCE_PARANOID(index >= efx->n_tx_channels);
+ return efx->channel[efx->tx_channel_offset + index];
+}
+
static inline struct efx_tx_queue *
efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
{
@@ -1651,6 +1672,24 @@ static inline void efx_xmit_hwtstamp_pending(struct sk_buff *skb)
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
}
+/* Get the max fill level of the TX queues on this channel */
+static inline unsigned int
+efx_channel_tx_fill_level(struct efx_channel *channel)
+{
+ struct efx_tx_queue *tx_queue;
+ unsigned int fill_level = 0;
+
+ /* This function is currently only used by EF100, which maybe
+ * could do something simpler and just compute the fill level
+ * of the single TXQ that's really in use.
+ */
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ fill_level = max(fill_level,
+ tx_queue->insert_count - tx_queue->read_count);
+
+ return fill_level;
+}
+
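
efx_channel_tx_fill_level() relies on insert_count and read_count being
free-running unsigned counters: their difference stays correct even when
the counters wrap. A minimal demonstration of that property:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t insert_count = 2;		/* wrapped past UINT32_MAX */
	uint32_t read_count = UINT32_MAX - 1;	/* lags by four entries */

	/* unsigned subtraction is modulo 2^32, so the fill level is
	 * still correct across the wrap */
	assert(insert_count - read_count == 4);
	return 0;
}
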
/* Get all supported features.
* If a feature is not fixed, it is present in hw_features.
* If a feature is fixed, it is not present in hw_features, but
diff --git a/drivers/net/ethernet/sfc/nic_common.h b/drivers/net/ethernet/sfc/nic_common.h
index e04b6817cde3..974107354087 100644
--- a/drivers/net/ethernet/sfc/nic_common.h
+++ b/drivers/net/ethernet/sfc/nic_common.h
@@ -225,6 +225,12 @@ void efx_nic_event_test_start(struct efx_channel *channel);
bool efx_nic_event_present(struct efx_channel *channel);
+static inline void efx_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
+{
+ if (efx->type->sensor_event)
+ efx->type->sensor_event(efx, ev);
+}
+
/* Some statistics are computed as A - B where A and B each increase
* linearly with some hardware counter(s) and the counters are read
* asynchronously. If the counters contributing to B are always read
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 393b7cbac8b2..bea4725a4499 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -1154,17 +1154,15 @@ static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
/* Drop time-expired events */
spin_lock_bh(&ptp->evt_lock);
- if (!list_empty(&ptp->evt_list)) {
- list_for_each_safe(cursor, next, &ptp->evt_list) {
- struct efx_ptp_event_rx *evt;
-
- evt = list_entry(cursor, struct efx_ptp_event_rx,
- link);
- if (time_after(jiffies, evt->expiry)) {
- list_move(&evt->link, &ptp->evt_free_list);
- netif_warn(efx, hw, efx->net_dev,
- "PTP rx event dropped\n");
- }
+ list_for_each_safe(cursor, next, &ptp->evt_list) {
+ struct efx_ptp_event_rx *evt;
+
+ evt = list_entry(cursor, struct efx_ptp_event_rx,
+ link);
+ if (time_after(jiffies, evt->expiry)) {
+ list_move(&evt->link, &ptp->evt_free_list);
+ netif_warn(efx, hw, efx->net_dev,
+ "PTP rx event dropped\n");
}
}
spin_unlock_bh(&ptp->evt_lock);
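
The hunk above drops a redundant list_empty() guard: list_for_each_safe
iteration simply runs zero times on an empty list. A user-space model with
the same circular-list shape (struct node stands in for list_head):

#include <assert.h>

struct node { struct node *next, *prev; };

static void list_init(struct node *h) { h->next = h->prev = h; }

int main(void)
{
	struct node head;
	struct node *cur, *tmp;
	int visits = 0;

	list_init(&head);
	/* the safe-iteration shape of list_for_each_safe() */
	for (cur = head.next, tmp = cur->next; cur != &head;
	     cur = tmp, tmp = cur->next)
		visits++;
	assert(visits == 0);	/* empty list: the body never runs */
	return 0;
}
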
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 219fb3a0c9d0..a7ea630bb5e6 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -1018,6 +1018,7 @@ const struct efx_nic_type siena_a0_nic_type = {
.tx_remove = efx_farch_tx_remove,
.tx_write = efx_farch_tx_write,
.tx_limit_len = efx_farch_tx_limit_len,
+ .tx_enqueue = __efx_enqueue_skb,
.rx_push_rss_config = siena_rx_push_rss_config,
.rx_pull_rss_config = siena_rx_pull_rss_config,
.rx_probe = efx_farch_rx_probe,
@@ -1025,6 +1026,7 @@ const struct efx_nic_type siena_a0_nic_type = {
.rx_remove = efx_farch_rx_remove,
.rx_write = efx_farch_rx_write,
.rx_defer_refill = efx_farch_rx_defer_refill,
+ .rx_packet = __efx_rx_packet,
.ev_probe = efx_farch_ev_probe,
.ev_init = efx_farch_ev_init,
.ev_fini = efx_farch_ev_fini,
@@ -1096,4 +1098,5 @@ const struct efx_nic_type siena_a0_nic_type = {
1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT),
.rx_hash_key_size = 16,
.check_caps = siena_check_caps,
+ .sensor_event = efx_mcdi_sensor_event,
};
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 1bcf50ab95d9..727201d5eb24 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -284,7 +284,7 @@ static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue,
* Returns NETDEV_TX_OK.
* You must hold netif_tx_lock() to call this function.
*/
-netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
+netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
unsigned int old_insert_count = tx_queue->insert_count;
bool xmit_more = netdev_xmit_more();
@@ -503,7 +503,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
}
tx_queue = efx_get_tx_queue(efx, index, type);
- return efx_enqueue_skb(tx_queue, skb);
+ return __efx_enqueue_skb(tx_queue, skb);
}
void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
diff --git a/drivers/net/ethernet/sfc/tx_common.c b/drivers/net/ethernet/sfc/tx_common.c
index 11b64c609550..793e234819a8 100644
--- a/drivers/net/ethernet/sfc/tx_common.c
+++ b/drivers/net/ethernet/sfc/tx_common.c
@@ -71,6 +71,7 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
"initialising TX queue %d\n", tx_queue->queue);
tx_queue->insert_count = 0;
+ tx_queue->notify_count = 0;
tx_queue->write_count = 0;
tx_queue->packet_write_count = 0;
tx_queue->old_write_count = 0;
diff --git a/drivers/net/ethernet/sfc/tx_common.h b/drivers/net/ethernet/sfc/tx_common.h
index cbe995b024a6..bbab7f248250 100644
--- a/drivers/net/ethernet/sfc/tx_common.h
+++ b/drivers/net/ethernet/sfc/tx_common.h
@@ -40,4 +40,6 @@ int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
+
+extern bool efx_separate_tx_channels;
#endif
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index 6646eba9f57f..6eef0f45b133 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -951,7 +951,7 @@ out_stop:
dma_free_coherent(ip->dma_dev, RX_RING_SIZE, ip->rxr,
ip->rxr_dma);
if (ip->tx_ring)
- dma_free_coherent(ip->dma_dev, TX_RING_SIZE, ip->tx_ring,
+ dma_free_coherent(ip->dma_dev, TX_RING_SIZE + SZ_16K - 1, ip->tx_ring,
ip->txr_dma);
out_free:
free_netdev(dev);
@@ -964,7 +964,7 @@ static int ioc3eth_remove(struct platform_device *pdev)
struct ioc3_private *ip = netdev_priv(dev);
dma_free_coherent(ip->dma_dev, RX_RING_SIZE, ip->rxr, ip->rxr_dma);
- dma_free_coherent(ip->dma_dev, TX_RING_SIZE, ip->tx_ring, ip->txr_dma);
+ dma_free_coherent(ip->dma_dev, TX_RING_SIZE + SZ_16K - 1, ip->tx_ring, ip->txr_dma);
unregister_netdev(dev);
del_timer_sync(&ip->ioc3_timer);
diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c
index cb043eb1bdc1..f94078f8ebe5 100644
--- a/drivers/net/ethernet/silan/sc92031.c
+++ b/drivers/net/ethernet/silan/sc92031.c
@@ -1499,15 +1499,13 @@ static void sc92031_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static int sc92031_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused sc92031_suspend(struct device *dev_d)
{
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(dev_d);
struct sc92031_priv *priv = netdev_priv(dev);
- pci_save_state(pdev);
-
if (!netif_running(dev))
- goto out;
+ return 0;
netif_device_detach(dev);
@@ -1521,22 +1519,16 @@ static int sc92031_suspend(struct pci_dev *pdev, pm_message_t state)
spin_unlock_bh(&priv->lock);
-out:
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
return 0;
}
-static int sc92031_resume(struct pci_dev *pdev)
+static int __maybe_unused sc92031_resume(struct device *dev_d)
{
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(dev_d);
struct sc92031_priv *priv = netdev_priv(dev);
- pci_restore_state(pdev);
- pci_set_power_state(pdev, PCI_D0);
-
if (!netif_running(dev))
- goto out;
+ return 0;
/* Interrupts already disabled by sc92031_suspend */
spin_lock_bh(&priv->lock);
@@ -1553,7 +1545,6 @@ static int sc92031_resume(struct pci_dev *pdev)
else
netif_tx_disable(dev);
-out:
return 0;
}
@@ -1565,13 +1556,14 @@ static const struct pci_device_id sc92031_pci_device_id_table[] = {
};
MODULE_DEVICE_TABLE(pci, sc92031_pci_device_id_table);
+static SIMPLE_DEV_PM_OPS(sc92031_pm_ops, sc92031_suspend, sc92031_resume);
+
static struct pci_driver sc92031_pci_driver = {
.name = SC92031_NAME,
.id_table = sc92031_pci_device_id_table,
.probe = sc92031_probe,
.remove = sc92031_remove,
- .suspend = sc92031_suspend,
- .resume = sc92031_resume,
+ .driver.pm = &sc92031_pm_ops,
};
module_pci_driver(sc92031_pci_driver);
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 82e020add19f..336105f77313 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -2502,11 +2502,9 @@ static void sis900_remove(struct pci_dev *pci_dev)
pci_release_regions(pci_dev);
}
-#ifdef CONFIG_PM
-
-static int sis900_suspend(struct pci_dev *pci_dev, pm_message_t state)
+static int __maybe_unused sis900_suspend(struct device *dev)
{
- struct net_device *net_dev = pci_get_drvdata(pci_dev);
+ struct net_device *net_dev = dev_get_drvdata(dev);
struct sis900_private *sis_priv = netdev_priv(net_dev);
void __iomem *ioaddr = sis_priv->ioaddr;
@@ -2519,22 +2517,17 @@ static int sis900_suspend(struct pci_dev *pci_dev, pm_message_t state)
/* Stop the chip's Tx and Rx Status Machine */
sw32(cr, RxDIS | TxDIS | sr32(cr));
- pci_set_power_state(pci_dev, PCI_D3hot);
- pci_save_state(pci_dev);
-
return 0;
}
-static int sis900_resume(struct pci_dev *pci_dev)
+static int __maybe_unused sis900_resume(struct device *dev)
{
- struct net_device *net_dev = pci_get_drvdata(pci_dev);
+ struct net_device *net_dev = dev_get_drvdata(dev);
struct sis900_private *sis_priv = netdev_priv(net_dev);
void __iomem *ioaddr = sis_priv->ioaddr;
if(!netif_running(net_dev))
return 0;
- pci_restore_state(pci_dev);
- pci_set_power_state(pci_dev, PCI_D0);
sis900_init_rxfilter(net_dev);
@@ -2558,17 +2551,15 @@ static int sis900_resume(struct pci_dev *pci_dev)
return 0;
}
-#endif /* CONFIG_PM */
+
+static SIMPLE_DEV_PM_OPS(sis900_pm_ops, sis900_suspend, sis900_resume);
static struct pci_driver sis900_pci_driver = {
.name = SIS900_MODULE_NAME,
.id_table = sis900_pci_tbl,
.probe = sis900_probe,
.remove = sis900_remove,
-#ifdef CONFIG_PM
- .suspend = sis900_suspend,
- .resume = sis900_resume,
-#endif /* CONFIG_PM */
+ .driver.pm = &sis900_pm_ops,
};
static int __init sis900_init_module(void)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index eae11c585025..ac5e8cc5fb9f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -600,9 +600,14 @@ static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ if (!priv->plat->pmt)
+ return phylink_ethtool_get_wol(priv->phylink, wol);
+
mutex_lock(&priv->lock);
if (device_can_wakeup(priv->device)) {
wol->supported = WAKE_MAGIC | WAKE_UCAST;
+ if (priv->hw_cap_support && !priv->dma_cap.pmt_magic_frame)
+ wol->supported &= ~WAKE_MAGIC;
wol->wolopts = priv->wolopts;
}
mutex_unlock(&priv->lock);
@@ -613,15 +618,23 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
struct stmmac_priv *priv = netdev_priv(dev);
u32 support = WAKE_MAGIC | WAKE_UCAST;
+ if (!device_can_wakeup(priv->device))
+ return -EOPNOTSUPP;
+
+ if (!priv->plat->pmt) {
+ int ret = phylink_ethtool_set_wol(priv->phylink, wol);
+
+ if (!ret)
+ device_set_wakeup_enable(priv->device, !!wol->wolopts);
+ return ret;
+ }
+
/* By default almost all GMAC devices support the WoL via
* magic frame but we can disable it if the HW capability
* register shows no support for pmt_magic_frame. */
if ((priv->hw_cap_support) && (!priv->dma_cap.pmt_magic_frame))
wol->wolopts &= ~WAKE_MAGIC;
- if (!device_can_wakeup(priv->device))
- return -EINVAL;
-
if (wol->wolopts & ~support)
return -EINVAL;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 73677c3b33b6..89b2b3472852 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1075,6 +1075,7 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
*/
static int stmmac_init_phy(struct net_device *dev)
{
+ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
struct stmmac_priv *priv = netdev_priv(dev);
struct device_node *node;
int ret;
@@ -1100,6 +1101,9 @@ static int stmmac_init_phy(struct net_device *dev)
ret = phylink_connect_phy(priv->phylink, phydev);
}
+ phylink_ethtool_get_wol(priv->phylink, &wol);
+ device_set_wakeup_capable(priv->device, !!wol.supported);
+
return ret;
}
@@ -2819,6 +2823,8 @@ static int stmmac_open(struct net_device *dev)
stmmac_init_coalesce(priv);
phylink_start(priv->phylink);
+ /* We may have called phylink_speed_down before */
+ phylink_speed_up(priv->phylink);
/* Request the IRQ lines */
ret = request_irq(dev->irq, stmmac_interrupt,
@@ -2892,6 +2898,8 @@ static int stmmac_release(struct net_device *dev)
if (priv->eee_enabled)
del_timer_sync(&priv->eee_ctrl_timer);
+ if (device_may_wakeup(priv->device))
+ phylink_speed_down(priv->phylink, false);
/* Stop and disconnect the PHY */
phylink_stop(priv->phylink);
phylink_disconnect_phy(priv->phylink);
@@ -5085,12 +5093,14 @@ int stmmac_suspend(struct device *dev)
priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
/* Enable Power down mode by programming the PMT regs */
- if (device_may_wakeup(priv->device)) {
+ if (device_may_wakeup(priv->device) && priv->plat->pmt) {
stmmac_pmt(priv, priv->hw, priv->wolopts);
priv->irq_wake = 1;
} else {
mutex_unlock(&priv->lock);
rtnl_lock();
+ if (device_may_wakeup(priv->device))
+ phylink_speed_down(priv->phylink, false);
phylink_stop(priv->phylink);
rtnl_unlock();
mutex_lock(&priv->lock);
@@ -5157,7 +5167,7 @@ int stmmac_resume(struct device *dev)
* this bit because it can generate problems while resuming
* from another devices (e.g. serial console).
*/
- if (device_may_wakeup(priv->device)) {
+ if (device_may_wakeup(priv->device) && priv->plat->pmt) {
mutex_lock(&priv->lock);
stmmac_pmt(priv, priv->hw, 0);
mutex_unlock(&priv->lock);
@@ -5200,9 +5210,11 @@ int stmmac_resume(struct device *dev)
mutex_unlock(&priv->lock);
- if (!device_may_wakeup(priv->device)) {
+ if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
rtnl_lock();
phylink_start(priv->phylink);
+ /* We may have called phylink_speed_down before */
+ phylink_speed_up(priv->phylink);
rtnl_unlock();
}
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 583cd2ef7662..58623e974a0c 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -345,33 +345,21 @@ static void tlan_stop(struct net_device *dev)
}
}
-#ifdef CONFIG_PM
-
-static int tlan_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused tlan_suspend(struct device *dev_d)
{
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(dev_d);
if (netif_running(dev))
tlan_stop(dev);
netif_device_detach(dev);
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_wake_from_d3(pdev, false);
- pci_set_power_state(pdev, PCI_D3hot);
return 0;
}
-static int tlan_resume(struct pci_dev *pdev)
+static int __maybe_unused tlan_resume(struct device *dev_d)
{
- struct net_device *dev = pci_get_drvdata(pdev);
- int rc = pci_enable_device(pdev);
-
- if (rc)
- return rc;
- pci_restore_state(pdev);
- pci_enable_wake(pdev, PCI_D0, 0);
+ struct net_device *dev = dev_get_drvdata(dev_d);
netif_device_attach(dev);
if (netif_running(dev))
@@ -380,21 +368,14 @@ static int tlan_resume(struct pci_dev *pdev)
return 0;
}
-#else /* CONFIG_PM */
-
-#define tlan_suspend NULL
-#define tlan_resume NULL
-
-#endif /* CONFIG_PM */
-
+static SIMPLE_DEV_PM_OPS(tlan_pm_ops, tlan_suspend, tlan_resume);
static struct pci_driver tlan_driver = {
.name = "tlan",
.id_table = tlan_pci_tbl,
.probe = tlan_init_one,
.remove = tlan_remove_one,
- .suspend = tlan_suspend,
- .resume = tlan_resume,
+ .driver.pm = &tlan_pm_ops,
};
static int __init tlan_probe(void)
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 3902b3aeb0c2..07389702a540 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -283,8 +283,8 @@ spider_net_free_chain(struct spider_net_card *card,
descr = descr->next;
} while (descr != chain->ring);
- dma_free_coherent(&card->pdev->dev, chain->num_desc,
- chain->hwring, chain->dma_addr);
+ dma_free_coherent(&card->pdev->dev, chain->num_desc * sizeof(struct spider_net_hw_descr),
+ chain->hwring, chain->dma_addr);
}
/**
@@ -314,8 +314,6 @@ spider_net_init_chain(struct spider_net_card *card,
if (!chain->hwring)
return -ENOMEM;
- memset(chain->ring, 0, chain->num_desc * sizeof(struct spider_net_descr));
-
/* Set up the hardware pointers in each descriptor */
descr = chain->ring;
hwdescr = chain->hwring;
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 929244064abd..9a15f14daa47 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1407,10 +1407,8 @@ static int temac_probe(struct platform_device *pdev)
}
/* map device registers */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- lp->regs = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!lp->regs) {
+ lp->regs = devm_platform_ioremap_resource_byname(pdev, 0);
+ if (IS_ERR(lp->regs)) {
dev_err(&pdev->dev, "could not map TEMAC registers\n");
return -ENOMEM;
}
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index ce719c830a77..32f339fedb21 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -810,7 +810,8 @@ static int nsim_dev_devlink_trap_init(struct devlink *devlink,
static int
nsim_dev_devlink_trap_action_set(struct devlink *devlink,
const struct devlink_trap *trap,
- enum devlink_trap_action action)
+ enum devlink_trap_action action,
+ struct netlink_ext_ack *extack)
{
struct nsim_dev *nsim_dev = devlink_priv(devlink);
struct nsim_trap_item *nsim_trap_item;
@@ -829,7 +830,8 @@ nsim_dev_devlink_trap_action_set(struct devlink *devlink,
static int
nsim_dev_devlink_trap_group_set(struct devlink *devlink,
const struct devlink_trap_group *group,
- const struct devlink_trap_policer *policer)
+ const struct devlink_trap_policer *policer,
+ struct netlink_ext_ack *extack)
{
struct nsim_dev *nsim_dev = devlink_priv(devlink);
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index dd20c2c27c2f..726e4b240e7e 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -194,6 +194,7 @@ config MDIO_MSCC_MIIM
config MDIO_MVUSB
tristate "Marvell USB to MDIO Adapter"
depends on USB
+ select MDIO_DEVRES
help
A USB to MDIO converter present on development boards for
Marvell's Link Street family of Ethernet switches.
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 46b33701ad4b..0af20faad69d 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -554,8 +554,10 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
bus->reset_gpiod = gpiod;
gpiod_set_value_cansleep(gpiod, 1);
- udelay(bus->reset_delay_us);
+ fsleep(bus->reset_delay_us);
gpiod_set_value_cansleep(gpiod, 0);
+ if (bus->reset_post_delay_us > 0)
+ fsleep(bus->reset_post_delay_us);
}
if (bus->reset) {
diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c
index 0f625a1b1644..0837319a52d7 100644
--- a/drivers/net/phy/mdio_device.c
+++ b/drivers/net/phy/mdio_device.c
@@ -132,7 +132,7 @@ void mdio_device_reset(struct mdio_device *mdiodev, int value)
d = value ? mdiodev->reset_assert_delay : mdiodev->reset_deassert_delay;
if (d)
- usleep_range(d, d + max_t(unsigned int, d / 10, 100));
+ fsleep(d);
}
EXPORT_SYMBOL(mdio_device_reset);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 061bebe25cb1..3c11a77f5709 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -2968,7 +2968,7 @@ unlock:
return ret;
}
-static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog **prog_p,
+static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog __rcu **prog_p,
void __user *data)
{
struct bpf_prog *prog;
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 5f123a8cf68e..d2fdb5430d27 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2261,12 +2261,14 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
minor = get_free_serial_index();
if (minor < 0)
- goto exit;
+ goto exit2;
/* register our minor number */
serial->parent->dev = tty_port_register_device_attr(&serial->port,
tty_drv, minor, &serial->parent->interface->dev,
serial->parent, hso_serial_dev_groups);
+ if (IS_ERR(serial->parent->dev))
+ goto exit2;
/* fill in specific data for later use */
serial->minor = minor;
@@ -2311,6 +2313,7 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
return 0;
exit:
hso_serial_tty_unregister(serial);
+exit2:
hso_serial_common_free(serial);
return -1;
}
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index eccbf4cd7149..442507f25aad 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -377,10 +377,6 @@ struct lan78xx_net {
struct tasklet_struct bh;
struct delayed_work wq;
- struct usb_host_endpoint *ep_blkin;
- struct usb_host_endpoint *ep_blkout;
- struct usb_host_endpoint *ep_intr;
-
int msg_enable;
struct urb *urb_intr;
@@ -2860,78 +2856,12 @@ lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
return NETDEV_TX_OK;
}
-static int
-lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
-{
- int tmp;
- struct usb_host_interface *alt = NULL;
- struct usb_host_endpoint *in = NULL, *out = NULL;
- struct usb_host_endpoint *status = NULL;
-
- for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
- unsigned ep;
-
- in = NULL;
- out = NULL;
- status = NULL;
- alt = intf->altsetting + tmp;
-
- for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
- struct usb_host_endpoint *e;
- int intr = 0;
-
- e = alt->endpoint + ep;
- switch (e->desc.bmAttributes) {
- case USB_ENDPOINT_XFER_INT:
- if (!usb_endpoint_dir_in(&e->desc))
- continue;
- intr = 1;
- /* FALLTHROUGH */
- case USB_ENDPOINT_XFER_BULK:
- break;
- default:
- continue;
- }
- if (usb_endpoint_dir_in(&e->desc)) {
- if (!intr && !in)
- in = e;
- else if (intr && !status)
- status = e;
- } else {
- if (!out)
- out = e;
- }
- }
- if (in && out)
- break;
- }
- if (!alt || !in || !out)
- return -EINVAL;
-
- dev->pipe_in = usb_rcvbulkpipe(dev->udev,
- in->desc.bEndpointAddress &
- USB_ENDPOINT_NUMBER_MASK);
- dev->pipe_out = usb_sndbulkpipe(dev->udev,
- out->desc.bEndpointAddress &
- USB_ENDPOINT_NUMBER_MASK);
- dev->ep_intr = status;
-
- return 0;
-}
-
static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
{
struct lan78xx_priv *pdata = NULL;
int ret;
int i;
- ret = lan78xx_get_endpoints(dev, intf);
- if (ret) {
- netdev_warn(dev->net, "lan78xx_get_endpoints failed: %d\n",
- ret);
- return ret;
- }
-
dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
pdata = (struct lan78xx_priv *)(dev->data[0]);
@@ -3700,6 +3630,7 @@ static void lan78xx_stat_monitor(struct timer_list *t)
static int lan78xx_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
+ struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
struct lan78xx_net *dev;
struct net_device *netdev;
struct usb_device *udev;
@@ -3748,6 +3679,34 @@ static int lan78xx_probe(struct usb_interface *intf,
mutex_init(&dev->stats.access_lock);
+ if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
+ ret = -ENODEV;
+ goto out2;
+ }
+
+ dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
+ ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
+ if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
+ ret = -ENODEV;
+ goto out2;
+ }
+
+ dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
+ ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
+ if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
+ ret = -ENODEV;
+ goto out2;
+ }
+
+ ep_intr = &intf->cur_altsetting->endpoint[2];
+ if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
+ ret = -ENODEV;
+ goto out2;
+ }
+
+ dev->pipe_intr = usb_rcvintpipe(dev->udev,
+ usb_endpoint_num(&ep_intr->desc));
+
ret = lan78xx_bind(dev, intf);
if (ret < 0)
goto out2;
@@ -3759,18 +3718,7 @@ static int lan78xx_probe(struct usb_interface *intf,
netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
- dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
- dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
- dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
-
- dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
- dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
-
- dev->pipe_intr = usb_rcvintpipe(dev->udev,
- dev->ep_intr->desc.bEndpointAddress &
- USB_ENDPOINT_NUMBER_MASK);
- period = dev->ep_intr->desc.bInterval;
-
+ period = ep_intr->desc.bInterval;
maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
buf = kmalloc(maxp, GFP_KERNEL);
if (buf) {
@@ -3783,6 +3731,7 @@ static int lan78xx_probe(struct usb_interface *intf,
usb_fill_int_urb(dev->urb_intr, dev->udev,
dev->pipe_intr, buf, maxp,
intr_complete, dev, period);
+ dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
}
}
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index a43c97b13924..77658425db8a 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1376,6 +1376,7 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
for (h = 0; h < FDB_HASH_SIZE; ++h) {
struct vxlan_fdb *f;
+ rcu_read_lock();
hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
struct vxlan_rdst *rd;
@@ -1387,8 +1388,10 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
cb->nlh->nlmsg_seq,
RTM_NEWNEIGH,
NLM_F_MULTI, NULL);
- if (err < 0)
+ if (err < 0) {
+ rcu_read_unlock();
goto out;
+ }
skip_nh:
*idx += 1;
continue;
@@ -1403,12 +1406,15 @@ skip_nh:
cb->nlh->nlmsg_seq,
RTM_NEWNEIGH,
NLM_F_MULTI, rd);
- if (err < 0)
+ if (err < 0) {
+ rcu_read_unlock();
goto out;
+ }
skip:
*idx += 1;
}
}
+ rcu_read_unlock();
}
out:
return err;
@@ -3070,8 +3076,10 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
if (!do_all && (f->state & (NUD_PERMANENT | NUD_NOARP)))
continue;
/* the all_zeros_mac entry is deleted at vxlan_uninit */
- if (!is_zero_ether_addr(f->eth_addr))
- vxlan_fdb_destroy(vxlan, f, true, true);
+ if (is_zero_ether_addr(f->eth_addr) &&
+ f->vni == vxlan->cfg.vni)
+ continue;
+ vxlan_fdb_destroy(vxlan, f, true, true);
}
spin_unlock_bh(&vxlan->hash_lock[h]);
}
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 0196b7fd130d..fe4650260bc6 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -2636,12 +2636,10 @@ fst_remove_one(struct pci_dev *pdev)
}
static struct pci_driver fst_driver = {
- .name = FST_NAME,
- .id_table = fst_pci_dev_id,
- .probe = fst_add_one,
- .remove = fst_remove_one,
- .suspend = NULL,
- .resume = NULL,
+ .name = FST_NAME,
+ .id_table = fst_pci_dev_id,
+ .probe = fst_add_one,
+ .remove = fst_remove_one,
};
static int __init
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 919d15584d4a..3c0c33a9f30c 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -568,11 +568,7 @@ chan_to_phymode(const struct cfg80211_chan_def *chandef)
case NL80211_CHAN_WIDTH_40:
phymode = MODE_11NG_HT40;
break;
- case NL80211_CHAN_WIDTH_5:
- case NL80211_CHAN_WIDTH_10:
- case NL80211_CHAN_WIDTH_80:
- case NL80211_CHAN_WIDTH_80P80:
- case NL80211_CHAN_WIDTH_160:
+ default:
phymode = MODE_UNKNOWN;
break;
}
@@ -597,8 +593,7 @@ chan_to_phymode(const struct cfg80211_chan_def *chandef)
case NL80211_CHAN_WIDTH_80P80:
phymode = MODE_11AC_VHT80_80;
break;
- case NL80211_CHAN_WIDTH_5:
- case NL80211_CHAN_WIDTH_10:
+ default:
phymode = MODE_UNKNOWN;
break;
}
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 07d3e031c75a..94ae2b9ea663 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -2072,7 +2072,7 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
ret = ath11k_wmi_send_obss_color_collision_cfg_cmd(
ar, arvif->vdev_id, info->he_bss_color.color,
ATH11K_BSS_COLOR_COLLISION_DETECTION_AP_PERIOD_MS,
- !info->he_bss_color.disabled);
+ info->he_bss_color.enabled);
if (ret)
ath11k_warn(ar->ab, "failed to set bss color collision on vdev %i: %d\n",
arvif->vdev_id, ret);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 1356e8cbe617..9dd9d73f4484 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -4334,7 +4334,7 @@ static int __init init_mac80211_hwsim(void)
break;
case HWSIM_REGTEST_STRICT_ALL:
param.reg_strict = true;
- /* fall through */
+ fallthrough;
case HWSIM_REGTEST_DRIVER_REG_ALL:
param.reg_alpha2 = hwsim_alpha2s[0];
break;
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h
index 71cd8629b28e..8b476b007c5e 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.h
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.h
@@ -36,9 +36,9 @@
#define SD8897_DEFAULT_FW_NAME "mrvl/sd8897_uapsta.bin"
#define SD8887_DEFAULT_FW_NAME "mrvl/sd8887_uapsta.bin"
#define SD8801_DEFAULT_FW_NAME "mrvl/sd8801_uapsta.bin"
-#define SD8977_DEFAULT_FW_NAME "mrvl/sd8977_uapsta.bin"
+#define SD8977_DEFAULT_FW_NAME "mrvl/sdsd8977_combo_v2.bin"
#define SD8987_DEFAULT_FW_NAME "mrvl/sd8987_uapsta.bin"
-#define SD8997_DEFAULT_FW_NAME "mrvl/sd8997_uapsta.bin"
+#define SD8997_DEFAULT_FW_NAME "mrvl/sdsd8997_combo_v4.bin"
#define BLOCK_MODE 1
#define BYTE_MODE 0
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index add040168e67..4ee2330c603e 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1102,6 +1102,9 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
int pos;
int len;
+ if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST)
+ return 0;
+
c.identify.opcode = nvme_admin_identify;
c.identify.nsid = cpu_to_le32(nsid);
c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;
@@ -1115,18 +1118,6 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
if (status) {
dev_warn(ctrl->device,
"Identify Descriptors failed (%d)\n", status);
- /*
- * Don't treat non-retryable errors as fatal, as we potentially
- * already have a NGUID or EUI-64. If we failed with DNR set,
- * we want to silently ignore the error as we can still
- * identify the device, but if the status has DNR set, we want
- * to propagate the error back specifically for the disk
- * revalidation flow to make sure we don't abandon the
- * device just because of a temporal retry-able error (such
- * as path of transport errors).
- */
- if (status > 0 && (status & NVME_SC_DNR))
- status = 0;
goto free_data;
}
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 1de3f9b827aa..09ffc3246f60 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -129,6 +129,13 @@ enum nvme_quirks {
* Don't change the value of the temperature threshold feature
*/
NVME_QUIRK_NO_TEMP_THRESH_CHANGE = (1 << 14),
+
+ /*
+ * The controller doesn't handle the Identify Namespace
+ * Identification Descriptor list subcommand despite claiming
+ * NVMe 1.3 compliance.
+ */
+ NVME_QUIRK_NO_NS_DESC_LIST = (1 << 15),
};
/*
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index b1d18f0633c7..d4b1ff747123 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -3099,6 +3099,8 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
.driver_data = NVME_QUIRK_IDENTIFY_CNS |
NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */
+ .driver_data = NVME_QUIRK_NO_NS_DESC_LIST, },
{ PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */
.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
{ PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */
@@ -3122,6 +3124,8 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ { PCI_DEVICE(0x1c5c, 0x1504), /* SK Hynix PC400 */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
.driver_data = NVME_QUIRK_SINGLE_VECTOR },
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 79ef2b8e2b3c..f3a91818167b 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1382,6 +1382,9 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
if (nctrl->opts->tos >= 0)
ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos);
+ /* Set 10 seconds timeout for icresp recvmsg */
+ queue->sock->sk->sk_rcvtimeo = 10 * HZ;
+
queue->sock->sk->sk_allocation = GFP_ATOMIC;
nvme_tcp_set_queue_io_cpu(queue);
queue->request = NULL;
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index eb84507de28a..cb32d7ef4938 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -268,6 +268,8 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
/* Get bus level PHY reset GPIO details */
mdio->reset_delay_us = DEFAULT_GPIO_RESET_DELAY;
of_property_read_u32(np, "reset-delay-us", &mdio->reset_delay_us);
+ mdio->reset_post_delay_us = 0;
+ of_property_read_u32(np, "reset-post-delay-us", &mdio->reset_post_delay_us);
/* Register the MDIO bus */
rc = mdiobus_register(mdio);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 812bfc32ecb8..2ea61abd5830 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2330,6 +2330,19 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
+static void quirk_disable_aspm_l0s_l1(struct pci_dev *dev)
+{
+ pci_info(dev, "Disabling ASPM L0s/L1\n");
+ pci_disable_link_state(dev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
+}
+
+/*
+ * ASM1083/1085 PCIe-PCI bridge devices cause AER timeout errors on the
+ * upstream PCIe root port when ASPM is enabled. At least L0s mode is affected;
+ * disable both L0s and L1 for now to be safe.
+ */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x1080, quirk_disable_aspm_l0s_l1);
+
/*
* Some Pericom PCIe-to-PCI bridges in reverse mode need the PCIe Retrain
* Link bit cleared after starting the link retrain process to allow this
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index ff1ee159dca2..f8ff30cdafa6 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -7,6 +7,8 @@ config PINCTRL_MSM
select PINCONF
select GENERIC_PINCONF
select GPIOLIB_IRQCHIP
+ select IRQ_DOMAIN_HIERARCHY
+ select IRQ_FASTEOI_HIERARCHY_HANDLERS
config PINCTRL_APQ8064
tristate "Qualcomm APQ8064 pin controller driver"
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 83b7d64bc4c1..c322f30a2064 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -832,6 +832,52 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
msm_gpio_irq_clear_unmask(d, false);
}
+/**
+ * msm_gpio_update_dual_edge_parent() - Prime next edge for IRQs handled by parent.
+ * @d: The irq data.
+ *
+ * This is much like msm_gpio_update_dual_edge_pos() but for IRQs that are
+ * normally handled by the parent irqchip. The logic here is slightly
+ * different due to what's easy to do with our parent, but in principle it's
+ * the same.
+ */
+static void msm_gpio_update_dual_edge_parent(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
+ const struct msm_pingroup *g = &pctrl->soc->groups[d->hwirq];
+ int loop_limit = 100;
+ unsigned int val;
+ unsigned int type;
+
+ /* Read the value and make a guess about what edge we need to catch */
+ val = msm_readl_io(pctrl, g) & BIT(g->in_bit);
+ type = val ? IRQ_TYPE_EDGE_FALLING : IRQ_TYPE_EDGE_RISING;
+
+ do {
+ /* Set the parent to catch the next edge */
+ irq_chip_set_type_parent(d, type);
+
+ /*
+ * Possibly the line changed between when we last read "val"
+ * (and decided what edge we needed) and when we set the edge.
+ * If the value didn't change (or changed and then changed
+ * back) then we're done.
+ */
+ val = msm_readl_io(pctrl, g) & BIT(g->in_bit);
+ if (type == IRQ_TYPE_EDGE_RISING) {
+ if (!val)
+ return;
+ type = IRQ_TYPE_EDGE_FALLING;
+ } else if (type == IRQ_TYPE_EDGE_FALLING) {
+ if (val)
+ return;
+ type = IRQ_TYPE_EDGE_RISING;
+ }
+ } while (loop_limit-- > 0);
+ dev_warn_once(pctrl->dev, "dual-edge irq failed to stabilize\n");
+}
+
static void msm_gpio_irq_ack(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -840,8 +886,11 @@ static void msm_gpio_irq_ack(struct irq_data *d)
unsigned long flags;
u32 val;
- if (test_bit(d->hwirq, pctrl->skip_wake_irqs))
+ if (test_bit(d->hwirq, pctrl->skip_wake_irqs)) {
+ if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
+ msm_gpio_update_dual_edge_parent(d);
return;
+ }
g = &pctrl->soc->groups[d->hwirq];
@@ -860,6 +909,17 @@ static void msm_gpio_irq_ack(struct irq_data *d)
raw_spin_unlock_irqrestore(&pctrl->lock, flags);
}
+static bool msm_gpio_needs_dual_edge_parent_workaround(struct irq_data *d,
+ unsigned int type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
+
+ return type == IRQ_TYPE_EDGE_BOTH &&
+ pctrl->soc->wakeirq_dual_edge_errata && d->parent_data &&
+ test_bit(d->hwirq, pctrl->skip_wake_irqs);
+}
+
static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -868,11 +928,21 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
unsigned long flags;
u32 val;
+ if (msm_gpio_needs_dual_edge_parent_workaround(d, type)) {
+ set_bit(d->hwirq, pctrl->dual_edge_irqs);
+ irq_set_handler_locked(d, handle_fasteoi_ack_irq);
+ msm_gpio_update_dual_edge_parent(d);
+ return 0;
+ }
+
if (d->parent_data)
irq_chip_set_type_parent(d, type);
- if (test_bit(d->hwirq, pctrl->skip_wake_irqs))
+ if (test_bit(d->hwirq, pctrl->skip_wake_irqs)) {
+ clear_bit(d->hwirq, pctrl->dual_edge_irqs);
+ irq_set_handler_locked(d, handle_fasteoi_irq);
return 0;
+ }
g = &pctrl->soc->groups[d->hwirq];
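A worked trace of the emulation above: say the line idles high when the ack runs. The first read sees the line high, so the parent is programmed for a falling edge; the re-read then still sees the line high, nothing was missed, and the handler returns. If instead the line dropped between the read and irq_chip_set_type_parent(), that falling edge may have been swallowed, so the loop flips the type to rising and tries again, converging as soon as two consecutive observations agree and warning once if 100 iterations pass without stabilizing.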
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h
index 9452da18a78b..7486fe08eb9b 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.h
+++ b/drivers/pinctrl/qcom/pinctrl-msm.h
@@ -113,6 +113,9 @@ struct msm_gpio_wakeirq_map {
* @pull_no_keeper: The SoC does not support keeper bias.
* @wakeirq_map: The map of wakeup capable GPIOs and the pin at PDC/MPM
* @nwakeirq_map: The number of entries in @wakeirq_map
+ * @wakeirq_dual_edge_errata: If true then GPIOs using the wakeirq_map need
+ * to be aware that their parent can't handle dual
+ * edge interrupts.
*/
struct msm_pinctrl_soc_data {
const struct pinctrl_pin_desc *pins;
@@ -128,6 +131,7 @@ struct msm_pinctrl_soc_data {
const int *reserved_gpios;
const struct msm_gpio_wakeirq_map *wakeirq_map;
unsigned int nwakeirq_map;
+ bool wakeirq_dual_edge_errata;
};
extern const struct dev_pm_ops msm_pinctrl_dev_pm_ops;
diff --git a/drivers/pinctrl/qcom/pinctrl-sc7180.c b/drivers/pinctrl/qcom/pinctrl-sc7180.c
index 1b6465a882f2..1d9acad3c1ce 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc7180.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc7180.c
@@ -1147,6 +1147,7 @@ static const struct msm_pinctrl_soc_data sc7180_pinctrl = {
.ntiles = ARRAY_SIZE(sc7180_tiles),
.wakeirq_map = sc7180_pdc_map,
.nwakeirq_map = ARRAY_SIZE(sc7180_pdc_map),
+ .wakeirq_dual_edge_errata = true,
};
static int sc7180_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/ptp/idt8a340_reg.h b/drivers/ptp/idt8a340_reg.h
index 69eedda9f731..b297c4aba2ba 100644
--- a/drivers/ptp/idt8a340_reg.h
+++ b/drivers/ptp/idt8a340_reg.h
@@ -20,6 +20,10 @@
#define HW_DPLL_1 (0x8b00)
#define HW_DPLL_2 (0x8c00)
#define HW_DPLL_3 (0x8d00)
+#define HW_DPLL_4 (0x8e00)
+#define HW_DPLL_5 (0x8f00)
+#define HW_DPLL_6 (0x9000)
+#define HW_DPLL_7 (0x9100)
#define HW_DPLL_TOD_SW_TRIG_ADDR__0 (0x080)
#define HW_DPLL_TOD_CTRL_1 (0x089)
@@ -57,6 +61,43 @@
#define SYNCTRL1_Q1_DIV_SYNC_TRIG BIT(1)
#define SYNCTRL1_Q0_DIV_SYNC_TRIG BIT(0)
+#define HW_Q8_CTRL_SPARE (0xa7d4)
+#define HW_Q11_CTRL_SPARE (0xa7ec)
+
+/**
+ * Select FOD5 as sync_trigger for Q8 divider.
+ * Transition from logic zero to one
+ * sets trigger to sync Q8 divider.
+ *
+ * Unused when FOD4 is driving Q8 divider (normal operation).
+ */
+#define Q9_TO_Q8_SYNC_TRIG BIT(1)
+
+/**
+ * Enable FOD5 as driver for clock and sync for Q8 divider.
+ * Enable fanout buffer for FOD5.
+ *
+ * Unused when FOD4 is driving Q8 divider (normal operation).
+ */
+#define Q9_TO_Q8_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK (BIT(0) | BIT(2))
+
+/**
+ * Select FOD6 as sync_trigger for Q11 divider.
+ * Transition from logic zero to one
+ * sets trigger to sync Q11 divider.
+ *
+ * Unused when FOD7 is driving Q11 divider (normal operation).
+ */
+#define Q10_TO_Q11_SYNC_TRIG BIT(1)
+
+/**
+ * Enable FOD6 as driver for clock and sync for Q11 divider.
+ * Enable fanout buffer for FOD6.
+ *
+ * Unused when FOD7 is driving Q11 divider (normal operation).
+ */
+#define Q10_TO_Q11_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK (BIT(0) | BIT(2))
+
#define RESET_CTRL 0xc000
#define SM_RESET 0x0012
#define SM_RESET_CMD 0x5A
@@ -191,6 +232,7 @@
#define DPLL_CTRL_0 0xc600
#define DPLL_CTRL_DPLL_MANU_REF_CFG 0x0001
+#define DPLL_CTRL_COMBO_MASTER_CFG 0x003a
#define DPLL_CTRL_1 0xc63c
@@ -646,6 +688,9 @@
/* Bit definitions for the TOD_WRITE_CMD register */
#define TOD_WRITE_SELECTION_SHIFT (0)
#define TOD_WRITE_SELECTION_MASK (0xf)
+/* 4.8.7 */
+#define TOD_WRITE_TYPE_SHIFT (4)
+#define TOD_WRITE_TYPE_MASK (0x3)
/* Bit definitions for the TOD_READ_PRIMARY_SEL_CFG_0 register */
#define RD_PWM_DECODER_INDEX_SHIFT (4)
@@ -658,4 +703,7 @@
#define TOD_READ_TRIGGER_SHIFT (0)
#define TOD_READ_TRIGGER_MASK (0xf)
+/* Bit definitions for the DPLL_CTRL_COMBO_MASTER_CFG register */
+#define COMBO_MASTER_HOLD BIT(0)
+
#endif
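The spare-control masks above are intended for read-modify-write sequences on the Q8/Q11 routing. A sketch of routing FOD5 onto the Q8 divider using the driver's register accessors (illustrative only; the patch itself only toggles the sync trigger and tests the fanout bits):

    u8 val;
    int err;

    err = idtcm_read(idtcm, 0, HW_Q8_CTRL_SPARE, &val, sizeof(val));
    if (err)
            return err;

    /* FOD5 drives clock and sync for the Q8 divider */
    val |= Q9_TO_Q8_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK;

    err = idtcm_write(idtcm, 0, HW_Q8_CTRL_SPARE, &val, sizeof(val));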
diff --git a/drivers/ptp/ptp_clockmatrix.c b/drivers/ptp/ptp_clockmatrix.c
index ceb6bc58f3b4..73aaae5574ed 100644
--- a/drivers/ptp/ptp_clockmatrix.c
+++ b/drivers/ptp/ptp_clockmatrix.c
@@ -13,6 +13,7 @@
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/timekeeping.h>
+#include <linux/string.h>
#include "ptp_private.h"
#include "ptp_clockmatrix.h"
@@ -23,6 +24,13 @@ MODULE_AUTHOR("IDT support-1588 <IDT-support-1588@lm.renesas.com>");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
+/*
+ * The name of the firmware file to be loaded
+ * overrides any automatic selection.
+ */
+static char *firmware;
+module_param(firmware, charp, 0);
+
#define SETTIME_CORRECTION (0)
static long set_write_phase_ready(struct ptp_clock_info *ptp)
@@ -95,6 +103,45 @@ static int timespec_to_char_array(struct timespec64 const *ts,
return 0;
}
+static int idtcm_strverscmp(const char *ver1, const char *ver2)
+{
+ u8 num1;
+ u8 num2;
+ int result = 0;
+
+ /* loop through each level of the version string */
+ while (result == 0) {
+ /*
+ * extract leading version numbers; sscanf() stops parsing at the
+ * first '.', whereas kstrtou8() would reject the whole string for
+ * its trailing characters
+ */
+ if (sscanf(ver1, "%hhu", &num1) != 1)
+ return -1;
+
+ if (sscanf(ver2, "%hhu", &num2) != 1)
+ return -1;
+
+ /* if numbers differ, then set the result */
+ if (num1 < num2)
+ result = -1;
+ else if (num1 > num2)
+ result = 1;
+ else {
+ /* if numbers are the same, go to next level */
+ ver1 = strchr(ver1, '.');
+ ver2 = strchr(ver2, '.');
+ if (!ver1 && !ver2)
+ break;
+ else if (!ver1)
+ result = -1;
+ else if (!ver2)
+ result = 1;
+ else {
+ ver1++;
+ ver2++;
+ }
+ }
+ }
+ return result;
+}
+
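A quick contract check for the comparator (expected return values, given the parsing rules above):

    idtcm_strverscmp("4.8.7", "4.8.7");  /*  0: every level equal */
    idtcm_strverscmp("4.8.6", "4.8.7");  /* -1: third level differs */
    idtcm_strverscmp("4.8",   "4.8.7");  /* -1: shorter version is older */
    idtcm_strverscmp("10.0",  "9.9");    /*  1: numeric, not lexicographic */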
static int idtcm_xfer(struct idtcm *idtcm,
u8 regaddr,
u8 *buf,
@@ -104,6 +151,7 @@ static int idtcm_xfer(struct idtcm *idtcm,
struct i2c_client *client = idtcm->client;
struct i2c_msg msg[2];
int cnt;
+ char *fmt = "i2c_transfer failed at line %d in %s for %s, at addr: %04X!\n";
msg[0].addr = client->addr;
msg[0].flags = 0;
@@ -118,7 +166,12 @@ static int idtcm_xfer(struct idtcm *idtcm,
cnt = i2c_transfer(client->adapter, msg, 2);
if (cnt < 0) {
- dev_err(&client->dev, "i2c_transfer returned %d\n", cnt);
+ dev_err(&client->dev,
+ fmt,
+ __LINE__,
+ __func__,
+ write ? "write" : "read",
+ regaddr);
return cnt;
} else if (cnt != 2) {
dev_err(&client->dev,
@@ -144,10 +197,12 @@ static int idtcm_page_offset(struct idtcm *idtcm, u8 val)
err = idtcm_xfer(idtcm, PAGE_ADDR, buf, sizeof(buf), 1);
- if (err)
+ if (err) {
+ idtcm->page_offset = 0xff;
dev_err(&idtcm->client->dev, "failed to set page offset\n");
- else
+ } else {
idtcm->page_offset = val;
+ }
return err;
}
@@ -198,6 +253,7 @@ static int _idtcm_gettime(struct idtcm_channel *channel,
{
struct idtcm *idtcm = channel->idtcm;
u8 buf[TOD_BYTE_COUNT];
+ u8 timeout = 10;
u8 trigger;
int err;
@@ -208,16 +264,29 @@ static int _idtcm_gettime(struct idtcm_channel *channel,
trigger &= ~(TOD_READ_TRIGGER_MASK << TOD_READ_TRIGGER_SHIFT);
trigger |= (1 << TOD_READ_TRIGGER_SHIFT);
- trigger |= TOD_READ_TRIGGER_MODE;
+ trigger &= ~TOD_READ_TRIGGER_MODE; /* single shot */
err = idtcm_write(idtcm, channel->tod_read_primary,
TOD_READ_PRIMARY_CMD, &trigger, sizeof(trigger));
-
if (err)
return err;
- if (idtcm->calculate_overhead_flag)
- idtcm->start_time = ktime_get_raw();
+ /* wait for the trigger to clear */
+ while (trigger & TOD_READ_TRIGGER_MASK) {
+
+ if (idtcm->calculate_overhead_flag)
+ idtcm->start_time = ktime_get_raw();
+
+ err = idtcm_read(idtcm, channel->tod_read_primary,
+ TOD_READ_PRIMARY_CMD, &trigger,
+ sizeof(trigger));
+
+ if (err)
+ return err;
+
+ if (--timeout == 0)
+ return -EIO;
+ }
err = idtcm_read(idtcm, channel->tod_read_primary,
TOD_READ_PRIMARY, buf, sizeof(buf));
@@ -240,6 +309,7 @@ static int _sync_pll_output(struct idtcm *idtcm,
u8 val;
u16 sync_ctrl0;
u16 sync_ctrl1;
+ u8 temp;
if ((qn == 0) && (qn_plus_1 == 0))
return 0;
@@ -305,6 +375,50 @@ static int _sync_pll_output(struct idtcm *idtcm,
if (err)
return err;
+ /* PLL5 can have OUT8 as second additional output. */
+ if ((pll == 5) && (qn_plus_1 != 0)) {
+ err = idtcm_read(idtcm, 0, HW_Q8_CTRL_SPARE,
+ &temp, sizeof(temp));
+ if (err)
+ return err;
+
+ temp &= ~(Q9_TO_Q8_SYNC_TRIG);
+
+ err = idtcm_write(idtcm, 0, HW_Q8_CTRL_SPARE,
+ &temp, sizeof(temp));
+ if (err)
+ return err;
+
+ temp |= Q9_TO_Q8_SYNC_TRIG;
+
+ err = idtcm_write(idtcm, 0, HW_Q8_CTRL_SPARE,
+ &temp, sizeof(temp));
+ if (err)
+ return err;
+ }
+
+ /* PLL6 can have OUT11 as second additional output. */
+ if ((pll == 6) && (qn_plus_1 != 0)) {
+ err = idtcm_read(idtcm, 0, HW_Q11_CTRL_SPARE,
+ &temp, sizeof(temp));
+ if (err)
+ return err;
+
+ temp &= ~(Q10_TO_Q11_SYNC_TRIG);
+
+ err = idtcm_write(idtcm, 0, HW_Q11_CTRL_SPARE,
+ &temp, sizeof(temp));
+ if (err)
+ return err;
+
+ temp |= Q10_TO_Q11_SYNC_TRIG;
+
+ err = idtcm_write(idtcm, 0, HW_Q11_CTRL_SPARE,
+ &temp, sizeof(temp));
+ if (err)
+ return err;
+ }
+
/* Place master sync out of reset */
val &= ~(SYNCTRL1_MASTER_SYNC_RST);
err = idtcm_write(idtcm, 0, sync_ctrl1, &val, sizeof(val));
@@ -312,6 +426,30 @@ static int _sync_pll_output(struct idtcm *idtcm,
return err;
}
+static int sync_source_dpll_tod_pps(u16 tod_addr, u8 *sync_src)
+{
+ int err = 0;
+
+ switch (tod_addr) {
+ case TOD_0:
+ *sync_src = SYNC_SOURCE_DPLL0_TOD_PPS;
+ break;
+ case TOD_1:
+ *sync_src = SYNC_SOURCE_DPLL1_TOD_PPS;
+ break;
+ case TOD_2:
+ *sync_src = SYNC_SOURCE_DPLL2_TOD_PPS;
+ break;
+ case TOD_3:
+ *sync_src = SYNC_SOURCE_DPLL3_TOD_PPS;
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
static int idtcm_sync_pps_output(struct idtcm_channel *channel)
{
struct idtcm *idtcm = channel->idtcm;
@@ -321,37 +459,68 @@ static int idtcm_sync_pps_output(struct idtcm_channel *channel)
u8 qn;
u8 qn_plus_1;
int err = 0;
+ u8 out8_mux = 0;
+ u8 out11_mux = 0;
+ u8 temp;
u16 output_mask = channel->output_mask;
- switch (channel->dpll_n) {
- case DPLL_0:
- sync_src = SYNC_SOURCE_DPLL0_TOD_PPS;
- break;
- case DPLL_1:
- sync_src = SYNC_SOURCE_DPLL1_TOD_PPS;
- break;
- case DPLL_2:
- sync_src = SYNC_SOURCE_DPLL2_TOD_PPS;
- break;
- case DPLL_3:
- sync_src = SYNC_SOURCE_DPLL3_TOD_PPS;
- break;
- default:
- return -EINVAL;
- }
+ err = sync_source_dpll_tod_pps(channel->tod_n, &sync_src);
+ if (err)
+ return err;
- for (pll = 0; pll < 8; pll++) {
+ err = idtcm_read(idtcm, 0, HW_Q8_CTRL_SPARE,
+ &temp, sizeof(temp));
+ if (err)
+ return err;
- qn = output_mask & 0x1;
- output_mask = output_mask >> 1;
+ if ((temp & Q9_TO_Q8_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK) ==
+ Q9_TO_Q8_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK)
+ out8_mux = 1;
+
+ err = idtcm_read(idtcm, 0, HW_Q11_CTRL_SPARE,
+ &temp, sizeof(temp));
+ if (err)
+ return err;
+
+ if ((temp & Q10_TO_Q11_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK) ==
+ Q10_TO_Q11_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK)
+ out11_mux = 1;
+
+ for (pll = 0; pll < 8; pll++) {
+ qn = 0;
+ qn_plus_1 = 0;
if (pll < 4) {
/* The first 4 PLLs have 2 outputs */
+ qn = output_mask & 0x1;
+ output_mask = output_mask >> 1;
qn_plus_1 = output_mask & 0x1;
output_mask = output_mask >> 1;
- } else {
- qn_plus_1 = 0;
+ } else if (pll == 4) {
+ if (out8_mux == 0) {
+ qn = output_mask & 0x1;
+ output_mask = output_mask >> 1;
+ }
+ } else if (pll == 5) {
+ if (out8_mux) {
+ qn_plus_1 = output_mask & 0x1;
+ output_mask = output_mask >> 1;
+ }
+ qn = output_mask & 0x1;
+ output_mask = output_mask >> 1;
+ } else if (pll == 6) {
+ qn = output_mask & 0x1;
+ output_mask = output_mask >> 1;
+ if (out11_mux) {
+ qn_plus_1 = output_mask & 0x1;
+ output_mask = output_mask >> 1;
+ }
+ } else if (pll == 7) {
+ if (out11_mux == 0) {
+ qn = output_mask & 0x1;
+ output_mask = output_mask >> 1;
+ }
}
if ((qn != 0) || (qn_plus_1 != 0))
@@ -365,7 +534,7 @@ static int idtcm_sync_pps_output(struct idtcm_channel *channel)
return err;
}
-static int _idtcm_set_dpll_tod(struct idtcm_channel *channel,
+static int _idtcm_set_dpll_hw_tod(struct idtcm_channel *channel,
struct timespec64 const *ts,
enum hw_tod_write_trig_sel wr_trig)
{
@@ -439,17 +608,78 @@ static int _idtcm_set_dpll_tod(struct idtcm_channel *channel,
return err;
}
+static int _idtcm_set_dpll_scsr_tod(struct idtcm_channel *channel,
+ struct timespec64 const *ts,
+ enum scsr_tod_write_trig_sel wr_trig,
+ enum scsr_tod_write_type_sel wr_type)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ unsigned char buf[TOD_BYTE_COUNT], cmd;
+ struct timespec64 local_ts = *ts;
+ int err, count = 0;
+
+ timespec64_add_ns(&local_ts, SETTIME_CORRECTION);
+
+ err = timespec_to_char_array(&local_ts, buf, sizeof(buf));
+
+ if (err)
+ return err;
+
+ err = idtcm_write(idtcm, channel->tod_write, TOD_WRITE,
+ buf, sizeof(buf));
+ if (err)
+ return err;
+
+ /* Trigger the write operation. */
+ err = idtcm_read(idtcm, channel->tod_write, TOD_WRITE_CMD,
+ &cmd, sizeof(cmd));
+ if (err)
+ return err;
+
+ cmd &= ~(TOD_WRITE_SELECTION_MASK << TOD_WRITE_SELECTION_SHIFT);
+ cmd &= ~(TOD_WRITE_TYPE_MASK << TOD_WRITE_TYPE_SHIFT);
+ cmd |= (wr_trig << TOD_WRITE_SELECTION_SHIFT);
+ cmd |= (wr_type << TOD_WRITE_TYPE_SHIFT);
+
+ err = idtcm_write(idtcm, channel->tod_write, TOD_WRITE_CMD,
+ &cmd, sizeof(cmd));
+ if (err)
+ return err;
+
+ /* Wait for the operation to complete. */
+ while (1) {
+ /* pps trigger takes up to 1 sec to complete */
+ if (wr_trig == SCSR_TOD_WR_TRIG_SEL_TODPPS)
+ msleep(50);
+
+ err = idtcm_read(idtcm, channel->tod_write, TOD_WRITE_CMD,
+ &cmd, sizeof(cmd));
+ if (err)
+ return err;
+
+ if (cmd == 0)
+ break;
+
+ if (++count > 20) {
+ dev_err(&idtcm->client->dev,
+ "Timed out waiting for the write counter\n");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
static int _idtcm_settime(struct idtcm_channel *channel,
struct timespec64 const *ts,
enum hw_tod_write_trig_sel wr_trig)
{
struct idtcm *idtcm = channel->idtcm;
- s32 retval;
int err;
int i;
u8 trig_sel;
- err = _idtcm_set_dpll_tod(channel, ts, wr_trig);
+ err = _idtcm_set_dpll_hw_tod(channel, ts, wr_trig);
if (err)
return err;
@@ -469,12 +699,24 @@ static int _idtcm_settime(struct idtcm_channel *channel,
err = 1;
}
- if (err)
+ if (err) {
+ dev_err(&idtcm->client->dev,
+ "Failed at line %d in func %s!\n",
+ __LINE__,
+ __func__);
return err;
+ }
- retval = idtcm_sync_pps_output(channel);
+ return idtcm_sync_pps_output(channel);
+}
- return retval;
+static int _idtcm_settime_v487(struct idtcm_channel *channel,
+ struct timespec64 const *ts,
+ enum scsr_tod_write_type_sel wr_type)
+{
+ return _idtcm_set_dpll_scsr_tod(channel, ts,
+ SCSR_TOD_WR_TRIG_SEL_IMMEDIATE,
+ wr_type);
}
static int idtcm_set_phase_pull_in_offset(struct idtcm_channel *channel,
@@ -565,6 +807,50 @@ static int idtcm_do_phase_pull_in(struct idtcm_channel *channel,
return err;
}
+static int set_tod_write_overhead(struct idtcm_channel *channel)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ s64 current_ns = 0;
+ s64 lowest_ns = 0;
+ int err;
+ u8 i;
+
+ ktime_t start;
+ ktime_t stop;
+
+ char buf[TOD_BYTE_COUNT] = {0};
+
+ /* Set page offset */
+ idtcm_write(idtcm, channel->hw_dpll_n, HW_DPLL_TOD_OVR__0,
+ buf, sizeof(buf));
+
+ for (i = 0; i < TOD_WRITE_OVERHEAD_COUNT_MAX; i++) {
+
+ start = ktime_get_raw();
+
+ err = idtcm_write(idtcm, channel->hw_dpll_n,
+ HW_DPLL_TOD_OVR__0, buf, sizeof(buf));
+
+ if (err)
+ return err;
+
+ stop = ktime_get_raw();
+
+ current_ns = ktime_to_ns(stop - start);
+
+ if (i == 0) {
+ lowest_ns = current_ns;
+ } else {
+ if (current_ns < lowest_ns)
+ lowest_ns = current_ns;
+ }
+ }
+
+ idtcm->tod_write_overhead_ns = lowest_ns;
+
+ return err;
+}
+
static int _idtcm_adjtime(struct idtcm_channel *channel, s64 delta)
{
int err;
@@ -577,6 +863,11 @@ static int _idtcm_adjtime(struct idtcm_channel *channel, s64 delta)
} else {
idtcm->calculate_overhead_flag = 1;
+ err = set_tod_write_overhead(channel);
+
+ if (err)
+ return err;
+
err = _idtcm_gettime(channel, &ts);
if (err)
@@ -656,93 +947,118 @@ static int idtcm_read_otp_scsr_config_select(struct idtcm *idtcm,
config_select, sizeof(u8));
}
-static int process_pll_mask(struct idtcm *idtcm, u32 addr, u8 val, u8 *mask)
-{
- int err = 0;
-
- if (addr == PLL_MASK_ADDR) {
- if ((val & 0xf0) || !(val & 0xf)) {
- dev_err(&idtcm->client->dev,
- "Invalid PLL mask 0x%hhx\n", val);
- err = -EINVAL;
- }
- *mask = val;
- }
-
- return err;
-}
-
static int set_pll_output_mask(struct idtcm *idtcm, u16 addr, u8 val)
{
int err = 0;
switch (addr) {
- case OUTPUT_MASK_PLL0_ADDR:
+ case TOD0_OUT_ALIGN_MASK_ADDR:
SET_U16_LSB(idtcm->channel[0].output_mask, val);
break;
- case OUTPUT_MASK_PLL0_ADDR + 1:
+ case TOD0_OUT_ALIGN_MASK_ADDR + 1:
SET_U16_MSB(idtcm->channel[0].output_mask, val);
break;
- case OUTPUT_MASK_PLL1_ADDR:
+ case TOD1_OUT_ALIGN_MASK_ADDR:
SET_U16_LSB(idtcm->channel[1].output_mask, val);
break;
- case OUTPUT_MASK_PLL1_ADDR + 1:
+ case TOD1_OUT_ALIGN_MASK_ADDR + 1:
SET_U16_MSB(idtcm->channel[1].output_mask, val);
break;
- case OUTPUT_MASK_PLL2_ADDR:
+ case TOD2_OUT_ALIGN_MASK_ADDR:
SET_U16_LSB(idtcm->channel[2].output_mask, val);
break;
- case OUTPUT_MASK_PLL2_ADDR + 1:
+ case TOD2_OUT_ALIGN_MASK_ADDR + 1:
SET_U16_MSB(idtcm->channel[2].output_mask, val);
break;
- case OUTPUT_MASK_PLL3_ADDR:
+ case TOD3_OUT_ALIGN_MASK_ADDR:
SET_U16_LSB(idtcm->channel[3].output_mask, val);
break;
- case OUTPUT_MASK_PLL3_ADDR + 1:
+ case TOD3_OUT_ALIGN_MASK_ADDR + 1:
SET_U16_MSB(idtcm->channel[3].output_mask, val);
break;
default:
- err = -EINVAL;
+ err = -EFAULT; /* Bad address */
break;
}
return err;
}
+static int set_tod_ptp_pll(struct idtcm *idtcm, u8 index, u8 pll)
+{
+ if (index >= MAX_TOD) {
+ dev_err(&idtcm->client->dev, "ToD%d not supported\n", index);
+ return -EINVAL;
+ }
+
+ if (pll >= MAX_PLL) {
+ dev_err(&idtcm->client->dev, "Pll%d not supported\n", pll);
+ return -EINVAL;
+ }
+
+ idtcm->channel[index].pll = pll;
+
+ return 0;
+}
+
static int check_and_set_masks(struct idtcm *idtcm,
u16 regaddr,
u8 val)
{
int err = 0;
- if (set_pll_output_mask(idtcm, regaddr, val)) {
- /* Not an output mask, check for pll mask */
- err = process_pll_mask(idtcm, regaddr, val, &idtcm->pll_mask);
+ switch (regaddr) {
+ case TOD_MASK_ADDR:
+ if ((val & 0xf0) || !(val & 0x0f)) {
+ dev_err(&idtcm->client->dev,
+ "Invalid TOD mask 0x%hhx\n", val);
+ err = -EINVAL;
+ } else {
+ idtcm->tod_mask = val;
+ }
+ break;
+ case TOD0_PTP_PLL_ADDR:
+ err = set_tod_ptp_pll(idtcm, 0, val);
+ break;
+ case TOD1_PTP_PLL_ADDR:
+ err = set_tod_ptp_pll(idtcm, 1, val);
+ break;
+ case TOD2_PTP_PLL_ADDR:
+ err = set_tod_ptp_pll(idtcm, 2, val);
+ break;
+ case TOD3_PTP_PLL_ADDR:
+ err = set_tod_ptp_pll(idtcm, 3, val);
+ break;
+ default:
+ err = set_pll_output_mask(idtcm, regaddr, val);
+ break;
}
return err;
}
-static void display_pll_and_output_masks(struct idtcm *idtcm)
+static void display_pll_and_masks(struct idtcm *idtcm)
{
u8 i;
u8 mask;
- dev_dbg(&idtcm->client->dev, "pllmask = 0x%02x\n", idtcm->pll_mask);
+ dev_dbg(&idtcm->client->dev, "tod_mask = 0x%02x\n", idtcm->tod_mask);
- for (i = 0; i < MAX_PHC_PLL; i++) {
+ for (i = 0; i < MAX_TOD; i++) {
mask = 1 << i;
- if (mask & idtcm->pll_mask)
+ if (mask & idtcm->tod_mask)
dev_dbg(&idtcm->client->dev,
- "PLL%d output_mask = 0x%04x\n",
- i, idtcm->channel[i].output_mask);
+ "TOD%d pll = %d output_mask = 0x%04x\n",
+ i, idtcm->channel[i].pll,
+ idtcm->channel[i].output_mask);
}
}
static int idtcm_load_firmware(struct idtcm *idtcm,
struct device *dev)
{
+ char fname[128] = FW_FILENAME;
const struct firmware *fw;
struct idtcm_fwrc *rec;
u32 regaddr;
@@ -751,12 +1067,20 @@ static int idtcm_load_firmware(struct idtcm *idtcm,
u8 val;
u8 loaddr;
- dev_dbg(&idtcm->client->dev, "requesting firmware '%s'\n", FW_FILENAME);
+ if (firmware) /* module parameter */
+ snprintf(fname, sizeof(fname), "%s", firmware);
- err = request_firmware(&fw, FW_FILENAME, dev);
+ dev_dbg(&idtcm->client->dev, "requesting firmware '%s'\n", fname);
- if (err)
+ err = request_firmware(&fw, fname, dev);
+
+ if (err) {
+ dev_err(&idtcm->client->dev,
+ "Failed at line %d in func %s!\n",
+ __LINE__,
+ __func__);
return err;
+ }
dev_dbg(&idtcm->client->dev, "firmware size %zu bytes\n", fw->size);
@@ -783,7 +1107,9 @@ static int idtcm_load_firmware(struct idtcm *idtcm,
err = check_and_set_masks(idtcm, regaddr, val);
}
- if (err == 0) {
+ if (err != -EINVAL) {
+ err = 0;
+
/* Top (status registers) and bottom are read-only */
if ((regaddr < GPIO_USER_CONTROL)
|| (regaddr >= SCRATCH))
@@ -801,42 +1127,22 @@ static int idtcm_load_firmware(struct idtcm *idtcm,
goto out;
}
- display_pll_and_output_masks(idtcm);
+ display_pll_and_masks(idtcm);
out:
release_firmware(fw);
return err;
}
-static int idtcm_pps_enable(struct idtcm_channel *channel, bool enable)
+static int idtcm_output_enable(struct idtcm_channel *channel,
+ bool enable, unsigned int outn)
{
struct idtcm *idtcm = channel->idtcm;
- u32 module;
- u8 val;
int err;
+ u8 val;
- /*
- * This assumes that the 1-PPS is on the second of the two
- * output. But is this always true?
- */
- switch (channel->dpll_n) {
- case DPLL_0:
- module = OUTPUT_1;
- break;
- case DPLL_1:
- module = OUTPUT_3;
- break;
- case DPLL_2:
- module = OUTPUT_5;
- break;
- case DPLL_3:
- module = OUTPUT_7;
- break;
- default:
- return -EINVAL;
- }
-
- err = idtcm_read(idtcm, module, OUT_CTRL_1, &val, sizeof(val));
+ err = idtcm_read(idtcm, OUTPUT_MODULE_FROM_INDEX(outn),
+ OUT_CTRL_1, &val, sizeof(val));
if (err)
return err;
@@ -846,14 +1152,50 @@ static int idtcm_pps_enable(struct idtcm_channel *channel, bool enable)
else
val &= ~SQUELCH_DISABLE;
- err = idtcm_write(idtcm, module, OUT_CTRL_1, &val, sizeof(val));
+ return idtcm_write(idtcm, OUTPUT_MODULE_FROM_INDEX(outn),
+ OUT_CTRL_1, &val, sizeof(val));
+}
- if (err)
- return err;
+static int idtcm_output_mask_enable(struct idtcm_channel *channel,
+ bool enable)
+{
+ u16 mask;
+ int err;
+ u8 outn;
+
+ mask = channel->output_mask;
+ outn = 0;
+
+ while (mask) {
+
+ if (mask & 0x1) {
+
+ err = idtcm_output_enable(channel, enable, outn);
+
+ if (err)
+ return err;
+ }
+
+ mask >>= 0x1;
+ outn++;
+ }
return 0;
}
+static int idtcm_perout_enable(struct idtcm_channel *channel,
+ bool enable,
+ struct ptp_perout_request *perout)
+{
+ unsigned int flags = perout->flags;
+
+ if (flags == PEROUT_ENABLE_OUTPUT_MASK)
+ return idtcm_output_mask_enable(channel, enable);
+
+ /* Enable/disable individual output instead */
+ return idtcm_output_enable(channel, enable, perout->index);
+}
+
static int idtcm_set_pll_mode(struct idtcm_channel *channel,
enum pll_mode pll_mode)
{
@@ -940,10 +1282,8 @@ static int _idtcm_adjphase(struct idtcm_channel *channel, s32 delta_ns)
return err;
}
-static int idtcm_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+static int _idtcm_adjfine(struct idtcm_channel *channel, long scaled_ppm)
{
- struct idtcm_channel *channel =
- container_of(ptp, struct idtcm_channel, caps);
struct idtcm *idtcm = channel->idtcm;
u8 i;
bool neg_adj = 0;
@@ -970,15 +1310,15 @@ static int idtcm_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
* FCW = -------------
* 111 * 2^4
*/
- if (ppb < 0) {
+ if (scaled_ppm < 0) {
neg_adj = 1;
- ppb = -ppb;
+ scaled_ppm = -scaled_ppm;
}
/* 2 ^ -53 = 1.1102230246251565404236316680908e-16 */
- fcw = ppb * 1000000000000ULL;
+ fcw = scaled_ppm * 244140625ULL;
- fcw = div_u64(fcw, 111022);
+ fcw = div_u64(fcw, 1776);
if (neg_adj)
fcw = -fcw;
@@ -988,12 +1328,9 @@ static int idtcm_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
fcw >>= 8;
}
- mutex_lock(&idtcm->reg_lock);
-
err = idtcm_write(idtcm, channel->dpll_freq, DPLL_WR_FREQ,
buf, sizeof(buf));
- mutex_unlock(&idtcm->reg_lock);
return err;
}
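The constant swap above can be sanity-checked by hand: scaled_ppm is ppm scaled by 2^16, and 244140625 = 10^12 / 2^12, so

    fcw = scaled_ppm * 244140625 / 1776
        = scaled_ppm * 10^12 / (2^12 * 1776)
        = scaled_ppm * 10^12 / (111 * 2^16)
        = ppm * 10^12 / 111

The old path computed ppb * 10^12 / 111022 = ppm * 10^12 / 111.022, so moving to adjfine also drops the roughly 0.02% rounding error baked into the old integer constant.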
@@ -1008,6 +1345,12 @@ static int idtcm_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
err = _idtcm_gettime(channel, ts);
+ if (err)
+ dev_err(&idtcm->client->dev,
+ "Failed at line %d in func %s!\n",
+ __LINE__,
+ __func__);
+
mutex_unlock(&idtcm->reg_lock);
return err;
@@ -1025,6 +1368,35 @@ static int idtcm_settime(struct ptp_clock_info *ptp,
err = _idtcm_settime(channel, ts, HW_TOD_WR_TRIG_SEL_MSB);
+ if (err)
+ dev_err(&idtcm->client->dev,
+ "Failed at line %d in func %s!\n",
+ __LINE__,
+ __func__);
+
+ mutex_unlock(&idtcm->reg_lock);
+
+ return err;
+}
+
+static int idtcm_settime_v487(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct idtcm_channel *channel =
+ container_of(ptp, struct idtcm_channel, caps);
+ struct idtcm *idtcm = channel->idtcm;
+ int err;
+
+ mutex_lock(&idtcm->reg_lock);
+
+ err = _idtcm_settime_v487(channel, ts, SCSR_TOD_WR_TYPE_SEL_ABSOLUTE);
+
+ if (err)
+ dev_err(&idtcm->client->dev,
+ "Failed at line %d in func %s!\n",
+ __LINE__,
+ __func__);
+
mutex_unlock(&idtcm->reg_lock);
return err;
@@ -1041,6 +1413,54 @@ static int idtcm_adjtime(struct ptp_clock_info *ptp, s64 delta)
err = _idtcm_adjtime(channel, delta);
+ if (err)
+ dev_err(&idtcm->client->dev,
+ "Failed at line %d in func %s!\n",
+ __LINE__,
+ __func__);
+
+ mutex_unlock(&idtcm->reg_lock);
+
+ return err;
+}
+
+static int idtcm_adjtime_v487(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct idtcm_channel *channel =
+ container_of(ptp, struct idtcm_channel, caps);
+ struct idtcm *idtcm = channel->idtcm;
+ struct timespec64 ts;
+ enum scsr_tod_write_type_sel type;
+ int err;
+
+ if (abs(delta) < PHASE_PULL_IN_THRESHOLD_NS_V487) {
+ err = idtcm_do_phase_pull_in(channel, delta, 0);
+ if (err)
+ dev_err(&idtcm->client->dev,
+ "Failed at line %d in func %s!\n",
+ __LINE__,
+ __func__);
+ return err;
+ }
+
+ if (delta >= 0) {
+ ts = ns_to_timespec64(delta);
+ type = SCSR_TOD_WR_TYPE_SEL_DELTA_PLUS;
+ } else {
+ ts = ns_to_timespec64(-delta);
+ type = SCSR_TOD_WR_TYPE_SEL_DELTA_MINUS;
+ }
+
+ mutex_lock(&idtcm->reg_lock);
+
+ err = _idtcm_settime_v487(channel, &ts, type);
+
+ if (err)
+ dev_err(&idtcm->client->dev,
+ "Failed at line %d in func %s!\n",
+ __LINE__,
+ __func__);
+
mutex_unlock(&idtcm->reg_lock);
return err;
@@ -1059,6 +1479,36 @@ static int idtcm_adjphase(struct ptp_clock_info *ptp, s32 delta)
err = _idtcm_adjphase(channel, delta);
+ if (err)
+ dev_err(&idtcm->client->dev,
+ "Failed at line %d in func %s!\n",
+ __LINE__,
+ __func__);
+
+ mutex_unlock(&idtcm->reg_lock);
+
+ return err;
+}
+
+static int idtcm_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct idtcm_channel *channel =
+ container_of(ptp, struct idtcm_channel, caps);
+
+ struct idtcm *idtcm = channel->idtcm;
+
+ int err;
+
+ mutex_lock(&idtcm->reg_lock);
+
+ err = _idtcm_adjfine(channel, scaled_ppm);
+
+ if (err)
+ dev_err(&idtcm->client->dev,
+ "Failed at line %d in func %s!\n",
+ __LINE__,
+ __func__);
+
mutex_unlock(&idtcm->reg_lock);
return err;
@@ -1067,20 +1517,35 @@ static int idtcm_adjphase(struct ptp_clock_info *ptp, s32 delta)
static int idtcm_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
+ int err;
+
struct idtcm_channel *channel =
container_of(ptp, struct idtcm_channel, caps);
switch (rq->type) {
case PTP_CLK_REQ_PEROUT:
- if (!on)
- return idtcm_pps_enable(channel, false);
+ if (!on) {
+ err = idtcm_perout_enable(channel, false, &rq->perout);
+ if (err)
+ dev_err(&channel->idtcm->client->dev,
+ "Failed at line %d in func %s!\n",
+ __LINE__,
+ __func__);
+ return err;
+ }
/* Only accept a 1-PPS aligned to the second. */
if (rq->perout.start.nsec || rq->perout.period.sec != 1 ||
rq->perout.period.nsec)
return -ERANGE;
- return idtcm_pps_enable(channel, true);
+ err = idtcm_perout_enable(channel, true, &rq->perout);
+ if (err)
+ dev_err(&channel->idtcm->client->dev,
+ "Failed at line %d in func %s!\n",
+ __LINE__,
+ __func__);
+ return err;
default:
break;
}
@@ -1088,17 +1553,237 @@ static int idtcm_enable(struct ptp_clock_info *ptp,
return -EOPNOTSUPP;
}
-static int idtcm_enable_tod(struct idtcm_channel *channel)
+static int _enable_pll_tod_sync(struct idtcm *idtcm,
+ u8 pll,
+ u8 sync_src,
+ u8 qn,
+ u8 qn_plus_1)
+{
+ int err;
+ u8 val;
+ u16 dpll;
+ u16 out0 = 0, out1 = 0;
+
+ if ((qn == 0) && (qn_plus_1 == 0))
+ return 0;
+
+ switch (pll) {
+ case 0:
+ dpll = DPLL_0;
+ if (qn)
+ out0 = OUTPUT_0;
+ if (qn_plus_1)
+ out1 = OUTPUT_1;
+ break;
+ case 1:
+ dpll = DPLL_1;
+ if (qn)
+ out0 = OUTPUT_2;
+ if (qn_plus_1)
+ out1 = OUTPUT_3;
+ break;
+ case 2:
+ dpll = DPLL_2;
+ if (qn)
+ out0 = OUTPUT_4;
+ if (qn_plus_1)
+ out1 = OUTPUT_5;
+ break;
+ case 3:
+ dpll = DPLL_3;
+ if (qn)
+ out0 = OUTPUT_6;
+ if (qn_plus_1)
+ out1 = OUTPUT_7;
+ break;
+ case 4:
+ dpll = DPLL_4;
+ if (qn)
+ out0 = OUTPUT_8;
+ break;
+ case 5:
+ dpll = DPLL_5;
+ if (qn)
+ out0 = OUTPUT_9;
+ if (qn_plus_1)
+ out1 = OUTPUT_8;
+ break;
+ case 6:
+ dpll = DPLL_6;
+ if (qn)
+ out0 = OUTPUT_10;
+ if (qn_plus_1)
+ out1 = OUTPUT_11;
+ break;
+ case 7:
+ dpll = DPLL_7;
+ if (qn)
+ out0 = OUTPUT_11;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * Enable OUTPUT OUT_SYNC.
+ */
+ if (out0) {
+ err = idtcm_read(idtcm, out0, OUT_CTRL_1, &val, sizeof(val));
+
+ if (err)
+ return err;
+
+ val &= ~OUT_SYNC_DISABLE;
+
+ err = idtcm_write(idtcm, out0, OUT_CTRL_1, &val, sizeof(val));
+
+ if (err)
+ return err;
+ }
+
+ if (out1) {
+ err = idtcm_read(idtcm, out1, OUT_CTRL_1, &val, sizeof(val));
+
+ if (err)
+ return err;
+
+ val &= ~OUT_SYNC_DISABLE;
+
+ err = idtcm_write(idtcm, out1, OUT_CTRL_1, &val, sizeof(val));
+
+ if (err)
+ return err;
+ }
+
+ /* enable dpll sync tod pps, must be set before dpll_mode */
+ err = idtcm_read(idtcm, dpll, DPLL_TOD_SYNC_CFG, &val, sizeof(val));
+ if (err)
+ return err;
+
+ val &= ~(TOD_SYNC_SOURCE_MASK << TOD_SYNC_SOURCE_SHIFT);
+ val |= (sync_src << TOD_SYNC_SOURCE_SHIFT);
+ val |= TOD_SYNC_EN;
+
+ return idtcm_write(idtcm, dpll, DPLL_TOD_SYNC_CFG, &val, sizeof(val));
+}
+
+static int idtcm_enable_tod_sync(struct idtcm_channel *channel)
{
struct idtcm *idtcm = channel->idtcm;
- struct timespec64 ts = {0, 0};
+
+ u8 pll;
+ u8 sync_src;
+ u8 qn;
+ u8 qn_plus_1;
u8 cfg;
- int err;
+ int err = 0;
+ u16 output_mask = channel->output_mask;
+ u8 out8_mux = 0;
+ u8 out11_mux = 0;
+ u8 temp;
+
+ /*
+ * set tod_out_sync_enable to 0.
+ */
+ err = idtcm_read(idtcm, channel->tod_n, TOD_CFG, &cfg, sizeof(cfg));
+ if (err)
+ return err;
+
+ cfg &= ~TOD_OUT_SYNC_ENABLE;
+
+ err = idtcm_write(idtcm, channel->tod_n, TOD_CFG, &cfg, sizeof(cfg));
+ if (err)
+ return err;
+
+ switch (channel->tod_n) {
+ case TOD_0:
+ sync_src = 0;
+ break;
+ case TOD_1:
+ sync_src = 1;
+ break;
+ case TOD_2:
+ sync_src = 2;
+ break;
+ case TOD_3:
+ sync_src = 3;
+ break;
+ default:
+ return -EINVAL;
+ }
- err = idtcm_pps_enable(channel, false);
+ err = idtcm_read(idtcm, 0, HW_Q8_CTRL_SPARE,
+ &temp, sizeof(temp));
if (err)
return err;
+ if ((temp & Q9_TO_Q8_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK) ==
+ Q9_TO_Q8_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK)
+ out8_mux = 1;
+
+ err = idtcm_read(idtcm, 0, HW_Q11_CTRL_SPARE,
+ &temp, sizeof(temp));
+ if (err)
+ return err;
+
+ if ((temp & Q10_TO_Q11_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK) ==
+ Q10_TO_Q11_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK)
+ out11_mux = 1;
+
+ for (pll = 0; pll < 8; pll++) {
+ qn = 0;
+ qn_plus_1 = 0;
+
+ if (pll < 4) {
+ /* The first 4 PLLs have 2 outputs */
+ qn = output_mask & 0x1;
+ output_mask = output_mask >> 1;
+ qn_plus_1 = output_mask & 0x1;
+ output_mask = output_mask >> 1;
+ } else if (pll == 4) {
+ if (out8_mux == 0) {
+ qn = output_mask & 0x1;
+ output_mask = output_mask >> 1;
+ }
+ } else if (pll == 5) {
+ if (out8_mux) {
+ qn_plus_1 = output_mask & 0x1;
+ output_mask = output_mask >> 1;
+ }
+ qn = output_mask & 0x1;
+ output_mask = output_mask >> 1;
+ } else if (pll == 6) {
+ qn = output_mask & 0x1;
+ output_mask = output_mask >> 1;
+ if (out11_mux) {
+ qn_plus_1 = output_mask & 0x1;
+ output_mask = output_mask >> 1;
+ }
+ } else if (pll == 7) {
+ if (out11_mux == 0) {
+ qn = output_mask & 0x1;
+ output_mask = output_mask >> 1;
+ }
+ }
+
+ if ((qn != 0) || (qn_plus_1 != 0))
+ err = _enable_pll_tod_sync(idtcm, pll, sync_src, qn,
+ qn_plus_1);
+
+ if (err)
+ return err;
+ }
+
+ return err;
+}
+
+static int idtcm_enable_tod(struct idtcm_channel *channel)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ struct timespec64 ts = {0, 0};
+ u8 cfg;
+ int err;
+
/*
* Start the TOD clock ticking.
*/
@@ -1134,16 +1819,32 @@ static void idtcm_display_version_info(struct idtcm *idtcm)
idtcm_read_otp_scsr_config_select(idtcm, &config_select);
+ snprintf(idtcm->version, sizeof(idtcm->version), "%u.%u.%u",
+ major, minor, hotfix);
+
dev_info(&idtcm->client->dev, fmt, major, minor, hotfix,
product_id, hw_rev_id, config_select);
}
+static const struct ptp_clock_info idtcm_caps_v487 = {
+ .owner = THIS_MODULE,
+ .max_adj = 244000,
+ .n_per_out = 12,
+ .adjphase = &idtcm_adjphase,
+ .adjfine = &idtcm_adjfine,
+ .adjtime = &idtcm_adjtime_v487,
+ .gettime64 = &idtcm_gettime,
+ .settime64 = &idtcm_settime_v487,
+ .enable = &idtcm_enable,
+ .do_aux_work = &set_write_phase_ready,
+};
+
static const struct ptp_clock_info idtcm_caps = {
.owner = THIS_MODULE,
.max_adj = 244000,
- .n_per_out = 1,
+ .n_per_out = 12,
.adjphase = &idtcm_adjphase,
- .adjfreq = &idtcm_adjfreq,
+ .adjfine = &idtcm_adjfine,
.adjtime = &idtcm_adjtime,
.gettime64 = &idtcm_gettime,
.settime64 = &idtcm_settime,
@@ -1151,24 +1852,14 @@ static const struct ptp_clock_info idtcm_caps = {
.do_aux_work = &set_write_phase_ready,
};
-
-static int idtcm_enable_channel(struct idtcm *idtcm, u32 index)
+static int configure_channel_pll(struct idtcm_channel *channel)
{
- struct idtcm_channel *channel;
- int err;
-
- if (!(index < MAX_PHC_PLL))
- return -EINVAL;
-
- channel = &idtcm->channel[index];
+ int err = 0;
- switch (index) {
+ switch (channel->pll) {
case 0:
channel->dpll_freq = DPLL_FREQ_0;
channel->dpll_n = DPLL_0;
- channel->tod_read_primary = TOD_READ_PRIMARY_0;
- channel->tod_write = TOD_WRITE_0;
- channel->tod_n = TOD_0;
channel->hw_dpll_n = HW_DPLL_0;
channel->dpll_phase = DPLL_PHASE_0;
channel->dpll_ctrl_n = DPLL_CTRL_0;
@@ -1177,9 +1868,6 @@ static int idtcm_enable_channel(struct idtcm *idtcm, u32 index)
case 1:
channel->dpll_freq = DPLL_FREQ_1;
channel->dpll_n = DPLL_1;
- channel->tod_read_primary = TOD_READ_PRIMARY_1;
- channel->tod_write = TOD_WRITE_1;
- channel->tod_n = TOD_1;
channel->hw_dpll_n = HW_DPLL_1;
channel->dpll_phase = DPLL_PHASE_1;
channel->dpll_ctrl_n = DPLL_CTRL_1;
@@ -1188,9 +1876,6 @@ static int idtcm_enable_channel(struct idtcm *idtcm, u32 index)
case 2:
channel->dpll_freq = DPLL_FREQ_2;
channel->dpll_n = DPLL_2;
- channel->tod_read_primary = TOD_READ_PRIMARY_2;
- channel->tod_write = TOD_WRITE_2;
- channel->tod_n = TOD_2;
channel->hw_dpll_n = HW_DPLL_2;
channel->dpll_phase = DPLL_PHASE_2;
channel->dpll_ctrl_n = DPLL_CTRL_2;
@@ -1199,31 +1884,129 @@ static int idtcm_enable_channel(struct idtcm *idtcm, u32 index)
case 3:
channel->dpll_freq = DPLL_FREQ_3;
channel->dpll_n = DPLL_3;
- channel->tod_read_primary = TOD_READ_PRIMARY_3;
- channel->tod_write = TOD_WRITE_3;
- channel->tod_n = TOD_3;
channel->hw_dpll_n = HW_DPLL_3;
channel->dpll_phase = DPLL_PHASE_3;
channel->dpll_ctrl_n = DPLL_CTRL_3;
channel->dpll_phase_pull_in = DPLL_PHASE_PULL_IN_3;
break;
+ case 4:
+ channel->dpll_freq = DPLL_FREQ_4;
+ channel->dpll_n = DPLL_4;
+ channel->hw_dpll_n = HW_DPLL_4;
+ channel->dpll_phase = DPLL_PHASE_4;
+ channel->dpll_ctrl_n = DPLL_CTRL_4;
+ channel->dpll_phase_pull_in = DPLL_PHASE_PULL_IN_4;
+ break;
+ case 5:
+ channel->dpll_freq = DPLL_FREQ_5;
+ channel->dpll_n = DPLL_5;
+ channel->hw_dpll_n = HW_DPLL_5;
+ channel->dpll_phase = DPLL_PHASE_5;
+ channel->dpll_ctrl_n = DPLL_CTRL_5;
+ channel->dpll_phase_pull_in = DPLL_PHASE_PULL_IN_5;
+ break;
+ case 6:
+ channel->dpll_freq = DPLL_FREQ_6;
+ channel->dpll_n = DPLL_6;
+ channel->hw_dpll_n = HW_DPLL_6;
+ channel->dpll_phase = DPLL_PHASE_6;
+ channel->dpll_ctrl_n = DPLL_CTRL_6;
+ channel->dpll_phase_pull_in = DPLL_PHASE_PULL_IN_6;
+ break;
+ case 7:
+ channel->dpll_freq = DPLL_FREQ_7;
+ channel->dpll_n = DPLL_7;
+ channel->hw_dpll_n = HW_DPLL_7;
+ channel->dpll_phase = DPLL_PHASE_7;
+ channel->dpll_ctrl_n = DPLL_CTRL_7;
+ channel->dpll_phase_pull_in = DPLL_PHASE_PULL_IN_7;
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int idtcm_enable_channel(struct idtcm *idtcm, u32 index)
+{
+ struct idtcm_channel *channel;
+ int err;
+
+ if (!(index < MAX_TOD))
+ return -EINVAL;
+
+ channel = &idtcm->channel[index];
+
+ /* Set pll addresses */
+ err = configure_channel_pll(channel);
+ if (err)
+ return err;
+
+ /* Set tod addresses */
+ switch (index) {
+ case 0:
+ channel->tod_read_primary = TOD_READ_PRIMARY_0;
+ channel->tod_write = TOD_WRITE_0;
+ channel->tod_n = TOD_0;
+ break;
+ case 1:
+ channel->tod_read_primary = TOD_READ_PRIMARY_1;
+ channel->tod_write = TOD_WRITE_1;
+ channel->tod_n = TOD_1;
+ break;
+ case 2:
+ channel->tod_read_primary = TOD_READ_PRIMARY_2;
+ channel->tod_write = TOD_WRITE_2;
+ channel->tod_n = TOD_2;
+ break;
+ case 3:
+ channel->tod_read_primary = TOD_READ_PRIMARY_3;
+ channel->tod_write = TOD_WRITE_3;
+ channel->tod_n = TOD_3;
+ break;
default:
return -EINVAL;
}
channel->idtcm = idtcm;
- channel->caps = idtcm_caps;
+ if (idtcm_strverscmp(idtcm->version, "4.8.7") >= 0)
+ channel->caps = idtcm_caps_v487;
+ else
+ channel->caps = idtcm_caps;
+
snprintf(channel->caps.name, sizeof(channel->caps.name),
- "IDT CM PLL%u", index);
+ "IDT CM TOD%u", index);
+
+ if (idtcm_strverscmp(idtcm->version, "4.8.7") >= 0) {
+ err = idtcm_enable_tod_sync(channel);
+ if (err) {
+ dev_err(&idtcm->client->dev,
+ "Failed at line %d in func %s!\n",
+ __LINE__,
+ __func__);
+ return err;
+ }
+ }
err = idtcm_set_pll_mode(channel, PLL_MODE_WRITE_FREQUENCY);
- if (err)
+ if (err) {
+ dev_err(&idtcm->client->dev,
+ "Failed at line %d in func %s!\n",
+ __LINE__,
+ __func__);
return err;
+ }
err = idtcm_enable_tod(channel);
- if (err)
+ if (err) {
+ dev_err(&idtcm->client->dev,
+ "Failed at line %d in func %s!\n",
+ __LINE__,
+ __func__);
return err;
+ }
channel->ptp_clock = ptp_clock_register(&channel->caps, NULL);
@@ -1249,7 +2032,7 @@ static void ptp_clock_unregister_all(struct idtcm *idtcm)
u8 i;
struct idtcm_channel *channel;
- for (i = 0; i < MAX_PHC_PLL; i++) {
+ for (i = 0; i < MAX_TOD; i++) {
channel = &idtcm->channel[i];
@@ -1260,7 +2043,12 @@ static void ptp_clock_unregister_all(struct idtcm *idtcm)
static void set_default_masks(struct idtcm *idtcm)
{
- idtcm->pll_mask = DEFAULT_PLL_MASK;
+ idtcm->tod_mask = DEFAULT_TOD_MASK;
+
+ idtcm->channel[0].pll = DEFAULT_TOD0_PTP_PLL;
+ idtcm->channel[1].pll = DEFAULT_TOD1_PTP_PLL;
+ idtcm->channel[2].pll = DEFAULT_TOD2_PTP_PLL;
+ idtcm->channel[3].pll = DEFAULT_TOD3_PTP_PLL;
idtcm->channel[0].output_mask = DEFAULT_OUTPUT_MASK_PLL0;
idtcm->channel[1].output_mask = DEFAULT_OUTPUT_MASK_PLL1;
@@ -1268,51 +2056,13 @@ static void set_default_masks(struct idtcm *idtcm)
idtcm->channel[3].output_mask = DEFAULT_OUTPUT_MASK_PLL3;
}
-static int set_tod_write_overhead(struct idtcm *idtcm)
-{
- int err;
- u8 i;
-
- s64 total_ns = 0;
-
- ktime_t start;
- ktime_t stop;
-
- char buf[TOD_BYTE_COUNT];
-
- struct idtcm_channel *channel = &idtcm->channel[2];
-
- /* Set page offset */
- idtcm_write(idtcm, channel->hw_dpll_n, HW_DPLL_TOD_OVR__0,
- buf, sizeof(buf));
-
- for (i = 0; i < TOD_WRITE_OVERHEAD_COUNT_MAX; i++) {
-
- start = ktime_get_raw();
-
- err = idtcm_write(idtcm, channel->hw_dpll_n,
- HW_DPLL_TOD_OVR__0, buf, sizeof(buf));
-
- if (err)
- return err;
-
- stop = ktime_get_raw();
-
- total_ns += ktime_to_ns(stop - start);
- }
-
- idtcm->tod_write_overhead_ns = div_s64(total_ns,
- TOD_WRITE_OVERHEAD_COUNT_MAX);
-
- return err;
-}
-
static int idtcm_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct idtcm *idtcm;
int err;
u8 i;
+ char *fmt = "Failed at %d in line %s with channel output %d!\n";
/* Unused for now */
(void)id;
@@ -1333,25 +2083,24 @@ static int idtcm_probe(struct i2c_client *client,
idtcm_display_version_info(idtcm);
- err = set_tod_write_overhead(idtcm);
-
- if (err) {
- mutex_unlock(&idtcm->reg_lock);
- return err;
- }
-
err = idtcm_load_firmware(idtcm, &client->dev);
if (err)
dev_warn(&idtcm->client->dev,
"loading firmware failed with %d\n", err);
- if (idtcm->pll_mask) {
- for (i = 0; i < MAX_PHC_PLL; i++) {
- if (idtcm->pll_mask & (1 << i)) {
+ if (idtcm->tod_mask) {
+ for (i = 0; i < MAX_TOD; i++) {
+ if (idtcm->tod_mask & (1 << i)) {
err = idtcm_enable_channel(idtcm, i);
- if (err)
+ if (err) {
+ dev_err(&idtcm->client->dev,
+ fmt,
+ __LINE__,
+ __func__,
+ i);
break;
+ }
}
}
} else {
diff --git a/drivers/ptp/ptp_clockmatrix.h b/drivers/ptp/ptp_clockmatrix.h
index 3de0eb72889c..ffae56c5d97f 100644
--- a/drivers/ptp/ptp_clockmatrix.h
+++ b/drivers/ptp/ptp_clockmatrix.h
@@ -13,32 +13,48 @@
#include "idt8a340_reg.h"
#define FW_FILENAME "idtcm.bin"
-#define MAX_PHC_PLL 4
+#define MAX_TOD (4)
+#define MAX_PLL (8)
#define MAX_ABS_WRITE_PHASE_PICOSECONDS (107374182350LL)
-#define PLL_MASK_ADDR (0xFFA5)
-#define DEFAULT_PLL_MASK (0x04)
+#define TOD_MASK_ADDR (0xFFA5)
+#define DEFAULT_TOD_MASK (0x04)
#define SET_U16_LSB(orig, val8) (orig = (0xff00 & (orig)) | (val8))
#define SET_U16_MSB(orig, val8) (orig = (0x00ff & (orig)) | (val8 << 8))
-#define OUTPUT_MASK_PLL0_ADDR (0xFFB0)
-#define OUTPUT_MASK_PLL1_ADDR (0xFFB2)
-#define OUTPUT_MASK_PLL2_ADDR (0xFFB4)
-#define OUTPUT_MASK_PLL3_ADDR (0xFFB6)
+#define TOD0_PTP_PLL_ADDR (0xFFA8)
+#define TOD1_PTP_PLL_ADDR (0xFFA9)
+#define TOD2_PTP_PLL_ADDR (0xFFAA)
+#define TOD3_PTP_PLL_ADDR (0xFFAB)
+
+#define TOD0_OUT_ALIGN_MASK_ADDR (0xFFB0)
+#define TOD1_OUT_ALIGN_MASK_ADDR (0xFFB2)
+#define TOD2_OUT_ALIGN_MASK_ADDR (0xFFB4)
+#define TOD3_OUT_ALIGN_MASK_ADDR (0xFFB6)
#define DEFAULT_OUTPUT_MASK_PLL0 (0x003)
#define DEFAULT_OUTPUT_MASK_PLL1 (0x00c)
#define DEFAULT_OUTPUT_MASK_PLL2 (0x030)
#define DEFAULT_OUTPUT_MASK_PLL3 (0x0c0)
+#define DEFAULT_TOD0_PTP_PLL (0)
+#define DEFAULT_TOD1_PTP_PLL (1)
+#define DEFAULT_TOD2_PTP_PLL (2)
+#define DEFAULT_TOD3_PTP_PLL (3)
+
#define POST_SM_RESET_DELAY_MS (3000)
#define PHASE_PULL_IN_THRESHOLD_NS (150000)
-#define TOD_WRITE_OVERHEAD_COUNT_MAX (5)
+#define PHASE_PULL_IN_THRESHOLD_NS_V487 (15000)
+#define TOD_WRITE_OVERHEAD_COUNT_MAX (2)
#define TOD_BYTE_COUNT (11)
#define WR_PHASE_SETUP_MS (5000)
+#define OUTPUT_MODULE_FROM_INDEX(index) (OUTPUT_0 + (index) * 0x10)
+
+#define PEROUT_ENABLE_OUTPUT_MASK (0xdeadbeef)
+
/* Values of DPLL_N.DPLL_MODE.PLL_MODE */
enum pll_mode {
PLL_MODE_MIN = 0,
@@ -48,7 +64,8 @@ enum pll_mode {
PLL_MODE_GPIO_INC_DEC = 3,
PLL_MODE_SYNTHESIS = 4,
PLL_MODE_PHASE_MEASUREMENT = 5,
- PLL_MODE_MAX = PLL_MODE_PHASE_MEASUREMENT,
+ PLL_MODE_DISABLED = 6,
+ PLL_MODE_MAX = PLL_MODE_DISABLED,
};
enum hw_tod_write_trig_sel {
@@ -63,6 +80,26 @@ enum hw_tod_write_trig_sel {
WR_TRIG_SEL_MAX = HW_TOD_WR_TRIG_SEL_FOD_SYNC,
};
+/* 4.8.7 only */
+enum scsr_tod_write_trig_sel {
+ SCSR_TOD_WR_TRIG_SEL_DISABLE = 0,
+ SCSR_TOD_WR_TRIG_SEL_IMMEDIATE = 1,
+ SCSR_TOD_WR_TRIG_SEL_REFCLK = 2,
+ SCSR_TOD_WR_TRIG_SEL_PWMPPS = 3,
+ SCSR_TOD_WR_TRIG_SEL_TODPPS = 4,
+ SCSR_TOD_WR_TRIG_SEL_SYNCFOD = 5,
+ SCSR_TOD_WR_TRIG_SEL_GPIO = 6,
+ SCSR_TOD_WR_TRIG_SEL_MAX = SCSR_TOD_WR_TRIG_SEL_GPIO,
+};
+
+/* 4.8.7 only */
+enum scsr_tod_write_type_sel {
+ SCSR_TOD_WR_TYPE_SEL_ABSOLUTE = 0,
+ SCSR_TOD_WR_TYPE_SEL_DELTA_PLUS = 1,
+ SCSR_TOD_WR_TYPE_SEL_DELTA_MINUS = 2,
+ SCSR_TOD_WR_TYPE_SEL_MAX = SCSR_TOD_WR_TYPE_SEL_DELTA_MINUS,
+};
+
struct idtcm;
struct idtcm_channel {
@@ -79,15 +116,17 @@ struct idtcm_channel {
u16 tod_n;
u16 hw_dpll_n;
enum pll_mode pll_mode;
+ u8 pll;
u16 output_mask;
int write_phase_ready;
};
struct idtcm {
- struct idtcm_channel channel[MAX_PHC_PLL];
+ struct idtcm_channel channel[MAX_TOD];
struct i2c_client *client;
u8 page_offset;
- u8 pll_mask;
+ u8 tod_mask;
+ char version[16];
/* Overhead calculation for adjtime */
u8 calculate_overhead_flag;
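PEROUT_ENABLE_OUTPUT_MASK is compared against the flags field of the periodic-output request (see idtcm_perout_enable() above), so userspace can flip a channel's whole output mask with one request. A hedged sketch of the caller side (device path and values hypothetical):

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/ptp_clock.h>

    static int enable_all_outputs(int fd)  /* fd from open("/dev/ptp0", O_RDWR) */
    {
            struct ptp_perout_request req;

            memset(&req, 0, sizeof(req));
            req.index = 0;
            req.period.sec = 1;        /* the driver only accepts 1-PPS */
            req.flags = 0xdeadbeef;    /* PEROUT_ENABLE_OUTPUT_MASK */

            return ioctl(fd, PTP_PEROUT_REQUEST, &req);
    }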
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index c7fade836d83..5fbe9eae84d1 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -231,7 +231,7 @@ static int ism_alloc_dmb(struct ism_dev *ism, struct smcd_dmb *dmb)
bit = find_next_zero_bit(ism->sba_bitmap, ISM_NR_DMBS,
ISM_DMB_BIT_OFFSET);
if (bit == ISM_NR_DMBS)
- return -ENOMEM;
+ return -ENOSPC;
dmb->sba_idx = bit;
}
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 3d54c8bbfa86..ecfd6d152e86 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -764,6 +764,7 @@ struct qeth_rx {
u8 buf_element;
int e_offset;
int qdio_err;
+ u8 bufs_refill;
};
struct carrier_info {
@@ -833,7 +834,6 @@ struct qeth_card {
struct napi_struct napi;
struct qeth_rx rx;
struct delayed_work buffer_reclaim_work;
- int reclaim_index;
struct work_struct close_dev_work;
};
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 8a76022fceda..bba1b54b8aa3 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -204,12 +204,17 @@ EXPORT_SYMBOL_GPL(qeth_threads_running);
void qeth_clear_working_pool_list(struct qeth_card *card)
{
struct qeth_buffer_pool_entry *pool_entry, *tmp;
+ struct qeth_qdio_q *queue = card->qdio.in_q;
+ unsigned int i;
QETH_CARD_TEXT(card, 5, "clwrklst");
list_for_each_entry_safe(pool_entry, tmp,
&card->qdio.in_buf_pool.entry_list, list){
list_del(&pool_entry->list);
}
+
+ for (i = 0; i < ARRAY_SIZE(queue->bufs); i++)
+ queue->bufs[i].pool_entry = NULL;
}
EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list);
@@ -2965,7 +2970,7 @@ static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
static int qeth_init_input_buffer(struct qeth_card *card,
struct qeth_qdio_buffer *buf)
{
- struct qeth_buffer_pool_entry *pool_entry;
+ struct qeth_buffer_pool_entry *pool_entry = buf->pool_entry;
int i;
if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
@@ -2976,9 +2981,13 @@ static int qeth_init_input_buffer(struct qeth_card *card,
return -ENOMEM;
}
- pool_entry = qeth_find_free_buffer_pool_entry(card);
- if (!pool_entry)
- return -ENOBUFS;
+ if (!pool_entry) {
+ pool_entry = qeth_find_free_buffer_pool_entry(card);
+ if (!pool_entry)
+ return -ENOBUFS;
+
+ buf->pool_entry = pool_entry;
+ }
/*
* since the buffer is accessed only from the input_tasklet
@@ -2986,8 +2995,6 @@ static int qeth_init_input_buffer(struct qeth_card *card,
* the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
* buffers
*/
-
- buf->pool_entry = pool_entry;
for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
buf->buffer->element[i].length = PAGE_SIZE;
buf->buffer->element[i].addr =
@@ -3015,6 +3022,7 @@ static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card,
static int qeth_init_qdio_queues(struct qeth_card *card)
{
+ unsigned int rx_bufs = card->qdio.in_buf_pool.buf_count;
unsigned int i;
int rc;
@@ -3026,16 +3034,14 @@ static int qeth_init_qdio_queues(struct qeth_card *card)
qeth_initialize_working_pool_list(card);
/*give only as many buffers to hardware as we have buffer pool entries*/
- for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; i++) {
+ for (i = 0; i < rx_bufs; i++) {
rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
if (rc)
return rc;
}
- card->qdio.in_q->next_buf_to_init =
- card->qdio.in_buf_pool.buf_count - 1;
- rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
- card->qdio.in_buf_pool.buf_count - 1);
+ card->qdio.in_q->next_buf_to_init = QDIO_BUFNR(rx_bufs);
+ rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, rx_bufs);
if (rc) {
QETH_CARD_TEXT_(card, 2, "1err%d", rc);
return rc;
@@ -3485,20 +3491,15 @@ static int qeth_check_qdio_errors(struct qeth_card *card,
return 0;
}
-static void qeth_queue_input_buffer(struct qeth_card *card, int index)
+static unsigned int qeth_rx_refill_queue(struct qeth_card *card,
+ unsigned int count)
{
struct qeth_qdio_q *queue = card->qdio.in_q;
struct list_head *lh;
- int count;
int i;
int rc;
int newcount = 0;
- count = (index < queue->next_buf_to_init)?
- card->qdio.in_buf_pool.buf_count -
- (queue->next_buf_to_init - index) :
- card->qdio.in_buf_pool.buf_count -
- (queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index);
/* only requeue at a certain threshold to avoid SIGAs */
if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
for (i = queue->next_buf_to_init;
@@ -3526,21 +3527,13 @@ static void qeth_queue_input_buffer(struct qeth_card *card, int index)
i++;
if (i == card->qdio.in_buf_pool.buf_count) {
QETH_CARD_TEXT(card, 2, "qsarbw");
- card->reclaim_index = index;
schedule_delayed_work(
&card->buffer_reclaim_work,
QETH_RECLAIM_WORK_TIME);
}
- return;
+ return 0;
}
- /*
- * according to old code it should be avoided to requeue all
- * 128 buffers in order to benefit from PCI avoidance.
- * this function keeps at least one buffer (the buffer at
- * 'index') un-requeued -> this buffer is the first buffer that
- * will be requeued the next time
- */
rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
queue->next_buf_to_init, count);
if (rc) {
@@ -3548,7 +3541,10 @@ static void qeth_queue_input_buffer(struct qeth_card *card, int index)
}
queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init +
count);
+ return count;
}
+
+ return 0;
}
static void qeth_buffer_reclaim_work(struct work_struct *work)
@@ -3556,8 +3552,10 @@ static void qeth_buffer_reclaim_work(struct work_struct *work)
struct qeth_card *card = container_of(work, struct qeth_card,
buffer_reclaim_work.work);
- QETH_CARD_TEXT_(card, 2, "brw:%x", card->reclaim_index);
- qeth_queue_input_buffer(card, card->reclaim_index);
+ local_bh_disable();
+ napi_schedule(&card->napi);
+ /* kick-start the NAPI softirq: */
+ local_bh_enable();
}
static void qeth_handle_send_error(struct qeth_card *card,
@@ -5736,6 +5734,7 @@ static unsigned int qeth_extract_skbs(struct qeth_card *card, int budget,
static unsigned int qeth_rx_poll(struct qeth_card *card, int budget)
{
+ struct qeth_rx *ctx = &card->rx;
unsigned int work_done = 0;
while (budget > 0) {
@@ -5771,8 +5770,11 @@ static unsigned int qeth_rx_poll(struct qeth_card *card, int budget)
if (done) {
QETH_CARD_STAT_INC(card, rx_bufs);
qeth_put_buffer_pool_entry(card, buffer->pool_entry);
- qeth_queue_input_buffer(card, card->rx.b_index);
+ buffer->pool_entry = NULL;
card->rx.b_count--;
+ ctx->bufs_refill++;
+ ctx->bufs_refill -= qeth_rx_refill_queue(card,
+ ctx->bufs_refill);
/* Step forward to next buffer: */
card->rx.b_index = QDIO_BUFNR(card->rx.b_index + 1);
@@ -5812,9 +5814,16 @@ int qeth_poll(struct napi_struct *napi, int budget)
if (card->options.cq == QETH_CQ_ENABLED)
qeth_cq_poll(card);
- /* Exhausted the RX budget. Keep IRQ disabled, we get called again. */
- if (budget && work_done >= budget)
- return work_done;
+ if (budget) {
+ struct qeth_rx *ctx = &card->rx;
+
+ /* Process any substantial refill backlog: */
+ ctx->bufs_refill -= qeth_rx_refill_queue(card, ctx->bufs_refill);
+
+ /* Exhausted the RX budget. Keep IRQ disabled, we get called again. */
+ if (work_done >= budget)
+ return work_done;
+ }
if (napi_complete_done(napi, work_done) &&
qdio_start_irq(CARD_DDEV(card)))
@@ -7001,6 +7010,7 @@ int qeth_stop(struct net_device *dev)
}
napi_disable(&card->napi);
+ cancel_delayed_work_sync(&card->buffer_reclaim_work);
qdio_stop_irq(CARD_DDEV(card));
return 0;
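One detail worth spelling out in the qeth_init_qdio_queues() change: QDIO_BUFNR() masks an index into the 128-entry QDIO ring, roughly

    #define QDIO_BUFNR(num) ((num) & (QDIO_MAX_BUFFERS_PER_Q - 1))

as defined in arch/s390/include/asm/qdio.h, so with a full pool of 128 buffers next_buf_to_init wraps cleanly to 0. The old code deliberately stopped at buf_count - 1 to keep one buffer un-requeued; now that the refill backlog is tracked explicitly in bufs_refill and drained from NAPI context, every pool buffer can be handed to the device up front.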
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index ef7a2db7a724..8b342a88ff5c 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -285,7 +285,6 @@ static void qeth_l2_stop_card(struct qeth_card *card)
if (card->state == CARD_STATE_SOFTSETUP) {
qeth_clear_ipacmd_list(card);
qeth_drain_output_queues(card);
- cancel_delayed_work_sync(&card->buffer_reclaim_work);
card->state = CARD_STATE_DOWN;
}
@@ -1141,6 +1140,10 @@ static void qeth_bridge_state_change(struct qeth_card *card,
int extrasize;
QETH_CARD_TEXT(card, 2, "brstchng");
+ if (qports->num_entries == 0) {
+ QETH_CARD_TEXT(card, 2, "BPempty");
+ return;
+ }
if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) {
QETH_CARD_TEXT_(card, 2, "BPsz%04x", qports->entry_length);
return;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 15a12487ff7a..fe44b0249e34 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1169,7 +1169,6 @@ static void qeth_l3_stop_card(struct qeth_card *card)
qeth_l3_clear_ip_htable(card, 1);
qeth_clear_ipacmd_list(card);
qeth_drain_output_queues(card);
- cancel_delayed_work_sync(&card->buffer_reclaim_work);
card->state = CARD_STATE_DOWN;
}
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 0ba7a65e7c8d..06056e9ec333 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -547,6 +547,15 @@ static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
scsi_uninit_cmd(cmd);
}
+static void scsi_run_queue_async(struct scsi_device *sdev)
+{
+ if (scsi_target(sdev)->single_lun ||
+ !list_empty(&sdev->host->starved_list))
+ kblockd_schedule_work(&sdev->requeue_work);
+ else
+ blk_mq_run_hw_queues(sdev->request_queue, true);
+}
+
/* Returns false when no more bytes to process, true if there are more */
static bool scsi_end_request(struct request *req, blk_status_t error,
unsigned int bytes)
@@ -591,11 +600,7 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
__blk_mq_end_request(req, error);
- if (scsi_target(sdev)->single_lun ||
- !list_empty(&sdev->host->starved_list))
- kblockd_schedule_work(&sdev->requeue_work);
- else
- blk_mq_run_hw_queues(q, true);
+ scsi_run_queue_async(sdev);
percpu_ref_put(&q->q_usage_counter);
return false;
@@ -1702,6 +1707,7 @@ out_put_budget:
*/
if (req->rq_flags & RQF_DONTPREP)
scsi_mq_uninit_cmd(cmd);
+ scsi_run_queue_async(sdev);
break;
}
return ret;
diff --git a/drivers/staging/comedi/drivers/addi_apci_1032.c b/drivers/staging/comedi/drivers/addi_apci_1032.c
index 560649be9d13..e035c9f757a1 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1032.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1032.c
@@ -106,14 +106,22 @@ static int apci1032_cos_insn_config(struct comedi_device *dev,
unsigned int *data)
{
struct apci1032_private *devpriv = dev->private;
- unsigned int shift, oldmask;
+ unsigned int shift, oldmask, himask, lomask;
switch (data[0]) {
case INSN_CONFIG_DIGITAL_TRIG:
if (data[1] != 0)
return -EINVAL;
shift = data[3];
- oldmask = (1U << shift) - 1;
+ if (shift < 32) {
+ oldmask = (1U << shift) - 1;
+ himask = data[4] << shift;
+ lomask = data[5] << shift;
+ } else {
+ oldmask = 0xffffffffu;
+ himask = 0;
+ lomask = 0;
+ }
switch (data[2]) {
case COMEDI_DIGITAL_TRIG_DISABLE:
devpriv->ctrl = 0;
@@ -136,8 +144,8 @@ static int apci1032_cos_insn_config(struct comedi_device *dev,
devpriv->mode2 &= oldmask;
}
/* configure specified channels */
- devpriv->mode1 |= data[4] << shift;
- devpriv->mode2 |= data[5] << shift;
+ devpriv->mode1 |= himask;
+ devpriv->mode2 |= lomask;
break;
case COMEDI_DIGITAL_TRIG_ENABLE_LEVELS:
if (devpriv->ctrl != (APCI1032_CTRL_INT_ENA |
@@ -154,8 +162,8 @@ static int apci1032_cos_insn_config(struct comedi_device *dev,
devpriv->mode2 &= oldmask;
}
/* configure specified channels */
- devpriv->mode1 |= data[4] << shift;
- devpriv->mode2 |= data[5] << shift;
+ devpriv->mode1 |= himask;
+ devpriv->mode2 |= lomask;
break;
default:
return -EINVAL;
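
The shift < 32 guard matters because data[3] comes from userspace, and both 1U << shift and data[4] << shift are undefined in C once shift reaches 32; the patch clamps the masks instead of computing garbage. The same idiom as a small runnable sketch:

	#include <stdio.h>

	/* Mirrors the fix above: build the masks without UB for shift >= 32. */
	static void build_masks(unsigned int shift, unsigned int hi,
				unsigned int lo, unsigned int *oldmask,
				unsigned int *himask, unsigned int *lomask)
	{
		if (shift < 32) {
			*oldmask = (1U << shift) - 1;
			*himask = hi << shift;
			*lomask = lo << shift;
		} else {
			*oldmask = 0xffffffffu;	/* preserve all channels */
			*himask = 0;		/* nothing new to set */
			*lomask = 0;
		}
	}

	int main(void)
	{
		unsigned int om, hm, lm;

		build_masks(40, 0x3, 0x1, &om, &hm, &lm);
		printf("old=%08x hi=%08x lo=%08x\n", om, hm, lm);
		return 0;
	}
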
diff --git a/drivers/staging/comedi/drivers/addi_apci_1500.c b/drivers/staging/comedi/drivers/addi_apci_1500.c
index 689acd69a1b9..816dd25b9d0e 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1500.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1500.c
@@ -452,13 +452,14 @@ static int apci1500_di_cfg_trig(struct comedi_device *dev,
struct apci1500_private *devpriv = dev->private;
unsigned int trig = data[1];
unsigned int shift = data[3];
- unsigned int hi_mask = data[4] << shift;
- unsigned int lo_mask = data[5] << shift;
- unsigned int chan_mask = hi_mask | lo_mask;
- unsigned int old_mask = (1 << shift) - 1;
+ unsigned int hi_mask;
+ unsigned int lo_mask;
+ unsigned int chan_mask;
+ unsigned int old_mask;
unsigned int pm;
unsigned int pt;
unsigned int pp;
+ unsigned int invalid_chan;
if (trig > 1) {
dev_dbg(dev->class_dev,
@@ -466,7 +467,20 @@ static int apci1500_di_cfg_trig(struct comedi_device *dev,
return -EINVAL;
}
- if (chan_mask > 0xffff) {
+ if (shift <= 16) {
+ hi_mask = data[4] << shift;
+ lo_mask = data[5] << shift;
+ old_mask = (1U << shift) - 1;
+ invalid_chan = (data[4] | data[5]) >> (16 - shift);
+ } else {
+ hi_mask = 0;
+ lo_mask = 0;
+ old_mask = 0xffff;
+ invalid_chan = data[4] | data[5];
+ }
+ chan_mask = hi_mask | lo_mask;
+
+ if (invalid_chan) {
dev_dbg(dev->class_dev, "invalid digital trigger channel\n");
return -EINVAL;
}
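
apci1500 goes further because its port is only 16 bits wide: besides avoiding the undefined shift, it computes invalid_chan so that any requested bit landing above bit 15 rejects the whole request. For shift <= 16, (data[4] | data[5]) >> (16 - shift) keeps exactly those overflowing bits. As a standalone helper (a sketch, not driver code):

	/* Non-zero iff any bit of 'mask', shifted left by 'shift', would
	 * land above bit 15 of a 16-bit port. */
	static unsigned int bits_above_16bit_port(unsigned int mask,
						  unsigned int shift)
	{
		if (shift > 16)
			return mask;	/* every set bit is out of range */
		return mask >> (16 - shift);
	}
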
diff --git a/drivers/staging/comedi/drivers/addi_apci_1564.c b/drivers/staging/comedi/drivers/addi_apci_1564.c
index 10501fe6bb25..1268ba34be5f 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1564.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1564.c
@@ -331,14 +331,22 @@ static int apci1564_cos_insn_config(struct comedi_device *dev,
unsigned int *data)
{
struct apci1564_private *devpriv = dev->private;
- unsigned int shift, oldmask;
+ unsigned int shift, oldmask, himask, lomask;
switch (data[0]) {
case INSN_CONFIG_DIGITAL_TRIG:
if (data[1] != 0)
return -EINVAL;
shift = data[3];
- oldmask = (1U << shift) - 1;
+ if (shift < 32) {
+ oldmask = (1U << shift) - 1;
+ himask = data[4] << shift;
+ lomask = data[5] << shift;
+ } else {
+ oldmask = 0xffffffffu;
+ himask = 0;
+ lomask = 0;
+ }
switch (data[2]) {
case COMEDI_DIGITAL_TRIG_DISABLE:
devpriv->ctrl = 0;
@@ -362,8 +370,8 @@ static int apci1564_cos_insn_config(struct comedi_device *dev,
devpriv->mode2 &= oldmask;
}
/* configure specified channels */
- devpriv->mode1 |= data[4] << shift;
- devpriv->mode2 |= data[5] << shift;
+ devpriv->mode1 |= himask;
+ devpriv->mode2 |= lomask;
break;
case COMEDI_DIGITAL_TRIG_ENABLE_LEVELS:
if (devpriv->ctrl != (APCI1564_DI_IRQ_ENA |
@@ -380,8 +388,8 @@ static int apci1564_cos_insn_config(struct comedi_device *dev,
devpriv->mode2 &= oldmask;
}
/* configure specified channels */
- devpriv->mode1 |= data[4] << shift;
- devpriv->mode2 |= data[5] << shift;
+ devpriv->mode1 |= himask;
+ devpriv->mode2 |= lomask;
break;
default:
return -EINVAL;
diff --git a/drivers/staging/comedi/drivers/ni_6527.c b/drivers/staging/comedi/drivers/ni_6527.c
index 4d1eccb5041d..4518c2680b7c 100644
--- a/drivers/staging/comedi/drivers/ni_6527.c
+++ b/drivers/staging/comedi/drivers/ni_6527.c
@@ -332,7 +332,7 @@ static int ni6527_intr_insn_config(struct comedi_device *dev,
case COMEDI_DIGITAL_TRIG_ENABLE_EDGES:
/* check shift amount */
shift = data[3];
- if (shift >= s->n_chan) {
+ if (shift >= 32) {
mask = 0;
rising = 0;
falling = 0;
diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c
index 4689b2170e4f..456603fd26c0 100644
--- a/drivers/staging/wlan-ng/prism2usb.c
+++ b/drivers/staging/wlan-ng/prism2usb.c
@@ -61,11 +61,25 @@ static int prism2sta_probe_usb(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct usb_device *dev;
-
+ const struct usb_endpoint_descriptor *epd;
+ const struct usb_host_interface *iface_desc = interface->cur_altsetting;
struct wlandevice *wlandev = NULL;
struct hfa384x *hw = NULL;
int result = 0;
+ if (iface_desc->desc.bNumEndpoints != 2) {
+ result = -ENODEV;
+ goto failed;
+ }
+
+ result = -EINVAL;
+ epd = &iface_desc->endpoint[1].desc;
+ if (!usb_endpoint_is_bulk_in(epd))
+ goto failed;
+ epd = &iface_desc->endpoint[2].desc;
+ if (!usb_endpoint_is_bulk_out(epd))
+ goto failed;
+
dev = interface_to_usbdev(interface);
wlandev = create_wlan();
if (!wlandev) {
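
Validating endpoint count and types at probe time protects the driver from devices whose descriptors do not match the bulk URBs it will later submit. The USB core also provides usb_find_common_endpoints() for this job; a hedged sketch of that variant, assuming the interface needs exactly one bulk-in and one bulk-out endpoint:

	static int check_endpoints(struct usb_interface *intf)
	{
		struct usb_endpoint_descriptor *bulk_in, *bulk_out;

		/* Returns 0 only if both endpoint types are present. */
		return usb_find_common_endpoints(intf->cur_altsetting,
						 &bulk_in, &bulk_out,
						 NULL, NULL);
	}
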
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index fc118f649887..cae61d1ebec5 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -524,6 +524,7 @@ static void __init serial8250_isa_init_ports(void)
*/
up->mcr_mask = ~ALPHA_KLUDGE_MCR;
up->mcr_force = ALPHA_KLUDGE_MCR;
+ serial8250_set_defaults(up);
}
/* chain base port ops to support Remote Supervisor Adapter */
@@ -547,7 +548,6 @@ static void __init serial8250_isa_init_ports(void)
port->membase = old_serial_port[i].iomem_base;
port->iotype = old_serial_port[i].io_type;
port->regshift = old_serial_port[i].iomem_reg_shift;
- serial8250_set_defaults(up);
port->irqflags |= irqflag;
if (serial8250_isa_config != NULL)
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index ddb6aeb76dc5..04b9af7ed941 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -326,7 +326,17 @@ static void setup_gpio(struct pci_dev *pcidev, u8 __iomem *p)
* devices will export them as GPIOs, so we pre-configure them safely
* as inputs.
*/
- u8 dir = pcidev->vendor == PCI_VENDOR_ID_EXAR ? 0xff : 0x00;
+
+ u8 dir = 0x00;
+
+ if ((pcidev->vendor == PCI_VENDOR_ID_EXAR) &&
+ (pcidev->subsystem_vendor != PCI_VENDOR_ID_SEALEVEL)) {
+ // Configure GPIO as inputs for Commtech adapters
+ dir = 0xff;
+ } else {
+ // Configure GPIO as outputs for SeaLevel adapters
+ dir = 0x00;
+ }
writeb(0x00, p + UART_EXAR_MPIOINT_7_0);
writeb(0x00, p + UART_EXAR_MPIOLVL_7_0);
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
index f839380c2f4c..98b8a3e30733 100644
--- a/drivers/tty/serial/8250/8250_mtk.c
+++ b/drivers/tty/serial/8250/8250_mtk.c
@@ -306,8 +306,21 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios,
}
#endif
+ /*
+ * Store the requested baud rate before calling the generic 8250
+ * set_termios method. A standard 8250 port expects baud rates no
+ * higher than (uartclk / 16), so the requested rate is clamped if
+ * it exceeds that bound. The Mediatek 8250 port supports higher
+ * speeds, so we restore the original baud rate after the generic
+ * set_termios call and recalculate the speed later in this method.
+ */
+ baud = tty_termios_baud_rate(termios);
+
serial8250_do_set_termios(port, termios, old);
+ tty_termios_encode_baud_rate(termios, baud, baud);
+
/*
* Mediatek UARTs use an extra highspeed register (MTK_UART_HIGHS)
*
@@ -339,6 +352,11 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios,
*/
spin_lock_irqsave(&port->lock, flags);
+ /*
+ * Update the per-port timeout.
+ */
+ uart_update_timeout(port, termios->c_cflag, baud);
+
/* set DLAB we have cval saved in up->lcr from the call to the core */
serial_port_out(port, UART_LCR, up->lcr | UART_LCR_DLAB);
serial_dl_write(up, quot);
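
The save/restore pair is the generic way to override the core's clamping: tty_termios_baud_rate() reads the requested rate out of the termios, serial8250_do_set_termios() may clamp it to uartclk / 16, and tty_termios_encode_baud_rate() then writes the original request back so the driver can program its highspeed divider. The pattern in isolation (my_set_termios is a hypothetical wrapper):

	static void my_set_termios(struct uart_port *port,
				   struct ktermios *termios,
				   struct ktermios *old)
	{
		speed_t baud = tty_termios_baud_rate(termios);	/* remember */

		serial8250_do_set_termios(port, termios, old);	/* may clamp */

		/* Restore the request; this hardware can actually do it. */
		tty_termios_encode_baud_rate(termios, baud, baud);
	}
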
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index 8de8bac9c6c7..04d1b0807e66 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -635,7 +635,7 @@ static void tegra_uart_handle_tx_pio(struct tegra_uart_port *tup)
}
static void tegra_uart_handle_rx_pio(struct tegra_uart_port *tup,
- struct tty_port *tty)
+ struct tty_port *port)
{
do {
char flag = TTY_NORMAL;
@@ -653,16 +653,18 @@ static void tegra_uart_handle_rx_pio(struct tegra_uart_port *tup,
ch = (unsigned char) tegra_uart_read(tup, UART_RX);
tup->uport.icount.rx++;
- if (!uart_handle_sysrq_char(&tup->uport, ch) && tty)
- tty_insert_flip_char(tty, ch, flag);
+ if (uart_handle_sysrq_char(&tup->uport, ch))
+ continue;
if (tup->uport.ignore_status_mask & UART_LSR_DR)
continue;
+
+ tty_insert_flip_char(port, ch, flag);
} while (1);
}
static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup,
- struct tty_port *tty,
+ struct tty_port *port,
unsigned int count)
{
int copied;
@@ -672,17 +674,13 @@ static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup,
return;
tup->uport.icount.rx += count;
- if (!tty) {
- dev_err(tup->uport.dev, "No tty port\n");
- return;
- }
if (tup->uport.ignore_status_mask & UART_LSR_DR)
return;
dma_sync_single_for_cpu(tup->uport.dev, tup->rx_dma_buf_phys,
count, DMA_FROM_DEVICE);
- copied = tty_insert_flip_string(tty,
+ copied = tty_insert_flip_string(port,
((unsigned char *)(tup->rx_dma_buf_virt)), count);
if (copied != count) {
WARN_ON(1);
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 672cfa075e28..2833f1418d6d 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -1580,8 +1580,10 @@ static int cdns_uart_probe(struct platform_device *pdev)
* If register_console() don't assign value, then console_port pointer
* is cleanup.
*/
- if (!console_port)
+ if (!console_port) {
+ cdns_uart_console.index = id;
console_port = port;
+ }
#endif
rc = uart_add_one_port(&cdns_uart_uart_driver, port);
@@ -1594,8 +1596,10 @@ static int cdns_uart_probe(struct platform_device *pdev)
#ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE
/* This is not the port used for the console, so clean it up */
if (console_port == port &&
- !(cdns_uart_uart_driver.cons->flags & CON_ENABLED))
+ !(cdns_uart_uart_driver.cons->flags & CON_ENABLED)) {
console_port = NULL;
+ cdns_uart_console.index = -1;
+ }
#endif
cdns_uart_data->cts_override = of_property_read_bool(pdev->dev.of_node,
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 48a8199f7845..42d8c67a481f 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1092,10 +1092,19 @@ static const struct tty_port_operations vc_port_ops = {
.destruct = vc_port_destruct,
};
+/*
+ * Change # of rows and columns (0 means unchanged/the size of fg_console)
+ * [this is to be used together with some user program
+ * like resize that changes the hardware videomode]
+ */
+#define VC_MAXCOL (32767)
+#define VC_MAXROW (32767)
+
int vc_allocate(unsigned int currcons) /* return 0 on success */
{
struct vt_notifier_param param;
struct vc_data *vc;
+ int err;
WARN_CONSOLE_UNLOCKED();
@@ -1125,6 +1134,11 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */
if (!*vc->vc_uni_pagedir_loc)
con_set_default_unimap(vc);
+ err = -EINVAL;
+ if (vc->vc_cols > VC_MAXCOL || vc->vc_rows > VC_MAXROW ||
+ vc->vc_screenbuf_size > KMALLOC_MAX_SIZE || !vc->vc_screenbuf_size)
+ goto err_free;
+ err = -ENOMEM;
vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_KERNEL);
if (!vc->vc_screenbuf)
goto err_free;
@@ -1143,7 +1157,7 @@ err_free:
visual_deinit(vc);
kfree(vc);
vc_cons[currcons].d = NULL;
- return -ENOMEM;
+ return err;
}
static inline int resize_screen(struct vc_data *vc, int width, int height,
@@ -1158,14 +1172,6 @@ static inline int resize_screen(struct vc_data *vc, int width, int height,
return err;
}
-/*
- * Change # of rows and columns (0 means unchanged/the size of fg_console)
- * [this is to be used together with some user program
- * like resize that changes the hardware videomode]
- */
-#define VC_RESIZE_MAXCOL (32767)
-#define VC_RESIZE_MAXROW (32767)
-
/**
* vc_do_resize - resizing method for the tty
* @tty: tty being resized
@@ -1201,7 +1207,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
user = vc->vc_resize_user;
vc->vc_resize_user = 0;
- if (cols > VC_RESIZE_MAXCOL || lines > VC_RESIZE_MAXROW)
+ if (cols > VC_MAXCOL || lines > VC_MAXROW)
return -EINVAL;
new_cols = (cols ? cols : vc->vc_cols);
@@ -1212,7 +1218,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
if (new_cols == vc->vc_cols && new_rows == vc->vc_rows)
return 0;
- if (new_screen_size > KMALLOC_MAX_SIZE)
+ if (new_screen_size > KMALLOC_MAX_SIZE || !new_screen_size)
return -EINVAL;
newscreen = kzalloc(new_screen_size, GFP_USER);
if (!newscreen)
@@ -3393,6 +3399,7 @@ static int __init con_init(void)
INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK);
tty_port_init(&vc->port);
visual_init(vc, currcons, 1);
+ /* Assuming vc->vc_{cols,rows,screenbuf_size} are sane here. */
vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_NOWAIT);
vc_init(vc, vc->vc_rows, vc->vc_cols,
currcons || !vc->vc_sw->con_save_screen);
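
vc_allocate() now enforces the same limits as vc_do_resize(), including rejecting a zero-sized screen buffer, and the shared VC_MAXCOL/VC_MAXROW definitions keep the two call sites from drifting apart. The checks as a standalone predicate (the helper name and the KMALLOC_MAX_SIZE stand-in are assumptions):

	#include <stdbool.h>
	#include <stddef.h>

	#define VC_MAXCOL	32767
	#define VC_MAXROW	32767
	#define MY_KMALLOC_MAX	(4UL << 20)	/* stand-in for KMALLOC_MAX_SIZE */

	static bool screenbuf_size_ok(unsigned int cols, unsigned int rows,
				      size_t screenbuf_size)
	{
		if (cols > VC_MAXCOL || rows > VC_MAXROW)
			return false;
		/* Reject both oversized and zero-sized buffers. */
		return screenbuf_size && screenbuf_size <= MY_KMALLOC_MAX;
	}
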
diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
index fea555570ad4..45c54d56ecbd 100644
--- a/drivers/usb/host/xhci-mtk-sch.c
+++ b/drivers/usb/host/xhci-mtk-sch.c
@@ -557,6 +557,10 @@ static bool need_bw_sch(struct usb_host_endpoint *ep,
if (is_fs_or_ls(speed) && !has_tt)
return false;
+ /* skip endpoint with zero maxpkt */
+ if (usb_endpoint_maxp(&ep->desc) == 0)
+ return false;
+
return true;
}
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index ef513c2fb843..9234c82e70e4 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -265,6 +265,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
pdev->device == 0x1142)
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+ if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
+ pdev->device == 0x2142)
+ xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI)
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index 2eaf5c0af80c..ee6bf01775bb 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -856,7 +856,7 @@ static int tegra_xusb_init_context(struct tegra_xusb *tegra)
if (!tegra->context.ipfs)
return -ENOMEM;
- tegra->context.fpci = devm_kcalloc(tegra->dev, soc->ipfs.num_offsets,
+ tegra->context.fpci = devm_kcalloc(tegra->dev, soc->fpci.num_offsets,
sizeof(u32), GFP_KERNEL);
if (!tegra->context.fpci)
return -ENOMEM;
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 6fb4d7ecfa19..b22adf03f584 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1215,7 +1215,7 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
continue;
}
- switch (v_req.type) {
+ switch (vhost32_to_cpu(vq, v_req.type)) {
case VIRTIO_SCSI_T_TMF:
vc.req = &v_req.tmf;
vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req);
diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c
index ca935c09a261..35ebeeccde4d 100644
--- a/drivers/video/fbdev/core/bitblit.c
+++ b/drivers/video/fbdev/core/bitblit.c
@@ -216,7 +216,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info,
region.color = color;
region.rop = ROP_COPY;
- if (rw && !bottom_only) {
+ if ((int) rw > 0 && !bottom_only) {
region.dx = info->var.xoffset + rs;
region.dy = 0;
region.width = rw;
@@ -224,7 +224,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info,
info->fbops->fb_fillrect(info, &region);
}
- if (bh) {
+ if ((int) bh > 0) {
region.dx = info->var.xoffset;
region.dy = info->var.yoffset + bs;
region.width = rs;
diff --git a/drivers/video/fbdev/core/fbcon_ccw.c b/drivers/video/fbdev/core/fbcon_ccw.c
index dfa9a8aa4509..78f3a5621478 100644
--- a/drivers/video/fbdev/core/fbcon_ccw.c
+++ b/drivers/video/fbdev/core/fbcon_ccw.c
@@ -201,7 +201,7 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info,
region.color = color;
region.rop = ROP_COPY;
- if (rw && !bottom_only) {
+ if ((int) rw > 0 && !bottom_only) {
region.dx = 0;
region.dy = info->var.yoffset;
region.height = rw;
@@ -209,7 +209,7 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info,
info->fbops->fb_fillrect(info, &region);
}
- if (bh) {
+ if ((int) bh > 0) {
region.dx = info->var.xoffset + bs;
region.dy = 0;
region.height = info->var.yres_virtual;
diff --git a/drivers/video/fbdev/core/fbcon_cw.c b/drivers/video/fbdev/core/fbcon_cw.c
index ce08251bfd38..fd098ff17574 100644
--- a/drivers/video/fbdev/core/fbcon_cw.c
+++ b/drivers/video/fbdev/core/fbcon_cw.c
@@ -184,7 +184,7 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info,
region.color = color;
region.rop = ROP_COPY;
- if (rw && !bottom_only) {
+ if ((int) rw > 0 && !bottom_only) {
region.dx = 0;
region.dy = info->var.yoffset + rs;
region.height = rw;
@@ -192,7 +192,7 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info,
info->fbops->fb_fillrect(info, &region);
}
- if (bh) {
+ if ((int) bh > 0) {
region.dx = info->var.xoffset;
region.dy = info->var.yoffset;
region.height = info->var.yres;
diff --git a/drivers/video/fbdev/core/fbcon_ud.c b/drivers/video/fbdev/core/fbcon_ud.c
index 1936afc78fec..e165a3fad29a 100644
--- a/drivers/video/fbdev/core/fbcon_ud.c
+++ b/drivers/video/fbdev/core/fbcon_ud.c
@@ -231,7 +231,7 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info,
region.color = color;
region.rop = ROP_COPY;
- if (rw && !bottom_only) {
+ if ((int) rw > 0 && !bottom_only) {
region.dy = 0;
region.dx = info->var.xoffset;
region.width = rw;
@@ -239,7 +239,7 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info,
info->fbops->fb_fillrect(info, &region);
}
- if (bh) {
+ if ((int) bh > 0) {
region.dy = info->var.yoffset;
region.dx = info->var.xoffset;
region.height = bh;
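
rw and bh are unsigned, but the margin arithmetic in fbcon can produce negative values that wrap to huge numbers, so the old if (rw) test passed and fb_fillrect() was handed an enormous region. Casting to int and requiring > 0 rejects both zero and wrapped-negative sizes, as this runnable snippet shows:

	#include <stdio.h>

	int main(void)
	{
		unsigned int rw = (unsigned int)-8;	/* wrapped negative margin */

		printf("if (rw): %d\n", rw != 0);		/* 1: would draw */
		printf("if ((int)rw > 0): %d\n", (int)rw > 0);	/* 0: rejected */
		return 0;
	}
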
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 5809e5f5b157..5c92e4a50882 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -85,7 +85,7 @@ config VIRTIO_MEM
depends on VIRTIO
depends on MEMORY_HOTPLUG_SPARSE
depends on MEMORY_HOTREMOVE
- select CONTIG_ALLOC
+ depends on CONTIG_ALLOC
help
This driver provides access to virtio-mem paravirtualized memory
devices, allowing memory to be hotplugged and hotunplugged.
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 1f157d2f4952..8be02f333b7a 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -578,10 +578,14 @@ static int init_vqs(struct virtio_balloon *vb)
static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb)
{
if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
- &vb->config_read_bitmap))
+ &vb->config_read_bitmap)) {
virtio_cread(vb->vdev, struct virtio_balloon_config,
free_page_hint_cmd_id,
&vb->cmd_id_received_cache);
+ /* Legacy balloon config space is LE, unlike all other devices. */
+ if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1))
+ vb->cmd_id_received_cache = le32_to_cpu((__force __le32)vb->cmd_id_received_cache);
+ }
return vb->cmd_id_received_cache;
}
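
The balloon is the odd legacy device: legacy virtio config space is normally guest-native-endian, but the balloon's was specified as little-endian, so on a big-endian host the value read by virtio_cread() needs one extra swap. The fixup in isolation (a sketch; 'raw' stands for the value virtio_cread() returned):

	static u32 balloon_legacy_fixup(struct virtio_device *vdev, u32 raw)
	{
		/* VERSION_1 devices are already converted by the core. */
		if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
			return le32_to_cpu((__force __le32)raw);
		return raw;
	}
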
@@ -974,6 +978,11 @@ static int virtballoon_probe(struct virtio_device *vdev)
/*
* Let the hypervisor know that we are expecting a
* specific value to be written back in balloon pages.
+ *
+ * If the PAGE_POISON value were larger than a byte, we would
+ * need to byte-swap poison_val here to guarantee it is
+ * little-endian. However, for now it is a single byte, so we
+ * can pass it as-is.
*/
if (!want_init_on_free())
memset(&poison_val, PAGE_POISON, sizeof(poison_val));