path: root/drivers/net/ethernet
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r-- drivers/net/ethernet/3com/3c509.c | 3
-rw-r--r-- drivers/net/ethernet/3com/3c574_cs.c | 1
-rw-r--r-- drivers/net/ethernet/3com/3c589_cs.c | 1
-rw-r--r-- drivers/net/ethernet/3com/3c59x.c | 6
-rw-r--r-- drivers/net/ethernet/8390/8390.h | 7
-rw-r--r-- drivers/net/ethernet/8390/apne.c | 61
-rw-r--r-- drivers/net/ethernet/8390/ax88796.c | 23
-rw-r--r-- drivers/net/ethernet/8390/axnet_cs.c | 120
-rw-r--r-- drivers/net/ethernet/8390/etherh.c | 53
-rw-r--r-- drivers/net/ethernet/8390/hydra.c | 11
-rw-r--r-- drivers/net/ethernet/8390/lib8390.c | 77
-rw-r--r-- drivers/net/ethernet/8390/mac8390.c | 19
-rw-r--r-- drivers/net/ethernet/8390/mcf8390.c | 9
-rw-r--r-- drivers/net/ethernet/8390/ne.c | 96
-rw-r--r-- drivers/net/ethernet/8390/ne2k-pci.c | 54
-rw-r--r-- drivers/net/ethernet/8390/pcnet_cs.c | 63
-rw-r--r-- drivers/net/ethernet/8390/smc-ultra.c | 48
-rw-r--r-- drivers/net/ethernet/8390/stnic.c | 28
-rw-r--r-- drivers/net/ethernet/8390/wd.c | 42
-rw-r--r-- drivers/net/ethernet/8390/zorro8390.c | 22
-rw-r--r-- drivers/net/ethernet/adi/bfin_mac.c | 22
-rw-r--r-- drivers/net/ethernet/aeroflex/greth.c | 3
-rw-r--r-- drivers/net/ethernet/allwinner/sun4i-emac.c | 1
-rw-r--r-- drivers/net/ethernet/alteon/acenic.c | 1
-rw-r--r-- drivers/net/ethernet/amd/7990.c | 837
-rw-r--r-- drivers/net/ethernet/amd/7990.h | 268
-rw-r--r-- drivers/net/ethernet/amd/amd8111e.c | 5
-rw-r--r-- drivers/net/ethernet/amd/amd8111e.h | 6
-rw-r--r-- drivers/net/ethernet/amd/au1000_eth.c | 4
-rw-r--r-- drivers/net/ethernet/amd/au1000_eth.h | 3
-rw-r--r-- drivers/net/ethernet/amd/hplance.c | 96
-rw-r--r-- drivers/net/ethernet/amd/mvme147.c | 36
-rw-r--r-- drivers/net/ethernet/amd/nmclan_cs.c | 1
-rw-r--r-- drivers/net/ethernet/amd/pcnet32.c | 2
-rw-r--r-- drivers/net/ethernet/amd/sunlance.c | 1
-rw-r--r-- drivers/net/ethernet/arc/emac.h | 2
-rw-r--r-- drivers/net/ethernet/arc/emac_main.c | 20
-rw-r--r-- drivers/net/ethernet/atheros/alx/alx.h | 3
-rw-r--r-- drivers/net/ethernet/atheros/alx/ethtool.c | 101
-rw-r--r-- drivers/net/ethernet/atheros/alx/hw.c | 58
-rw-r--r-- drivers/net/ethernet/atheros/alx/hw.h | 71
-rw-r--r-- drivers/net/ethernet/atheros/alx/main.c | 50
-rw-r--r-- drivers/net/ethernet/atheros/alx/reg.h | 52
-rw-r--r-- drivers/net/ethernet/atheros/atl1c/atl1c.h | 1
-rw-r--r-- drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 31
-rw-r--r-- drivers/net/ethernet/atheros/atl1e/atl1e.h | 1
-rw-r--r-- drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 30
-rw-r--r-- drivers/net/ethernet/atheros/atlx/atl1.c | 46
-rw-r--r-- drivers/net/ethernet/atheros/atlx/atl1.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/broadcom/b44.c | 250
-rw-r--r-- drivers/net/ethernet/broadcom/b44.h | 15
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac.c | 364
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac.h | 94
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2.c | 60
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2.h | 10
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 8
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 101
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 124
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 84
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 66
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h | 10
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 213
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 277
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h | 17
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 181
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | 15
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 39
-rw-r--r-- drivers/net/ethernet/broadcom/cnic.c | 1
-rw-r--r-- drivers/net/ethernet/broadcom/cnic.h | 2
-rw-r--r-- drivers/net/ethernet/broadcom/cnic_if.h | 4
-rw-r--r-- drivers/net/ethernet/broadcom/sb1250-mac.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.c | 230
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.h | 11
-rw-r--r-- drivers/net/ethernet/brocade/bna/bfa_ioc.c | 625
-rw-r--r-- drivers/net/ethernet/brocade/bna/bfa_ioc.h | 8
-rw-r--r-- drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c | 40
-rw-r--r-- drivers/net/ethernet/brocade/bna/bfi.h | 33
-rw-r--r-- drivers/net/ethernet/brocade/bna/bfi_enet.h | 3
-rw-r--r-- drivers/net/ethernet/brocade/bna/bna.h | 24
-rw-r--r-- drivers/net/ethernet/brocade/bna/bna_enet.c | 58
-rw-r--r-- drivers/net/ethernet/brocade/bna/bna_hw_defs.h | 4
-rw-r--r-- drivers/net/ethernet/brocade/bna/bna_tx_rx.c | 251
-rw-r--r-- drivers/net/ethernet/brocade/bna/bna_types.h | 57
-rw-r--r-- drivers/net/ethernet/brocade/bna/bnad.c | 559
-rw-r--r-- drivers/net/ethernet/brocade/bna/bnad.h | 26
-rw-r--r-- drivers/net/ethernet/brocade/bna/bnad_ethtool.c | 1
-rw-r--r-- drivers/net/ethernet/brocade/bna/cna.h | 4
-rw-r--r-- drivers/net/ethernet/cadence/macb.c | 126
-rw-r--r-- drivers/net/ethernet/cadence/macb.h | 1
-rw-r--r-- drivers/net/ethernet/calxeda/xgmac.c | 1
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb/common.h | 4
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb/cphy.h | 3
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h | 3
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb/cxgb2.c | 4
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb/elmer0.h | 3
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb/espi.c | 3
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb/espi.h | 3
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb/gmac.h | 3
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb/mv88x201x.c | 3
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb/pm3393.c | 3
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb/regs.h | 3
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb/sge.c | 4
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb/sge.h | 3
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb/subr.c | 3
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h | 3
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/common.h | 1
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/l2t.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 21
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 35
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/sge.c | 6
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 139
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h | 1
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 4
-rw-r--r-- drivers/net/ethernet/cirrus/ep93xx_eth.c | 1
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_main.c | 11
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_pp.c | 2
-rw-r--r-- drivers/net/ethernet/davicom/dm9000.c | 24
-rw-r--r-- drivers/net/ethernet/dec/tulip/eeprom.c | 1
-rw-r--r-- drivers/net/ethernet/dec/tulip/media.c | 3
-rw-r--r-- drivers/net/ethernet/dec/tulip/uli526x.c | 6
-rw-r--r-- drivers/net/ethernet/dec/tulip/xircom_cb.c | 1
-rw-r--r-- drivers/net/ethernet/dlink/dl2k.h | 1
-rw-r--r-- drivers/net/ethernet/dnet.c | 1
-rw-r--r-- drivers/net/ethernet/emulex/benet/be.h | 7
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_cmds.c | 178
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_cmds.h | 33
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_ethtool.c | 73
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_main.c | 164
-rw-r--r-- drivers/net/ethernet/faraday/ftgmac100.c | 7
-rw-r--r-- drivers/net/ethernet/freescale/fec.h | 3
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 9
-rw-r--r-- drivers/net/ethernet/freescale/fec_ptp.c | 17
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 1
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/mac-fcc.c | 1
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/mac-fec.c | 1
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/mac-scc.c | 1
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c | 1
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/mii-fec.c | 1
-rw-r--r-- drivers/net/ethernet/freescale/fsl_pq_mdio.c | 1
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.c | 23
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.h | 1
-rw-r--r-- drivers/net/ethernet/freescale/gianfar_ethtool.c | 99
-rw-r--r-- drivers/net/ethernet/freescale/gianfar_ptp.c | 3
-rw-r--r-- drivers/net/ethernet/freescale/gianfar_sysfs.c | 1
-rw-r--r-- drivers/net/ethernet/freescale/ucc_geth.c | 5
-rw-r--r-- drivers/net/ethernet/freescale/ucc_geth_ethtool.c | 1
-rw-r--r-- drivers/net/ethernet/fujitsu/fmvj18x_cs.c | 1
-rw-r--r-- drivers/net/ethernet/i825xx/lasi_82596.c | 1
-rw-r--r-- drivers/net/ethernet/i825xx/lib82596.c | 1
-rw-r--r-- drivers/net/ethernet/i825xx/sni_82596.c | 1
-rw-r--r-- drivers/net/ethernet/ibm/emac/core.h | 1
-rw-r--r-- drivers/net/ethernet/ibm/ibmveth.c | 15
-rw-r--r-- drivers/net/ethernet/ibm/ibmveth.h | 3
-rw-r--r-- drivers/net/ethernet/icplus/ipg.h | 1
-rw-r--r-- drivers/net/ethernet/intel/Kconfig | 49
-rw-r--r-- drivers/net/ethernet/intel/Makefile | 1
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000.h | 1
-rw-r--r-- drivers/net/ethernet/intel/e1000e/netdev.c | 14
-rw-r--r-- drivers/net/ethernet/intel/i40e/Makefile | 10
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e.h | 127
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_adminq.c | 237
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_adminq.h | 21
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 136
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_alloc.h | 7
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_common.c | 666
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_dcb.c | 469
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_dcb.h | 107
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c | 316
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 400
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_diag.c | 23
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_diag.h | 15
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 432
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_hmc.c | 17
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_hmc.h | 10
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c | 10
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h | 11
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_main.c | 1754
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_nvm.c | 77
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_osdep.h | 7
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_prototype.h | 53
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ptp.c | 662
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_register.h | 170
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_status.h | 7
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 195
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 64
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_type.h | 152
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl.h | 11
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 876
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 11
-rw-r--r-- drivers/net/ethernet/intel/i40evf/Makefile | 33
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_adminq.c | 927
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_adminq.h | 106
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 2153
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_alloc.h | 55
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_common.c | 254
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_hmc.h | 238
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h | 165
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_osdep.h | 72
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_prototype.h | 84
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_register.h | 4667
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_status.h | 97
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 1575
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 296
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_type.h | 1152
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h | 364
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf.h | 321
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c | 390
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 2353
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c | 772
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_82575.c | 89
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_defines.h | 16
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_hw.h | 3
-rw-r--r-- drivers/net/ethernet/intel/igb/igb.h | 28
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_ethtool.c | 10
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_hwmon.c | 108
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_main.c | 303
-rw-r--r-- drivers/net/ethernet/intel/igbvf/netdev.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgb/ixgb.h | 1
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 21
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 65
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 120
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 82
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c | 3
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 30
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h | 5
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c | 84
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 15
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/defines.h | 18
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ethtool.c | 76
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 96
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 1276
-rw-r--r-- drivers/net/ethernet/korina.c | 1
-rw-r--r-- drivers/net/ethernet/lantiq_etop.c | 3
-rw-r--r-- drivers/net/ethernet/marvell/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/marvell/mv643xx_eth.c | 28
-rw-r--r-- drivers/net/ethernet/marvell/mvmdio.c | 1
-rw-r--r-- drivers/net/ethernet/marvell/mvneta.c | 387
-rw-r--r-- drivers/net/ethernet/marvell/pxa168_eth.c | 24
-rw-r--r-- drivers/net/ethernet/marvell/sky2.c | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/alloc.c | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/cmd.c | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/cq.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_clock.c | 198
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_cq.c | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_main.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 142
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_resources.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 30
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 62
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/eq.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/fw.c | 103
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/fw.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/main.c | 109
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mcg.c | 62
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4.h | 15
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 19
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mr.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/pd.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/port.c | 61
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/qp.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 110
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/srq.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/cq.c | 17
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/debugfs.c | 39
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/main.c | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 13
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/port.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/qp.c | 5
-rw-r--r-- drivers/net/ethernet/micrel/ks8695net.c | 1
-rw-r--r-- drivers/net/ethernet/micrel/ksz884x.c | 12
-rw-r--r-- drivers/net/ethernet/moxa/moxart_ether.c | 1
-rw-r--r-- drivers/net/ethernet/natsemi/jazzsonic.c | 1
-rw-r--r-- drivers/net/ethernet/natsemi/macsonic.c | 1
-rw-r--r-- drivers/net/ethernet/natsemi/ns83820.c | 5
-rw-r--r-- drivers/net/ethernet/neterion/vxge/vxge-config.c | 2
-rw-r--r-- drivers/net/ethernet/neterion/vxge/vxge-main.c | 35
-rw-r--r-- drivers/net/ethernet/neterion/vxge/vxge-main.h | 1
-rw-r--r-- drivers/net/ethernet/neterion/vxge/vxge-traffic.c | 37
-rw-r--r-- drivers/net/ethernet/neterion/vxge/vxge-traffic.h | 8
-rw-r--r-- drivers/net/ethernet/netx-eth.c | 3
-rw-r--r-- drivers/net/ethernet/nvidia/forcedeth.c | 6
-rw-r--r-- drivers/net/ethernet/nxp/lpc_eth.c | 1
-rw-r--r-- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h | 3
-rw-r--r-- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c | 3
-rw-r--r-- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h | 3
-rw-r--r-- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c | 3
-rw-r--r-- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 3
-rw-r--r-- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c | 3
-rw-r--r-- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c | 3
-rw-r--r-- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h | 3
-rw-r--r-- drivers/net/ethernet/packetengines/yellowfin.c | 17
-rw-r--r-- drivers/net/ethernet/pasemi/pasemi_mac.c | 4
-rw-r--r-- drivers/net/ethernet/pasemi/pasemi_mac.h | 3
-rw-r--r-- drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c | 3
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/Makefile | 4
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic.h | 4
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c | 4
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c | 4
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h | 4
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c | 6
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h | 4
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c | 4
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 4
-rw-r--r-- drivers/net/ethernet/qlogic/qla3xxx.c | 1
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 197
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 425
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | 45
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 28
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c | 14
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c | 49
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c | 31
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h | 14
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 144
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h | 1
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c | 127
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h | 9
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 186
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 444
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h | 15
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 266
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | 236
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 133
-rw-r--r-- drivers/net/ethernet/qlogic/qlge/qlge.h | 1
-rw-r--r-- drivers/net/ethernet/qlogic/qlge/qlge_dbg.c | 4
-rw-r--r-- drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c | 1
-rw-r--r-- drivers/net/ethernet/qlogic/qlge/qlge_main.c | 1
-rw-r--r-- drivers/net/ethernet/rdc/r6040.c | 7
-rw-r--r-- drivers/net/ethernet/realtek/r8169.c | 1
-rw-r--r-- drivers/net/ethernet/renesas/Kconfig | 2
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.c | 263
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.h | 56
-rw-r--r-- drivers/net/ethernet/s6gmac.c | 1
-rw-r--r-- drivers/net/ethernet/seeq/sgiseeq.c | 3
-rw-r--r-- drivers/net/ethernet/sfc/ef10.c | 549
-rw-r--r-- drivers/net/ethernet/sfc/efx.c | 201
-rw-r--r-- drivers/net/ethernet/sfc/efx.h | 16
-rw-r--r-- drivers/net/ethernet/sfc/enum.h | 1
-rw-r--r-- drivers/net/ethernet/sfc/ethtool.c | 16
-rw-r--r-- drivers/net/ethernet/sfc/falcon.c | 38
-rw-r--r-- drivers/net/ethernet/sfc/farch.c | 48
-rw-r--r-- drivers/net/ethernet/sfc/filter.h | 17
-rw-r--r-- drivers/net/ethernet/sfc/mcdi.c | 444
-rw-r--r-- drivers/net/ethernet/sfc/mcdi.h | 21
-rw-r--r-- drivers/net/ethernet/sfc/mcdi_mon.c | 76
-rw-r--r-- drivers/net/ethernet/sfc/mcdi_pcol.h | 733
-rw-r--r-- drivers/net/ethernet/sfc/mcdi_port.c | 89
-rw-r--r-- drivers/net/ethernet/sfc/net_driver.h | 75
-rw-r--r-- drivers/net/ethernet/sfc/nic.c | 12
-rw-r--r-- drivers/net/ethernet/sfc/nic.h | 34
-rw-r--r-- drivers/net/ethernet/sfc/ptp.c | 854
-rw-r--r-- drivers/net/ethernet/sfc/rx.c | 24
-rw-r--r-- drivers/net/ethernet/sfc/selftest.c | 2
-rw-r--r-- drivers/net/ethernet/sfc/selftest.h | 1
-rw-r--r-- drivers/net/ethernet/sfc/siena.c | 119
-rw-r--r-- drivers/net/ethernet/sgi/ioc3-eth.c | 1
-rw-r--r-- drivers/net/ethernet/sgi/meth.c | 1
-rw-r--r-- drivers/net/ethernet/sis/sis900.c | 2
-rw-r--r-- drivers/net/ethernet/smsc/smc911x.c | 4
-rw-r--r-- drivers/net/ethernet/smsc/smc911x.h | 3
-rw-r--r-- drivers/net/ethernet/smsc/smc91c92_cs.c | 1
-rw-r--r-- drivers/net/ethernet/smsc/smc91x.c | 6
-rw-r--r-- drivers/net/ethernet/smsc/smc91x.h | 3
-rw-r--r-- drivers/net/ethernet/smsc/smsc911x.c | 3
-rw-r--r-- drivers/net/ethernet/smsc/smsc911x.h | 3
-rw-r--r-- drivers/net/ethernet/smsc/smsc9420.c | 5
-rw-r--r-- drivers/net/ethernet/smsc/smsc9420.h | 3
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/Kconfig | 12
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/Makefile | 1
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/chain_mode.c | 3
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/common.h | 5
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c | 140
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac1000.h | 7
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 7
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/ring_mode.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 11
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 475
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 3
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 10
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 135
-rw-r--r-- drivers/net/ethernet/sun/cassini.c | 4
-rw-r--r-- drivers/net/ethernet/sun/cassini.h | 4
-rw-r--r-- drivers/net/ethernet/sun/niu.c | 10
-rw-r--r-- drivers/net/ethernet/sun/sunbmac.c | 1
-rw-r--r-- drivers/net/ethernet/sun/sungem.c | 1
-rw-r--r-- drivers/net/ethernet/sun/sunvnet.c | 2
-rw-r--r-- drivers/net/ethernet/tehuti/tehuti.c | 2
-rw-r--r-- drivers/net/ethernet/ti/cpmac.c | 1
-rw-r--r-- drivers/net/ethernet/ti/cpsw.c | 137
-rw-r--r-- drivers/net/ethernet/ti/cpsw_ale.c | 18
-rw-r--r-- drivers/net/ethernet/ti/cpsw_ale.h | 2
-rw-r--r-- drivers/net/ethernet/ti/davinci_cpdma.c | 7
-rw-r--r-- drivers/net/ethernet/ti/davinci_mdio.c | 2
-rw-r--r-- drivers/net/ethernet/tile/Kconfig | 12
-rw-r--r-- drivers/net/ethernet/tile/tilegx.c | 38
-rw-r--r-- drivers/net/ethernet/toshiba/ps3_gelic_net.c | 18
-rw-r--r-- drivers/net/ethernet/toshiba/tc35815.c | 16
-rw-r--r-- drivers/net/ethernet/tundra/tsi108_eth.c | 1
-rw-r--r-- drivers/net/ethernet/tundra/tsi108_eth.h | 4
-rw-r--r-- drivers/net/ethernet/xilinx/ll_temac_main.c | 1
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 1
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_emaclite.c | 1
-rw-r--r-- drivers/net/ethernet/xircom/xirc2ps_cs.c | 3
-rw-r--r-- drivers/net/ethernet/xscale/ixp4xx_eth.c | 36
412 files changed, 33619 insertions(+), 8473 deletions(-)
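A recurring change across the 8390 diffs below is the removal of the global ei_debug level in favour of a per-device msg_enable bitmap, tested through the netif_msg_*() helpers from <linux/netdevice.h>. A minimal sketch of that pattern follows; example_msg_enable, struct example_priv and example_handle_rx_error are illustrative names, not symbols from this series:

#include <linux/module.h>
#include <linux/netdevice.h>

static u32 example_msg_enable;		/* seeded at module load time */
module_param_named(msg_enable, example_msg_enable, uint, 0444);
MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");

struct example_priv {
	u32 msg_enable;			/* per-device copy, visible to ethtool */
};

static void example_handle_rx_error(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	/* Emits only when NETIF_MSG_RX_ERR is set in priv->msg_enable. */
	netif_err(priv, rx_err, dev, "bogus packet\n");
}

Each driver in the series copies a driver-wide default into its private struct at probe time, which is why the hunks below keep adding "ei_local->msg_enable = <driver>_msg_enable;" lines.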
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index ede8daa68275..c53384d41c96 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -252,8 +252,7 @@ static int el3_isa_id_sequence(__be16 *phys_addr)
for (i = 0; i < el3_cards; i++) {
struct el3_private *lp = netdev_priv(el3_devs[i]);
if (lp->type == EL3_PNP &&
- !memcmp(phys_addr, el3_devs[i]->dev_addr,
- ETH_ALEN)) {
+ ether_addr_equal((u8 *)phys_addr, el3_devs[i]->dev_addr)) {
if (el3_debug > 3)
pr_debug("3c509 with address %02x %02x %02x %02x %02x %02x was found by ISAPnP\n",
phys_addr[0] & 0xff, phys_addr[0] >> 8,
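The 3c509 hunk above swaps an open-coded memcmp() for ether_addr_equal() from <linux/etherdevice.h>. It is the same 6-byte MAC comparison; an equivalent, illustrative form (the real helper may use optimized word compares):

#include <linux/etherdevice.h>
#include <linux/string.h>

static inline bool example_addr_equal(const u8 *a, const u8 *b)
{
	return memcmp(a, b, ETH_ALEN) == 0;
}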
diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c
index 6fc994fa4abe..b9948f00c5e9 100644
--- a/drivers/net/ethernet/3com/3c574_cs.c
+++ b/drivers/net/ethernet/3com/3c574_cs.c
@@ -73,7 +73,6 @@ earlier 3Com products.
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/timer.h>
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index 078480aaa168..5992860a39c9 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -25,7 +25,6 @@
#define DRV_VERSION "1.162-ac"
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index ad5272b348f0..0f4241c6e97e 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -693,7 +693,7 @@ DEFINE_WINDOW_IO(16)
DEFINE_WINDOW_IO(32)
#ifdef CONFIG_PCI
-#define DEVICE_PCI(dev) (((dev)->bus == &pci_bus_type) ? to_pci_dev((dev)) : NULL)
+#define DEVICE_PCI(dev) ((dev_is_pci(dev)) ? to_pci_dev((dev)) : NULL)
#else
#define DEVICE_PCI(dev) NULL
#endif
@@ -2079,10 +2079,12 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
iowrite16(len, ioaddr + Wn7_MasterLen);
spin_unlock_irq(&vp->window_lock);
vp->tx_skb = skb;
+ skb_tx_timestamp(skb);
iowrite16(StartDMADown, ioaddr + EL3_CMD);
/* netif_wake_queue() will be called at the DMADone interrupt. */
} else {
/* ... and the packet rounded to a doubleword. */
+ skb_tx_timestamp(skb);
iowrite32_rep(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
dev_kfree_skb (skb);
if (ioread16(ioaddr + TxFree) > 1536) {
@@ -2212,6 +2214,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
prev_entry->status &= cpu_to_le32(~TxIntrUploaded);
#endif
}
+ skb_tx_timestamp(skb);
iowrite16(DownUnstall, ioaddr + EL3_CMD);
spin_unlock_irqrestore(&vp->lock, flags);
return NETDEV_TX_OK;
@@ -2986,6 +2989,7 @@ static const struct ethtool_ops vortex_ethtool_ops = {
.nway_reset = vortex_nway_reset,
.get_wol = vortex_get_wol,
.set_wol = vortex_set_wol,
+ .get_ts_info = ethtool_op_get_ts_info,
};
#ifdef CONFIG_PCI
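The 3c59x hunks above wire up software TX timestamping: skb_tx_timestamp() is called on each transmit path, and ethtool_op_get_ts_info lets "ethtool -T" report the capability. A hedged sketch of where the call belongs in an ndo_start_xmit handler; example_hw_kick stands in for the device-specific doorbell write and is not a function from this diff:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void example_hw_kick(struct net_device *dev);

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	/* ... fill and queue the descriptor for skb ... */

	/* Take the software timestamp as late as possible, immediately
	 * before the hardware can begin transmitting the frame. */
	skb_tx_timestamp(skb);
	example_hw_kick(dev);
	return NETDEV_TX_OK;
}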
diff --git a/drivers/net/ethernet/8390/8390.h b/drivers/net/ethernet/8390/8390.h
index 2923c51bb351..3e2f2c2e7b58 100644
--- a/drivers/net/ethernet/8390/8390.h
+++ b/drivers/net/ethernet/8390/8390.h
@@ -21,12 +21,6 @@ struct e8390_pkt_hdr {
unsigned short count; /* header + packet length in bytes */
};
-#ifdef notdef
-extern int ei_debug;
-#else
-#define ei_debug 1
-#endif
-
#ifdef CONFIG_NET_POLL_CONTROLLER
void ei_poll(struct net_device *dev);
void eip_poll(struct net_device *dev);
@@ -99,6 +93,7 @@ struct ei_device {
u32 *reg_offset; /* Register mapping table */
spinlock_t page_lock; /* Page register locks */
unsigned long priv; /* Private field to store bus IDs etc. */
+ u32 msg_enable; /* debug message level */
#ifdef AX88796_PLATFORM
unsigned char rxcr_base; /* default value for RXCR */
#endif
diff --git a/drivers/net/ethernet/8390/apne.c b/drivers/net/ethernet/8390/apne.c
index 912ed7a5f33a..30104b60da85 100644
--- a/drivers/net/ethernet/8390/apne.c
+++ b/drivers/net/ethernet/8390/apne.c
@@ -116,9 +116,15 @@ static const char version[] =
static int apne_owned; /* signal if card already owned */
+static u32 apne_msg_enable;
+module_param_named(msg_enable, apne_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH));
+MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
+
struct net_device * __init apne_probe(int unit)
{
struct net_device *dev;
+ struct ei_device *ei_local;
+
#ifndef MANUAL_CONFIG
char tuple[8];
#endif
@@ -133,11 +139,11 @@ struct net_device * __init apne_probe(int unit)
if ( !(AMIGAHW_PRESENT(PCMCIA)) )
return ERR_PTR(-ENODEV);
- printk("Looking for PCMCIA ethernet card : ");
+ pr_info("Looking for PCMCIA ethernet card : ");
/* check if a card is inserted */
if (!(PCMCIA_INSERTED)) {
- printk("NO PCMCIA card inserted\n");
+ pr_cont("NO PCMCIA card inserted\n");
return ERR_PTR(-ENODEV);
}
@@ -148,6 +154,8 @@ struct net_device * __init apne_probe(int unit)
sprintf(dev->name, "eth%d", unit);
netdev_boot_setup_check(dev);
}
+ ei_local = netdev_priv(dev);
+ ei_local->msg_enable = apne_msg_enable;
/* disable pcmcia irq for readtuple */
pcmcia_disable_irq();
@@ -155,14 +163,14 @@ struct net_device * __init apne_probe(int unit)
#ifndef MANUAL_CONFIG
if ((pcmcia_copy_tuple(CISTPL_FUNCID, tuple, 8) < 3) ||
(tuple[2] != CISTPL_FUNCID_NETWORK)) {
- printk("not an ethernet card\n");
+ pr_cont("not an ethernet card\n");
/* XXX: shouldn't we re-enable irq here? */
free_netdev(dev);
return ERR_PTR(-ENODEV);
}
#endif
- printk("ethernet PCMCIA card inserted\n");
+ pr_cont("ethernet PCMCIA card inserted\n");
if (!init_pcmcia()) {
/* XXX: shouldn't we re-enable irq here? */
@@ -205,10 +213,10 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr)
#endif
static unsigned version_printed;
- if (ei_debug && version_printed++ == 0)
- printk(version);
+ if ((apne_msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0))
+ netdev_info(dev, version);
- printk("PCMCIA NE*000 ethercard probe");
+ netdev_info(dev, "PCMCIA NE*000 ethercard probe");
/* Reset card. Who knows what dain-bramaged state it was left in. */
{ unsigned long reset_start_time = jiffies;
@@ -217,7 +225,7 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr)
while ((inb(ioaddr + NE_EN0_ISR) & ENISR_RESET) == 0)
if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
- printk(" not found (no reset ack).\n");
+ pr_cont(" not found (no reset ack).\n");
return -ENODEV;
}
@@ -288,7 +296,7 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr)
start_page = 0x01;
stop_page = (wordlength == 2) ? 0x40 : 0x20;
} else {
- printk(" not found.\n");
+ pr_cont(" not found.\n");
return -ENXIO;
}
@@ -320,9 +328,9 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr)
for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = SA_prom[i];
- printk(" %pM\n", dev->dev_addr);
+ pr_cont(" %pM\n", dev->dev_addr);
- printk("%s: %s found.\n", dev->name, name);
+ netdev_info(dev, "%s found.\n", name);
ei_status.name = name;
ei_status.tx_start_page = start_page;
@@ -352,10 +360,11 @@ static void
apne_reset_8390(struct net_device *dev)
{
unsigned long reset_start_time = jiffies;
+ struct ei_device *ei_local = netdev_priv(dev);
init_pcmcia();
- if (ei_debug > 1) printk("resetting the 8390 t=%ld...", jiffies);
+ netif_dbg(ei_local, hw, dev, "resetting the 8390 t=%ld...\n", jiffies);
outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
@@ -365,8 +374,8 @@ apne_reset_8390(struct net_device *dev)
/* This check _should_not_ be necessary, omit eventually. */
while ((inb(NE_BASE+NE_EN0_ISR) & ENISR_RESET) == 0)
if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
- printk("%s: ne_reset_8390() did not complete.\n", dev->name);
- break;
+ netdev_err(dev, "ne_reset_8390() did not complete.\n");
+ break;
}
outb(ENISR_RESET, NE_BASE + NE_EN0_ISR); /* Ack intr. */
}
@@ -386,9 +395,9 @@ apne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_pa
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing) {
- printk("%s: DMAing conflict in ne_get_8390_hdr "
- "[DMAstat:%d][irqlock:%d][intr:%d].\n",
- dev->name, ei_status.dmaing, ei_status.irqlock, dev->irq);
+ netdev_err(dev, "DMAing conflict in ne_get_8390_hdr "
+ "[DMAstat:%d][irqlock:%d][intr:%d].\n",
+ ei_status.dmaing, ei_status.irqlock, dev->irq);
return;
}
@@ -433,9 +442,9 @@ apne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int rin
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing) {
- printk("%s: DMAing conflict in ne_block_input "
- "[DMAstat:%d][irqlock:%d][intr:%d].\n",
- dev->name, ei_status.dmaing, ei_status.irqlock, dev->irq);
+ netdev_err(dev, "DMAing conflict in ne_block_input "
+ "[DMAstat:%d][irqlock:%d][intr:%d].\n",
+ ei_status.dmaing, ei_status.irqlock, dev->irq);
return;
}
ei_status.dmaing |= 0x01;
@@ -481,9 +490,9 @@ apne_block_output(struct net_device *dev, int count,
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing) {
- printk("%s: DMAing conflict in ne_block_output."
- "[DMAstat:%d][irqlock:%d][intr:%d]\n",
- dev->name, ei_status.dmaing, ei_status.irqlock, dev->irq);
+ netdev_err(dev, "DMAing conflict in ne_block_output."
+ "[DMAstat:%d][irqlock:%d][intr:%d]\n",
+ ei_status.dmaing, ei_status.irqlock, dev->irq);
return;
}
ei_status.dmaing |= 0x01;
@@ -513,7 +522,7 @@ apne_block_output(struct net_device *dev, int count,
while ((inb(NE_BASE + NE_EN0_ISR) & ENISR_RDC) == 0)
if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */
- printk("%s: timeout waiting for Tx RDC.\n", dev->name);
+ netdev_warn(dev, "timeout waiting for Tx RDC.\n");
apne_reset_8390(dev);
NS8390_init(dev,1);
break;
@@ -536,8 +545,8 @@ static irqreturn_t apne_interrupt(int irq, void *dev_id)
pcmcia_ack_int(pcmcia_intreq);
return IRQ_NONE;
}
- if (ei_debug > 3)
- printk("pcmcia intreq = %x\n", pcmcia_intreq);
+ if (apne_msg_enable & NETIF_MSG_INTR)
+ pr_debug("pcmcia intreq = %x\n", pcmcia_intreq);
pcmcia_disable_irq(); /* to get rid of the sti() within ei_interrupt */
ei_interrupt(irq, dev_id);
pcmcia_ack_int(pcmcia_get_intreq());
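The apne.c changes above expose the new bitmap directly as a read-only module parameter ((S_IRUSR|S_IRGRP|S_IROTH) is octal 0444). For comparison, <linux/netdevice.h> also provides netif_msg_init() for drivers that take a traditional integer "debug" level and want sane defaults; a sketch of that alternative wiring, which is not what this patch does:

#include <linux/module.h>
#include <linux/netdevice.h>

static int debug = -1;			/* -1 means "use the default bits" */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (-1 for defaults)");

static void example_seed_msglevel(struct net_device *dev)
{
	struct ei_device *ei_local = netdev_priv(dev);	/* from 8390.h */

	/* netif_msg_init() returns the defaults for debug < 0, otherwise
	 * the low "debug" bits of the bitmap. */
	ei_local->msg_enable = netif_msg_init(debug, NETIF_MSG_DRV |
					      NETIF_MSG_PROBE | NETIF_MSG_LINK);
}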
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 36fa577970bb..455d4c399b52 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -15,7 +15,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/isapnp.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
@@ -78,6 +77,8 @@ static unsigned char version[] = "ax88796.c: Copyright 2005,2007 Simtec Electron
#define AX_GPOC_PPDSET BIT(6)
+static u32 ax_msg_enable;
+
/* device private data */
struct ax_device {
@@ -147,8 +148,7 @@ static void ax_reset_8390(struct net_device *dev)
unsigned long reset_start_time = jiffies;
void __iomem *addr = (void __iomem *)dev->base_addr;
- if (ei_debug > 1)
- netdev_dbg(dev, "resetting the 8390 t=%ld\n", jiffies);
+ netif_dbg(ei_local, hw, dev, "resetting the 8390 t=%ld...\n", jiffies);
ei_outb(ei_inb(addr + NE_RESET), addr + NE_RESET);
@@ -496,12 +496,28 @@ static int ax_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return phy_ethtool_sset(phy_dev, cmd);
}
+static u32 ax_get_msglevel(struct net_device *dev)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ return ei_local->msg_enable;
+}
+
+static void ax_set_msglevel(struct net_device *dev, u32 v)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ ei_local->msg_enable = v;
+}
+
static const struct ethtool_ops ax_ethtool_ops = {
.get_drvinfo = ax_get_drvinfo,
.get_settings = ax_get_settings,
.set_settings = ax_set_settings,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_msglevel = ax_get_msglevel,
+ .set_msglevel = ax_set_msglevel,
};
#ifdef CONFIG_AX88796_93CX6
@@ -763,6 +779,7 @@ static int ax_init_dev(struct net_device *dev)
ei_local->block_output = &ax_block_output;
ei_local->get_8390_hdr = &ax_get_8390_hdr;
ei_local->priv = 0;
+ ei_local->msg_enable = ax_msg_enable;
dev->netdev_ops = &ax_netdev_ops;
dev->ethtool_ops = &ax_ethtool_ops;
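The get_msglevel/set_msglevel ethtool ops added above make the bitmap adjustable at runtime, e.g. "ethtool -s eth0 msglvl 0x2000" to enable NETIF_MSG_HW messages. A sketch (untested) of the same read from userspace via the ETHTOOL_GMSGLVL ioctl; the interface name "eth0" is an assumption:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_value ev = { .cmd = ETHTOOL_GMSGLVL };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ev;		/* kernel fills ev.data */
	if (fd >= 0 && ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("msglvl: 0x%08x\n", ev.data);
	if (fd >= 0)
		close(fd);
	return 0;
}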
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index d801c1410fb0..73c57a4a7b9e 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -28,7 +28,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/timer.h>
@@ -105,6 +104,7 @@ static void AX88190_init(struct net_device *dev, int startp);
static int ax_open(struct net_device *dev);
static int ax_close(struct net_device *dev);
static irqreturn_t ax_interrupt(int irq, void *dev_id);
+static u32 axnet_msg_enable;
/*====================================================================*/
@@ -152,6 +152,7 @@ static int axnet_probe(struct pcmcia_device *link)
return -ENOMEM;
ei_local = netdev_priv(dev);
+ ei_local->msg_enable = axnet_msg_enable;
spin_lock_init(&ei_local->page_lock);
info = PRIV(dev);
@@ -650,11 +651,12 @@ static void block_input(struct net_device *dev, int count,
struct sk_buff *skb, int ring_offset)
{
unsigned int nic_base = dev->base_addr;
+ struct ei_device *ei_local = netdev_priv(dev);
int xfer_count = count;
char *buf = skb->data;
- if ((ei_debug > 4) && (count != 4))
- pr_debug("%s: [bi=%d]\n", dev->name, count+4);
+ if ((netif_msg_rx_status(ei_local)) && (count != 4))
+ netdev_dbg(dev, "[bi=%d]\n", count+4);
outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO);
outb_p(ring_offset >> 8, nic_base + EN0_RSARHI);
outb_p(E8390_RREAD+E8390_START, nic_base + AXNET_CMD);
@@ -810,11 +812,6 @@ module_pcmcia_driver(axnet_cs_driver);
#define ei_block_input (ei_local->block_input)
#define ei_get_8390_hdr (ei_local->get_8390_hdr)
-/* use 0 for production, 1 for verification, >2 for debug */
-#ifndef ei_debug
-int ei_debug = 1;
-#endif
-
/* Index to functions. */
static void ei_tx_intr(struct net_device *dev);
static void ei_tx_err(struct net_device *dev);
@@ -925,11 +922,10 @@ static void axnet_tx_timeout(struct net_device *dev)
isr = inb(e8390_base+EN0_ISR);
spin_unlock_irqrestore(&ei_local->page_lock, flags);
- netdev_printk(KERN_DEBUG, dev,
- "Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d.\n",
- (txsr & ENTSR_ABT) ? "excess collisions." :
- (isr) ? "lost interrupt?" : "cable problem?",
- txsr, isr, tickssofar);
+ netdev_dbg(dev, "Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d.\n",
+ (txsr & ENTSR_ABT) ? "excess collisions." :
+ (isr) ? "lost interrupt?" : "cable problem?",
+ txsr, isr, tickssofar);
if (!isr && !dev->stats.tx_packets)
{
@@ -998,29 +994,30 @@ static netdev_tx_t axnet_start_xmit(struct sk_buff *skb,
{
output_page = ei_local->tx_start_page;
ei_local->tx1 = send_length;
- if (ei_debug && ei_local->tx2 > 0)
- netdev_printk(KERN_DEBUG, dev,
- "idle transmitter tx2=%d, lasttx=%d, txing=%d\n",
- ei_local->tx2, ei_local->lasttx,
- ei_local->txing);
+ if ((netif_msg_tx_queued(ei_local)) &&
+ ei_local->tx2 > 0)
+ netdev_dbg(dev,
+ "idle transmitter tx2=%d, lasttx=%d, txing=%d\n",
+ ei_local->tx2, ei_local->lasttx,
+ ei_local->txing);
}
else if (ei_local->tx2 == 0)
{
output_page = ei_local->tx_start_page + TX_PAGES/2;
ei_local->tx2 = send_length;
- if (ei_debug && ei_local->tx1 > 0)
- netdev_printk(KERN_DEBUG, dev,
- "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n",
- ei_local->tx1, ei_local->lasttx,
- ei_local->txing);
+ if ((netif_msg_tx_queued(ei_local)) &&
+ ei_local->tx1 > 0)
+ netdev_dbg(dev,
+ "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n",
+ ei_local->tx1, ei_local->lasttx,
+ ei_local->txing);
}
else
{ /* We should never get here. */
- if (ei_debug)
- netdev_printk(KERN_DEBUG, dev,
- "No Tx buffers free! tx1=%d tx2=%d last=%d\n",
- ei_local->tx1, ei_local->tx2,
- ei_local->lasttx);
+ netif_dbg(ei_local, tx_err, dev,
+ "No Tx buffers free! tx1=%d tx2=%d last=%d\n",
+ ei_local->tx1, ei_local->tx2,
+ ei_local->lasttx);
ei_local->irqlock = 0;
netif_stop_queue(dev);
outb_p(ENISR_ALL, e8390_base + EN0_IMR);
@@ -1124,10 +1121,9 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id)
spin_unlock_irqrestore(&ei_local->page_lock, flags);
return IRQ_NONE;
}
-
- if (ei_debug > 3)
- netdev_printk(KERN_DEBUG, dev, "interrupt(isr=%#2.2x)\n",
- inb_p(e8390_base + EN0_ISR));
+
+ netif_dbg(ei_local, intr, dev, "interrupt(isr=%#2.2x)\n",
+ inb_p(e8390_base + EN0_ISR));
outb_p(0x00, e8390_base + EN0_ISR);
ei_local->irqlock = 1;
@@ -1137,9 +1133,8 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id)
++nr_serviced < MAX_SERVICE)
{
if (!netif_running(dev) || (interrupts == 0xff)) {
- if (ei_debug > 1)
- netdev_warn(dev,
- "interrupt from stopped card\n");
+ netif_warn(ei_local, intr, dev,
+ "interrupt from stopped card\n");
outb_p(interrupts, e8390_base + EN0_ISR);
interrupts = 0;
break;
@@ -1175,14 +1170,15 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id)
}
}
- if (interrupts && ei_debug > 3)
+ if (interrupts && (netif_msg_intr(ei_local)))
{
handled = 1;
if (nr_serviced >= MAX_SERVICE)
{
/* 0xFF is valid for a card removal */
- if(interrupts!=0xFF)
- netdev_warn(dev, "Too much work at interrupt, status %#2.2x\n",
+ if (interrupts != 0xFF)
+ netdev_warn(dev,
+ "Too much work at interrupt, status %#2.2x\n",
interrupts);
outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */
} else {
@@ -1221,8 +1217,7 @@ static void ei_tx_err(struct net_device *dev)
unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU);
#ifdef VERBOSE_ERROR_DUMP
- netdev_printk(KERN_DEBUG, dev,
- "transmitter error (%#2x):", txsr);
+ netdev_dbg(dev, "transmitter error (%#2x):", txsr);
if (txsr & ENTSR_ABT)
pr_cont(" excess-collisions");
if (txsr & ENTSR_ND)
@@ -1287,9 +1282,9 @@ static void ei_tx_intr(struct net_device *dev)
else if (ei_local->tx2 < 0)
{
if (ei_local->lasttx != 2 && ei_local->lasttx != -2)
- netdev_info(dev, "%s: bogus last_tx_buffer %d, tx2=%d\n",
- ei_local->name, ei_local->lasttx,
- ei_local->tx2);
+ netdev_err(dev, "%s: bogus last_tx_buffer %d, tx2=%d\n",
+ ei_local->name, ei_local->lasttx,
+ ei_local->tx2);
ei_local->tx2 = 0;
if (ei_local->tx1 > 0)
{
@@ -1366,9 +1361,11 @@ static void ei_receive(struct net_device *dev)
Keep quiet if it looks like a card removal. One problem here
is that some clones crash in roughly the same way.
*/
- if (ei_debug > 0 && this_frame != ei_local->current_page && (this_frame!=0x0 || rxing_page!=0xFF))
- netdev_err(dev, "mismatched read page pointers %2x vs %2x\n",
- this_frame, ei_local->current_page);
+ if ((netif_msg_rx_err(ei_local)) &&
+ this_frame != ei_local->current_page &&
+ (this_frame != 0x0 || rxing_page != 0xFF))
+ netdev_err(dev, "mismatched read page pointers %2x vs %2x\n",
+ this_frame, ei_local->current_page);
if (this_frame == rxing_page) /* Read all the frames? */
break; /* Done for now */
@@ -1383,11 +1380,10 @@ static void ei_receive(struct net_device *dev)
if (pkt_len < 60 || pkt_len > 1518)
{
- if (ei_debug)
- netdev_printk(KERN_DEBUG, dev,
- "bogus packet size: %d, status=%#2x nxpg=%#2x\n",
- rx_frame.count, rx_frame.status,
- rx_frame.next);
+ netif_err(ei_local, rx_err, dev,
+ "bogus packet size: %d, status=%#2x nxpg=%#2x\n",
+ rx_frame.count, rx_frame.status,
+ rx_frame.next);
dev->stats.rx_errors++;
dev->stats.rx_length_errors++;
}
@@ -1398,10 +1394,9 @@ static void ei_receive(struct net_device *dev)
skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb == NULL)
{
- if (ei_debug > 1)
- netdev_printk(KERN_DEBUG, dev,
- "Couldn't allocate a sk_buff of size %d\n",
- pkt_len);
+ netif_err(ei_local, rx_err, dev,
+ "Couldn't allocate a sk_buff of size %d\n",
+ pkt_len);
dev->stats.rx_dropped++;
break;
}
@@ -1420,11 +1415,10 @@ static void ei_receive(struct net_device *dev)
}
else
{
- if (ei_debug)
- netdev_printk(KERN_DEBUG, dev,
- "bogus packet: status=%#2x nxpg=%#2x size=%d\n",
- rx_frame.status, rx_frame.next,
- rx_frame.count);
+ netif_err(ei_local, rx_err, dev,
+ "bogus packet: status=%#2x nxpg=%#2x size=%d\n",
+ rx_frame.status, rx_frame.next,
+ rx_frame.count);
dev->stats.rx_errors++;
/* NB: The NIC counts CRC, frame and missed errors. */
if (pkt_stat & ENRSR_FO)
@@ -1461,6 +1455,7 @@ static void ei_rx_overrun(struct net_device *dev)
axnet_dev_t *info = PRIV(dev);
long e8390_base = dev->base_addr;
unsigned char was_txing, must_resend = 0;
+ struct ei_device *ei_local = netdev_priv(dev);
/*
* Record whether a Tx was in progress and then issue the
@@ -1468,9 +1463,8 @@ static void ei_rx_overrun(struct net_device *dev)
*/
was_txing = inb_p(e8390_base+E8390_CMD) & E8390_TRANS;
outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
-
- if (ei_debug > 1)
- netdev_printk(KERN_DEBUG, dev, "Receiver overrun\n");
+
+ netif_dbg(ei_local, rx_err, dev, "Receiver overrun\n");
dev->stats.rx_over_errors++;
/*
diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c
index 78c6fb4b1143..b36ee9e0d220 100644
--- a/drivers/net/ethernet/8390/etherh.c
+++ b/drivers/net/ethernet/8390/etherh.c
@@ -56,18 +56,15 @@
#define ei_inb_p(_p) readb((void __iomem *)_p)
#define ei_outb_p(_v,_p) writeb(_v,(void __iomem *)_p)
-#define NET_DEBUG 0
-#define DEBUG_INIT 2
-
#define DRV_NAME "etherh"
#define DRV_VERSION "1.11"
-static char version[] __initdata =
+static char version[] =
"EtherH/EtherM Driver (c) 2002-2004 Russell King " DRV_VERSION "\n";
#include "lib8390.c"
-static unsigned int net_debug = NET_DEBUG;
+static u32 etherh_msg_enable;
struct etherh_priv {
void __iomem *ioc_fast;
@@ -317,9 +314,9 @@ etherh_block_output (struct net_device *dev, int count, const unsigned char *buf
void __iomem *dma_base, *addr;
if (ei_local->dmaing) {
- printk(KERN_ERR "%s: DMAing conflict in etherh_block_input: "
- " DMAstat %d irqlock %d\n", dev->name,
- ei_local->dmaing, ei_local->irqlock);
+ netdev_err(dev, "DMAing conflict in etherh_block_input: "
+ " DMAstat %d irqlock %d\n",
+ ei_local->dmaing, ei_local->irqlock);
return;
}
@@ -361,8 +358,7 @@ etherh_block_output (struct net_device *dev, int count, const unsigned char *buf
while ((readb (addr + EN0_ISR) & ENISR_RDC) == 0)
if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */
- printk(KERN_ERR "%s: timeout waiting for TX RDC\n",
- dev->name);
+ netdev_warn(dev, "timeout waiting for TX RDC\n");
etherh_reset (dev);
__NS8390_init (dev, 1);
break;
@@ -383,9 +379,9 @@ etherh_block_input (struct net_device *dev, int count, struct sk_buff *skb, int
void __iomem *dma_base, *addr;
if (ei_local->dmaing) {
- printk(KERN_ERR "%s: DMAing conflict in etherh_block_input: "
- " DMAstat %d irqlock %d\n", dev->name,
- ei_local->dmaing, ei_local->irqlock);
+ netdev_err(dev, "DMAing conflict in etherh_block_input: "
+ " DMAstat %d irqlock %d\n",
+ ei_local->dmaing, ei_local->irqlock);
return;
}
@@ -423,9 +419,9 @@ etherh_get_header (struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_p
void __iomem *dma_base, *addr;
if (ei_local->dmaing) {
- printk(KERN_ERR "%s: DMAing conflict in etherh_get_header: "
- " DMAstat %d irqlock %d\n", dev->name,
- ei_local->dmaing, ei_local->irqlock);
+ netdev_err(dev, "DMAing conflict in etherh_get_header: "
+ " DMAstat %d irqlock %d\n",
+ ei_local->dmaing, ei_local->irqlock);
return;
}
@@ -513,8 +509,8 @@ static void __init etherh_banner(void)
{
static int version_printed;
- if (net_debug && version_printed++ == 0)
- printk(KERN_INFO "%s", version);
+ if ((etherh_msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0))
+ pr_info("%s", version);
}
/*
@@ -625,11 +621,27 @@ static int etherh_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return 0;
}
+static u32 etherh_get_msglevel(struct net_device *dev)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ return ei_local->msg_enable;
+}
+
+static void etherh_set_msglevel(struct net_device *dev, u32 v)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ ei_local->msg_enable = v;
+}
+
static const struct ethtool_ops etherh_ethtool_ops = {
.get_settings = etherh_get_settings,
.set_settings = etherh_set_settings,
.get_drvinfo = etherh_get_drvinfo,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_msglevel = etherh_get_msglevel,
+ .set_msglevel = etherh_set_msglevel,
};
static const struct net_device_ops etherh_netdev_ops = {
@@ -746,6 +758,7 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
ei_local->block_output = etherh_block_output;
ei_local->get_8390_hdr = etherh_get_header;
ei_local->interface_num = 0;
+ ei_local->msg_enable = etherh_msg_enable;
etherh_reset(dev);
__NS8390_init(dev, 0);
@@ -754,8 +767,8 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
if (ret)
goto free;
- printk(KERN_INFO "%s: %s in slot %d, %pM\n",
- dev->name, data->name, ec->slot_no, dev->dev_addr);
+ netdev_info(dev, "%s in slot %d, %pM\n",
+ data->name, ec->slot_no, dev->dev_addr);
ecard_set_drvdata(ec, dev);
diff --git a/drivers/net/ethernet/8390/hydra.c b/drivers/net/ethernet/8390/hydra.c
index f615fdec0f1b..0fe19d609c2e 100644
--- a/drivers/net/ethernet/8390/hydra.c
+++ b/drivers/net/ethernet/8390/hydra.c
@@ -66,6 +66,7 @@ static void hydra_block_input(struct net_device *dev, int count,
static void hydra_block_output(struct net_device *dev, int count,
const unsigned char *buf, int start_page);
static void hydra_remove_one(struct zorro_dev *z);
+static u32 hydra_msg_enable;
static struct zorro_device_id hydra_zorro_tbl[] = {
{ ZORRO_PROD_HYDRA_SYSTEMS_AMIGANET },
@@ -119,6 +120,7 @@ static int hydra_init(struct zorro_dev *z)
int start_page, stop_page;
int j;
int err;
+ struct ei_device *ei_local;
static u32 hydra_offsets[16] = {
0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
@@ -137,6 +139,8 @@ static int hydra_init(struct zorro_dev *z)
start_page = NESM_START_PG;
stop_page = NESM_STOP_PG;
+ ei_local = netdev_priv(dev);
+ ei_local->msg_enable = hydra_msg_enable;
dev->base_addr = ioaddr;
dev->irq = IRQ_AMIGA_PORTS;
@@ -187,15 +191,16 @@ static int hydra_open(struct net_device *dev)
static int hydra_close(struct net_device *dev)
{
- if (ei_debug > 1)
- printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name);
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ netif_dbg(ei_local, ifdown, dev, "Shutting down ethercard.\n");
__ei_close(dev);
return 0;
}
static void hydra_reset_8390(struct net_device *dev)
{
- printk(KERN_INFO "Hydra hw reset not there\n");
+ netdev_info(dev, "Hydra hw reset not there\n");
}
static void hydra_get_8390_hdr(struct net_device *dev,
diff --git a/drivers/net/ethernet/8390/lib8390.c b/drivers/net/ethernet/8390/lib8390.c
index b329f5c0d62b..d2cd80444ade 100644
--- a/drivers/net/ethernet/8390/lib8390.c
+++ b/drivers/net/ethernet/8390/lib8390.c
@@ -99,11 +99,6 @@
#define ei_block_input (ei_local->block_input)
#define ei_get_8390_hdr (ei_local->get_8390_hdr)
-/* use 0 for production, 1 for verification, >2 for debug */
-#ifndef ei_debug
-int ei_debug = 1;
-#endif
-
/* Index to functions. */
static void ei_tx_intr(struct net_device *dev);
static void ei_tx_err(struct net_device *dev);
@@ -116,6 +111,11 @@ static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
static void do_set_multicast_list(struct net_device *dev);
static void __NS8390_init(struct net_device *dev, int startp);
+static unsigned version_printed;
+static u32 msg_enable;
+module_param(msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH));
+MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
+
/*
* SMP and the 8390 setup.
*
@@ -345,19 +345,23 @@ static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
if (ei_local->tx1 == 0) {
output_page = ei_local->tx_start_page;
ei_local->tx1 = send_length;
- if (ei_debug && ei_local->tx2 > 0)
- netdev_dbg(dev, "idle transmitter tx2=%d, lasttx=%d, txing=%d\n",
+ if ((netif_msg_tx_queued(ei_local)) &&
+ ei_local->tx2 > 0)
+ netdev_dbg(dev,
+ "idle transmitter tx2=%d, lasttx=%d, txing=%d\n",
ei_local->tx2, ei_local->lasttx, ei_local->txing);
} else if (ei_local->tx2 == 0) {
output_page = ei_local->tx_start_page + TX_PAGES/2;
ei_local->tx2 = send_length;
- if (ei_debug && ei_local->tx1 > 0)
- netdev_dbg(dev, "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n",
+ if ((netif_msg_tx_queued(ei_local)) &&
+ ei_local->tx1 > 0)
+ netdev_dbg(dev,
+ "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n",
ei_local->tx1, ei_local->lasttx, ei_local->txing);
} else { /* We should never get here. */
- if (ei_debug)
- netdev_dbg(dev, "No Tx buffers free! tx1=%d tx2=%d last=%d\n",
- ei_local->tx1, ei_local->tx2, ei_local->lasttx);
+ netif_dbg(ei_local, tx_err, dev,
+ "No Tx buffers free! tx1=%d tx2=%d last=%d\n",
+ ei_local->tx1, ei_local->tx2, ei_local->lasttx);
ei_local->irqlock = 0;
netif_stop_queue(dev);
ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
@@ -388,7 +392,7 @@ static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
} else
ei_local->txqueue++;
- if (ei_local->tx1 && ei_local->tx2)
+ if (ei_local->tx1 && ei_local->tx2)
netif_stop_queue(dev);
else
netif_start_queue(dev);
@@ -445,9 +449,8 @@ static irqreturn_t __ei_interrupt(int irq, void *dev_id)
/* Change to page 0 and read the intr status reg. */
ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
- if (ei_debug > 3)
- netdev_dbg(dev, "interrupt(isr=%#2.2x)\n",
- ei_inb_p(e8390_base + EN0_ISR));
+ netif_dbg(ei_local, intr, dev, "interrupt(isr=%#2.2x)\n",
+ ei_inb_p(e8390_base + EN0_ISR));
/* !!Assumption!! -- we stay in page 0. Don't break this. */
while ((interrupts = ei_inb_p(e8390_base + EN0_ISR)) != 0 &&
@@ -485,7 +488,7 @@ static irqreturn_t __ei_interrupt(int irq, void *dev_id)
ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
}
- if (interrupts && ei_debug) {
+ if (interrupts && (netif_msg_intr(ei_local))) {
ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
if (nr_serviced >= MAX_SERVICE) {
/* 0xFF is valid for a card removal */
@@ -676,10 +679,11 @@ static void ei_receive(struct net_device *dev)
Keep quiet if it looks like a card removal. One problem here
is that some clones crash in roughly the same way.
*/
- if (ei_debug > 0 &&
+ if ((netif_msg_rx_status(ei_local)) &&
this_frame != ei_local->current_page &&
(this_frame != 0x0 || rxing_page != 0xFF))
- netdev_err(dev, "mismatched read page pointers %2x vs %2x\n",
+ netdev_err(dev,
+ "mismatched read page pointers %2x vs %2x\n",
this_frame, ei_local->current_page);
if (this_frame == rxing_page) /* Read all the frames? */
@@ -707,10 +711,10 @@ static void ei_receive(struct net_device *dev)
}
if (pkt_len < 60 || pkt_len > 1518) {
- if (ei_debug)
- netdev_dbg(dev, "bogus packet size: %d, status=%#2x nxpg=%#2x\n",
- rx_frame.count, rx_frame.status,
- rx_frame.next);
+ netif_dbg(ei_local, rx_status, dev,
+ "bogus packet size: %d, status=%#2x nxpg=%#2x\n",
+ rx_frame.count, rx_frame.status,
+ rx_frame.next);
dev->stats.rx_errors++;
dev->stats.rx_length_errors++;
} else if ((pkt_stat & 0x0F) == ENRSR_RXOK) {
@@ -718,9 +722,9 @@ static void ei_receive(struct net_device *dev)
skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb == NULL) {
- if (ei_debug > 1)
- netdev_dbg(dev, "Couldn't allocate a sk_buff of size %d\n",
- pkt_len);
+ netif_err(ei_local, rx_err, dev,
+ "Couldn't allocate a sk_buff of size %d\n",
+ pkt_len);
dev->stats.rx_dropped++;
break;
} else {
@@ -736,10 +740,10 @@ static void ei_receive(struct net_device *dev)
dev->stats.multicast++;
}
} else {
- if (ei_debug)
- netdev_dbg(dev, "bogus packet: status=%#2x nxpg=%#2x size=%d\n",
- rx_frame.status, rx_frame.next,
- rx_frame.count);
+ netif_err(ei_local, rx_err, dev,
+ "bogus packet: status=%#2x nxpg=%#2x size=%d\n",
+ rx_frame.status, rx_frame.next,
+ rx_frame.count);
dev->stats.rx_errors++;
/* NB: The NIC counts CRC, frame and missed errors. */
if (pkt_stat & ENRSR_FO)
@@ -789,8 +793,7 @@ static void ei_rx_overrun(struct net_device *dev)
was_txing = ei_inb_p(e8390_base+E8390_CMD) & E8390_TRANS;
ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
- if (ei_debug > 1)
- netdev_dbg(dev, "Receiver overrun\n");
+ netif_dbg(ei_local, rx_err, dev, "Receiver overrun\n");
dev->stats.rx_over_errors++;
/*
@@ -965,8 +968,9 @@ static void __ei_set_multicast_list(struct net_device *dev)
static void ethdev_setup(struct net_device *dev)
{
struct ei_device *ei_local = netdev_priv(dev);
- if (ei_debug > 1)
- printk(version);
+
+ if ((msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0))
+ pr_info("%s", version);
ether_setup(dev);
@@ -1035,9 +1039,10 @@ static void __NS8390_init(struct net_device *dev, int startp)
ei_outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD); /* 0x61 */
for (i = 0; i < 6; i++) {
ei_outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i));
- if (ei_debug > 1 &&
+ if ((netif_msg_probe(ei_local)) &&
ei_inb_p(e8390_base + EN1_PHYS_SHIFT(i)) != dev->dev_addr[i])
- netdev_err(dev, "Hw. address read/write mismap %d\n", i);
+ netdev_err(dev,
+ "Hw. address read/write mismap %d\n", i);
}
ei_outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
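The lib8390.c conversion above leans on the netif_dbg()/netif_err()/netif_warn() family. A simplified, illustrative expansion of netif_dbg() as used here; the real macro in <linux/netdevice.h> additionally routes through netdev_dbg() and hence dynamic debug, so disabled messages cost almost nothing:

#define example_netif_dbg(priv, type, netdev, fmt, ...)		\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_dbg(netdev, fmt, ##__VA_ARGS__);		\
} while (0)

netif_msg_##type(priv) tests the matching NETIF_MSG_* bit in priv->msg_enable, which is why the hunks above pass categories such as "intr", "rx_err" and "tx_queued" where the old code compared ei_debug against magic thresholds.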
diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c
index 88ccc8b14f0a..90e825e8abfe 100644
--- a/drivers/net/ethernet/8390/mac8390.c
+++ b/drivers/net/ethernet/8390/mac8390.c
@@ -167,6 +167,7 @@ static void slow_sane_block_output(struct net_device *dev, int count,
const unsigned char *buf, int start_page);
static void word_memcpy_tocard(unsigned long tp, const void *fp, int count);
static void word_memcpy_fromcard(void *tp, unsigned long fp, int count);
+static u32 mac8390_msg_enable;
static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
{
@@ -402,6 +403,7 @@ struct net_device * __init mac8390_probe(int unit)
struct net_device *dev;
struct nubus_dev *ndev = NULL;
int err = -ENODEV;
+ struct ei_device *ei_local;
static unsigned int slots;
@@ -440,6 +442,10 @@ struct net_device * __init mac8390_probe(int unit)
if (!ndev)
goto out;
+
+ ei_local = netdev_priv(dev);
+ ei_local->msg_enable = mac8390_msg_enable;
+
err = register_netdev(dev);
if (err)
goto out;
@@ -660,19 +666,22 @@ static int mac8390_close(struct net_device *dev)
static void mac8390_no_reset(struct net_device *dev)
{
+ struct ei_device *ei_local = netdev_priv(dev);
+
ei_status.txing = 0;
- if (ei_debug > 1)
- pr_info("reset not supported\n");
+ netif_info(ei_local, hw, dev, "reset not supported\n");
}
static void interlan_reset(struct net_device *dev)
{
unsigned char *target = nubus_slot_addr(IRQ2SLOT(dev->irq));
- if (ei_debug > 1)
- pr_info("Need to reset the NS8390 t=%lu...", jiffies);
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ netif_info(ei_local, hw, dev, "Need to reset the NS8390 t=%lu...",
+ jiffies);
ei_status.txing = 0;
target[0xC0000] = 0;
- if (ei_debug > 1)
+ if (netif_msg_hw(ei_local))
pr_cont("reset complete\n");
}
diff --git a/drivers/net/ethernet/8390/mcf8390.c b/drivers/net/ethernet/8390/mcf8390.c
index 230efd6fa5d5..38fcdcf7c4c7 100644
--- a/drivers/net/ethernet/8390/mcf8390.c
+++ b/drivers/net/ethernet/8390/mcf8390.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -39,6 +38,7 @@ static const char version[] =
#define NESM_START_PG 0x40 /* First page of TX buffer */
#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
+static u32 mcf8390_msg_enable;
#ifdef NE2000_ODDOFFSET
/*
@@ -153,9 +153,9 @@ static void mcf8390_reset_8390(struct net_device *dev)
{
unsigned long reset_start_time = jiffies;
u32 addr = dev->base_addr;
+ struct ei_device *ei_local = netdev_priv(dev);
- if (ei_debug > 1)
- netdev_dbg(dev, "resetting the 8390 t=%ld...\n", jiffies);
+ netif_dbg(ei_local, hw, dev, "resetting the 8390 t=%ld...\n", jiffies);
ei_outb(ei_inb(addr + NE_RESET), addr + NE_RESET);
@@ -288,7 +288,7 @@ static void mcf8390_block_output(struct net_device *dev, int count,
dma_start = jiffies;
while ((ei_inb(addr + NE_EN0_ISR) & ENISR_RDC) == 0) {
if (time_after(jiffies, dma_start + 2 * HZ / 100)) { /* 20ms */
- netdev_err(dev, "timeout waiting for Tx RDC\n");
+ netdev_warn(dev, "timeout waiting for Tx RDC\n");
mcf8390_reset_8390(dev);
__NS8390_init(dev, 1);
break;
@@ -437,6 +437,7 @@ static int mcf8390_probe(struct platform_device *pdev)
SET_NETDEV_DEV(dev, &pdev->dev);
platform_set_drvdata(pdev, dev);
ei_local = netdev_priv(dev);
+ ei_local->msg_enable = mcf8390_msg_enable;
dev->irq = irq->start;
dev->base_addr = mem->start;
diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c
index b2e840513735..58eaa8f34942 100644
--- a/drivers/net/ethernet/8390/ne.c
+++ b/drivers/net/ethernet/8390/ne.c
@@ -71,14 +71,17 @@ static struct platform_device *pdev_ne[MAX_NE_CARDS];
static int io[MAX_NE_CARDS];
static int irq[MAX_NE_CARDS];
static int bad[MAX_NE_CARDS];
+static u32 ne_msg_enable;
#ifdef MODULE
module_param_array(io, int, NULL, 0);
module_param_array(irq, int, NULL, 0);
module_param_array(bad, int, NULL, 0);
+module_param_named(msg_enable, ne_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH));
MODULE_PARM_DESC(io, "I/O base address(es),required");
MODULE_PARM_DESC(irq, "IRQ number(s)");
MODULE_PARM_DESC(bad, "Accept card(s) with bad signatures");
+MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
MODULE_DESCRIPTION("NE1000/NE2000 ISA/PnP Ethernet driver");
MODULE_LICENSE("GPL");
#endif /* MODULE */
@@ -214,8 +217,8 @@ static int __init do_ne_probe(struct net_device *dev)
if (base_addr > 0x1ff) { /* Check a single specified location. */
int ret = ne_probe1(dev, base_addr);
if (ret)
- printk(KERN_WARNING "ne.c: No NE*000 card found at "
- "i/o = %#lx\n", base_addr);
+ netdev_warn(dev, "ne.c: No NE*000 card found at "
+ "i/o = %#lx\n", base_addr);
return ret;
}
else if (base_addr != 0) /* Don't probe at all. */
@@ -264,11 +267,14 @@ static int __init ne_probe_isapnp(struct net_device *dev)
/* found it */
dev->base_addr = pnp_port_start(idev, 0);
dev->irq = pnp_irq(idev, 0);
- printk(KERN_INFO "ne.c: ISAPnP reports %s at i/o %#lx, irq %d.\n",
- (char *) isapnp_clone_list[i].driver_data,
- dev->base_addr, dev->irq);
+ netdev_info(dev,
+ "ne.c: ISAPnP reports %s at i/o %#lx, irq %d.\n",
+ (char *) isapnp_clone_list[i].driver_data,
+ dev->base_addr, dev->irq);
if (ne_probe1(dev, dev->base_addr) != 0) { /* Shouldn't happen. */
- printk(KERN_ERR "ne.c: Probe of ISAPnP card at %#lx failed.\n", dev->base_addr);
+ netdev_err(dev,
+ "ne.c: Probe of ISAPnP card at %#lx failed.\n",
+ dev->base_addr);
pnp_device_detach(idev);
return -ENXIO;
}
@@ -293,6 +299,7 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
int neX000, ctron, copam, bad_card;
int reg0, ret;
static unsigned version_printed;
+ struct ei_device *ei_local = netdev_priv(dev);
if (!request_region(ioaddr, NE_IO_EXTENT, DRV_NAME))
return -EBUSY;
@@ -319,10 +326,10 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
}
}
- if (ei_debug && version_printed++ == 0)
- printk(KERN_INFO "%s%s", version1, version2);
+ if ((ne_msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0))
+ netdev_info(dev, "%s%s", version1, version2);
- printk(KERN_INFO "NE*000 ethercard probe at %#3lx:", ioaddr);
+ netdev_info(dev, "NE*000 ethercard probe at %#3lx:", ioaddr);
/* A user with a poor card that fails to ack the reset, or that
does not have a valid 0x57,0x57 signature can still use this
@@ -343,10 +350,10 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
while ((inb_p(ioaddr + EN0_ISR) & ENISR_RESET) == 0)
if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
if (bad_card) {
- printk(" (warning: no reset ack)");
+ pr_cont(" (warning: no reset ack)");
break;
} else {
- printk(" not found (no reset ack).\n");
+ pr_cont(" not found (no reset ack).\n");
ret = -ENODEV;
goto err_out;
}
@@ -454,13 +461,13 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
}
if (bad_clone_list[i].name8 == NULL)
{
- printk(" not found (invalid signature %2.2x %2.2x).\n",
+ pr_cont(" not found (invalid signature %2.2x %2.2x).\n",
SA_prom[14], SA_prom[15]);
ret = -ENXIO;
goto err_out;
}
#else
- printk(" not found.\n");
+ pr_cont(" not found.\n");
ret = -ENXIO;
goto err_out;
#endif
@@ -476,15 +483,15 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
mdelay(10); /* wait 10ms for interrupt to propagate */
outb_p(0x00, ioaddr + EN0_IMR); /* Mask it again. */
dev->irq = probe_irq_off(cookie);
- if (ei_debug > 2)
- printk(" autoirq is %d\n", dev->irq);
+ if (netif_msg_probe(ei_local))
+ pr_cont(" autoirq is %d", dev->irq);
} else if (dev->irq == 2)
/* Fixup for users that don't know that IRQ 2 is really IRQ 9,
or don't know which one to set. */
dev->irq = 9;
if (! dev->irq) {
- printk(" failed to detect IRQ line.\n");
+ pr_cont(" failed to detect IRQ line.\n");
ret = -EAGAIN;
goto err_out;
}
@@ -493,7 +500,7 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
share and the board will usually be enabled. */
ret = request_irq(dev->irq, eip_interrupt, 0, name, dev);
if (ret) {
- printk (" unable to get IRQ %d (errno=%d).\n", dev->irq, ret);
+ pr_cont(" unable to get IRQ %d (errno=%d).\n", dev->irq, ret);
goto err_out;
}
@@ -512,7 +519,7 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
}
#endif
- printk("%pM\n", dev->dev_addr);
+ pr_cont("%pM\n", dev->dev_addr);
ei_status.name = name;
ei_status.tx_start_page = start_page;
@@ -536,11 +543,12 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
dev->netdev_ops = &eip_netdev_ops;
NS8390p_init(dev, 0);
+ ei_local->msg_enable = ne_msg_enable;
ret = register_netdev(dev);
if (ret)
goto out_irq;
- printk(KERN_INFO "%s: %s found at %#lx, using IRQ %d.\n",
- dev->name, name, ioaddr, dev->irq);
+ netdev_info(dev, "%s found at %#lx, using IRQ %d.\n",
+ name, ioaddr, dev->irq);
return 0;
out_irq:
@@ -556,9 +564,9 @@ err_out:
static void ne_reset_8390(struct net_device *dev)
{
unsigned long reset_start_time = jiffies;
+ struct ei_device *ei_local = netdev_priv(dev);
- if (ei_debug > 1)
- printk(KERN_DEBUG "resetting the 8390 t=%ld...", jiffies);
+ netif_dbg(ei_local, hw, dev, "resetting the 8390 t=%ld...\n", jiffies);
/* DON'T change these to inb_p/outb_p or reset will fail on clones. */
outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
@@ -569,7 +577,7 @@ static void ne_reset_8390(struct net_device *dev)
/* This check _should_not_ be necessary, omit eventually. */
while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0)
if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
- printk(KERN_WARNING "%s: ne_reset_8390() did not complete.\n", dev->name);
+ netdev_err(dev, "ne_reset_8390() did not complete.\n");
break;
}
outb_p(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */
@@ -587,9 +595,9 @@ static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, i
if (ei_status.dmaing)
{
- printk(KERN_EMERG "%s: DMAing conflict in ne_get_8390_hdr "
- "[DMAstat:%d][irqlock:%d].\n",
- dev->name, ei_status.dmaing, ei_status.irqlock);
+ netdev_err(dev, "DMAing conflict in ne_get_8390_hdr "
+ "[DMAstat:%d][irqlock:%d].\n",
+ ei_status.dmaing, ei_status.irqlock);
return;
}
@@ -621,6 +629,7 @@ static void ne_block_input(struct net_device *dev, int count, struct sk_buff *sk
{
#ifdef NE_SANITY_CHECK
int xfer_count = count;
+ struct ei_device *ei_local = netdev_priv(dev);
#endif
int nic_base = dev->base_addr;
char *buf = skb->data;
@@ -628,9 +637,9 @@ static void ne_block_input(struct net_device *dev, int count, struct sk_buff *sk
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing)
{
- printk(KERN_EMERG "%s: DMAing conflict in ne_block_input "
- "[DMAstat:%d][irqlock:%d].\n",
- dev->name, ei_status.dmaing, ei_status.irqlock);
+ netdev_err(dev, "DMAing conflict in ne_block_input "
+ "[DMAstat:%d][irqlock:%d].\n",
+ ei_status.dmaing, ei_status.irqlock);
return;
}
ei_status.dmaing |= 0x01;
@@ -660,7 +669,7 @@ static void ne_block_input(struct net_device *dev, int count, struct sk_buff *sk
this message you either 1) have a slightly incompatible clone
or 2) have noise/speed problems with your bus. */
- if (ei_debug > 1)
+ if (netif_msg_rx_status(ei_local))
{
/* DMA termination address check... */
int addr, tries = 20;
@@ -674,9 +683,9 @@ static void ne_block_input(struct net_device *dev, int count, struct sk_buff *sk
break;
} while (--tries > 0);
if (tries <= 0)
- printk(KERN_WARNING "%s: RX transfer address mismatch,"
- "%#4.4x (expected) vs. %#4.4x (actual).\n",
- dev->name, ring_offset + xfer_count, addr);
+ netdev_warn(dev, "RX transfer address mismatch,"
+ "%#4.4x (expected) vs. %#4.4x (actual).\n",
+ ring_offset + xfer_count, addr);
}
#endif
outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
@@ -690,6 +699,7 @@ static void ne_block_output(struct net_device *dev, int count,
unsigned long dma_start;
#ifdef NE_SANITY_CHECK
int retries = 0;
+ struct ei_device *ei_local = netdev_priv(dev);
#endif
/* Round the count up for word writes. Do we need to do this?
@@ -702,9 +712,9 @@ static void ne_block_output(struct net_device *dev, int count,
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing)
{
- printk(KERN_EMERG "%s: DMAing conflict in ne_block_output."
- "[DMAstat:%d][irqlock:%d]\n",
- dev->name, ei_status.dmaing, ei_status.irqlock);
+ netdev_err(dev, "DMAing conflict in ne_block_output."
+ "[DMAstat:%d][irqlock:%d]\n",
+ ei_status.dmaing, ei_status.irqlock);
return;
}
ei_status.dmaing |= 0x01;
@@ -751,7 +761,7 @@ retry:
/* This was for the ALPHA version only, but enough people have
been encountering problems so it is still here. */
- if (ei_debug > 1)
+ if (netif_msg_tx_queued(ei_local))
{
/* DMA termination address check... */
int addr, tries = 20;
@@ -765,9 +775,9 @@ retry:
if (tries <= 0)
{
- printk(KERN_WARNING "%s: Tx packet transfer address mismatch,"
- "%#4.4x (expected) vs. %#4.4x (actual).\n",
- dev->name, (start_page << 8) + count, addr);
+ netdev_warn(dev, "Tx packet transfer address mismatch,"
+ "%#4.4x (expected) vs. %#4.4x (actual).\n",
+ (start_page << 8) + count, addr);
if (retries++ == 0)
goto retry;
}
@@ -776,7 +786,7 @@ retry:
while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0)
if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */
- printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name);
+ netdev_warn(dev, "timeout waiting for Tx RDC.\n");
ne_reset_8390(dev);
NS8390p_init(dev, 1);
break;
@@ -936,8 +946,8 @@ int __init init_module(void)
retval = platform_driver_probe(&ne_driver, ne_drv_probe);
if (retval) {
if (io[0] == 0)
- printk(KERN_NOTICE "ne.c: You must supply \"io=0xNNN\""
- " value(s) for ISA cards.\n");
+ pr_notice("ne.c: You must supply \"io=0xNNN\""
+ " value(s) for ISA cards.\n");
ne_loop_rm_unreg(1);
return retval;
}
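Each 8390 variant in this series follows the same shape as ne.c above: a driver-private default bitmap, optionally exposed as a read-only module parameter, copied into the per-device state before register_netdev(). Distilled into one hedged sketch (the foo_* names are illustrative, not from the patch):

	/* Sketch of the recurring msg_enable plumbing in this series. */
	static u32 foo_msg_enable;

	module_param_named(msg_enable, foo_msg_enable, uint,
			   (S_IRUSR|S_IRGRP|S_IROTH));
	MODULE_PARM_DESC(msg_enable,
			 "Debug message level (see linux/netdevice.h for bitmap)");

	static int foo_probe(struct net_device *dev)
	{
		struct ei_device *ei_local = netdev_priv(dev);

		/* Probe-time default; runtime changes go through ethtool
		 * where the driver wires up the msglevel ops. */
		ei_local->msg_enable = foo_msg_enable;
		return register_netdev(dev);
	}

Making the parameter read-only (S_IRUSR|S_IRGRP|S_IROTH) means it only seeds the probe-time default rather than acting as a live control.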
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index fc14a85e4d5f..f395c967262e 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -33,8 +33,6 @@
/* The user-configurable values.
These may be modified when a driver module is loaded.*/
-static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
-
#define MAX_UNITS 8 /* More are supported, limit only on options */
/* Used to pass the full-duplex flag, etc. */
static int full_duplex[MAX_UNITS];
@@ -60,6 +58,8 @@ static int options[MAX_UNITS];
#include "8390.h"
+static u32 ne2k_msg_enable;
+
/* These identify the driver base version and may not be removed. */
static const char version[] =
KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
@@ -76,10 +76,10 @@ MODULE_AUTHOR("Donald Becker / Paul Gortmaker");
MODULE_DESCRIPTION("PCI NE2000 clone driver");
MODULE_LICENSE("GPL");
-module_param(debug, int, 0);
+module_param_named(msg_enable, ne2k_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH));
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
-MODULE_PARM_DESC(debug, "debug level (1-2)");
+MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
MODULE_PARM_DESC(options, "Bit 5: full duplex");
MODULE_PARM_DESC(full_duplex, "full duplex setting(s) (1)");
@@ -226,6 +226,7 @@ static int ne2k_pci_init_one(struct pci_dev *pdev,
static unsigned int fnd_cnt;
long ioaddr;
int flags = pci_clone_list[chip_idx].flags;
+ struct ei_device *ei_local;
/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
@@ -280,6 +281,8 @@ static int ne2k_pci_init_one(struct pci_dev *pdev,
goto err_out_free_res;
}
dev->netdev_ops = &ne2k_netdev_ops;
+ ei_local = netdev_priv(dev);
+ ei_local->msg_enable = ne2k_msg_enable;
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -379,9 +382,9 @@ static int ne2k_pci_init_one(struct pci_dev *pdev,
if (i)
goto err_out_free_netdev;
- printk("%s: %s found at %#lx, IRQ %d, %pM.\n",
- dev->name, pci_clone_list[chip_idx].name, ioaddr, dev->irq,
- dev->dev_addr);
+ netdev_info(dev, "%s found at %#lx, IRQ %d, %pM.\n",
+ pci_clone_list[chip_idx].name, ioaddr, dev->irq,
+ dev->dev_addr);
return 0;
@@ -450,9 +453,10 @@ static int ne2k_pci_close(struct net_device *dev)
static void ne2k_pci_reset_8390(struct net_device *dev)
{
unsigned long reset_start_time = jiffies;
+ struct ei_device *ei_local = netdev_priv(dev);
- if (debug > 1) printk("%s: Resetting the 8390 t=%ld...",
- dev->name, jiffies);
+ netif_dbg(ei_local, hw, dev, "resetting the 8390 t=%ld...\n",
+ jiffies);
outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
@@ -462,7 +466,7 @@ static void ne2k_pci_reset_8390(struct net_device *dev)
/* This check _should_not_ be necessary, omit eventually. */
while ((inb(NE_BASE+EN0_ISR) & ENISR_RESET) == 0)
if (jiffies - reset_start_time > 2) {
- printk("%s: ne2k_pci_reset_8390() did not complete.\n", dev->name);
+ netdev_err(dev, "ne2k_pci_reset_8390() did not complete.\n");
break;
}
outb(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */
@@ -479,9 +483,9 @@ static void ne2k_pci_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing) {
- printk("%s: DMAing conflict in ne2k_pci_get_8390_hdr "
+ netdev_err(dev, "DMAing conflict in ne2k_pci_get_8390_hdr "
"[DMAstat:%d][irqlock:%d].\n",
- dev->name, ei_status.dmaing, ei_status.irqlock);
+ ei_status.dmaing, ei_status.irqlock);
return;
}
@@ -517,9 +521,9 @@ static void ne2k_pci_block_input(struct net_device *dev, int count,
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing) {
- printk("%s: DMAing conflict in ne2k_pci_block_input "
+ netdev_err(dev, "DMAing conflict in ne2k_pci_block_input "
"[DMAstat:%d][irqlock:%d].\n",
- dev->name, ei_status.dmaing, ei_status.irqlock);
+ ei_status.dmaing, ei_status.irqlock);
return;
}
ei_status.dmaing |= 0x01;
@@ -572,9 +576,9 @@ static void ne2k_pci_block_output(struct net_device *dev, int count,
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing) {
- printk("%s: DMAing conflict in ne2k_pci_block_output."
+ netdev_err(dev, "DMAing conflict in ne2k_pci_block_output."
"[DMAstat:%d][irqlock:%d]\n",
- dev->name, ei_status.dmaing, ei_status.irqlock);
+ ei_status.dmaing, ei_status.irqlock);
return;
}
ei_status.dmaing |= 0x01;
@@ -619,7 +623,7 @@ static void ne2k_pci_block_output(struct net_device *dev, int count,
while ((inb(nic_base + EN0_ISR) & ENISR_RDC) == 0)
if (jiffies - dma_start > 2) { /* Avoid clock roll-over. */
- printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name);
+ netdev_warn(dev, "timeout waiting for Tx RDC.\n");
ne2k_pci_reset_8390(dev);
NS8390_init(dev,1);
break;
@@ -640,8 +644,24 @@ static void ne2k_pci_get_drvinfo(struct net_device *dev,
strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
}
+static u32 ne2k_pci_get_msglevel(struct net_device *dev)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ return ei_local->msg_enable;
+}
+
+static void ne2k_pci_set_msglevel(struct net_device *dev, u32 v)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ ei_local->msg_enable = v;
+}
+
static const struct ethtool_ops ne2k_pci_ethtool_ops = {
.get_drvinfo = ne2k_pci_get_drvinfo,
+ .get_msglevel = ne2k_pci_get_msglevel,
+ .set_msglevel = ne2k_pci_set_msglevel,
};
static void ne2k_pci_remove_one(struct pci_dev *pdev)
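With get_msglevel/set_msglevel registered as above, the bitmap becomes readable and tunable at runtime via ethtool's msglvl command. Drivers that instead keep a legacy 0..7 debug module parameter usually translate it with netif_msg_init() from <linux/netdevice.h>; a sketch assuming that style (debug and the default bits here are illustrative, not from this patch):

	/* Sketch: mapping a legacy 0..7 debug level onto NETIF_MSG_* bits.
	 * netif_msg_init() returns the default bits for an out-of-range
	 * value (e.g. -1) and a fill-up-to-level mask otherwise.
	 */
	static int debug = -1;		/* -1 selects the defaults below */

	static void foo_init_msglevel(struct ei_device *ei_local)
	{
		ei_local->msg_enable = netif_msg_init(debug,
						      NETIF_MSG_DRV |
						      NETIF_MSG_PROBE |
						      NETIF_MSG_LINK);
	}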
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index 46c5aadaca8e..ca3c2b921cf6 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -32,7 +32,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/timer.h>
@@ -67,7 +66,7 @@
#define PCNET_RDC_TIMEOUT (2*HZ/100) /* Max wait in jiffies for Tx RDC */
static const char *if_names[] = { "auto", "10baseT", "10base2"};
-
+static u32 pcnet_msg_enable;
/*====================================================================*/
@@ -558,6 +557,7 @@ static int pcnet_config(struct pcmcia_device *link)
int start_pg, stop_pg, cm_offset;
int has_shmem = 0;
hw_info_t *local_hw_info;
+ struct ei_device *ei_local;
dev_dbg(&link->dev, "pcnet_config\n");
@@ -607,6 +607,8 @@ static int pcnet_config(struct pcmcia_device *link)
mii_phy_probe(dev);
SET_NETDEV_DEV(dev, &link->dev);
+ ei_local = netdev_priv(dev);
+ ei_local->msg_enable = pcnet_msg_enable;
if (register_netdev(dev) != 0) {
pr_notice("register_netdev() failed\n");
@@ -616,7 +618,7 @@ static int pcnet_config(struct pcmcia_device *link)
if (info->flags & (IS_DL10019|IS_DL10022)) {
u_char id = inb(dev->base_addr + 0x1a);
netdev_info(dev, "NE2000 (DL100%d rev %02x): ",
- (info->flags & IS_DL10022) ? 22 : 19, id);
+ (info->flags & IS_DL10022) ? 22 : 19, id);
if (info->pna_phy)
pr_cont("PNA, ");
} else {
@@ -1063,9 +1065,9 @@ static void ei_watchdog(u_long arg)
if (info->phy_id == info->eth_phy) {
if (p)
netdev_info(dev, "autonegotiation complete: "
- "%sbaseT-%cD selected\n",
- ((p & 0x0180) ? "100" : "10"),
- ((p & 0x0140) ? 'F' : 'H'));
+ "%sbaseT-%cD selected\n",
+ ((p & 0x0180) ? "100" : "10"),
+ ((p & 0x0140) ? 'F' : 'H'));
else
netdev_info(dev, "link partner did not autonegotiate\n");
}
@@ -1081,7 +1083,7 @@ static void ei_watchdog(u_long arg)
mdio_write(mii_addr, info->phy_id, 0, 0x0400);
info->phy_id ^= info->pna_phy ^ info->eth_phy;
netdev_info(dev, "switched to %s transceiver\n",
- (info->phy_id == info->eth_phy) ? "ethernet" : "PNA");
+ (info->phy_id == info->eth_phy) ? "ethernet" : "PNA");
mdio_write(mii_addr, info->phy_id, 0,
(info->phy_id == info->eth_phy) ? 0x1000 : 0);
info->link_status = 0;
@@ -1128,9 +1130,9 @@ static void dma_get_8390_hdr(struct net_device *dev,
unsigned int nic_base = dev->base_addr;
if (ei_status.dmaing) {
- netdev_notice(dev, "DMAing conflict in dma_block_input."
- "[DMAstat:%1x][irqlock:%1x]\n",
- ei_status.dmaing, ei_status.irqlock);
+ netdev_err(dev, "DMAing conflict in dma_block_input."
+ "[DMAstat:%1x][irqlock:%1x]\n",
+ ei_status.dmaing, ei_status.irqlock);
return;
}
@@ -1159,13 +1161,14 @@ static void dma_block_input(struct net_device *dev, int count,
unsigned int nic_base = dev->base_addr;
int xfer_count = count;
char *buf = skb->data;
+ struct ei_device *ei_local = netdev_priv(dev);
- if ((ei_debug > 4) && (count != 4))
+ if ((netif_msg_rx_status(ei_local)) && (count != 4))
netdev_dbg(dev, "[bi=%d]\n", count+4);
if (ei_status.dmaing) {
- netdev_notice(dev, "DMAing conflict in dma_block_input."
- "[DMAstat:%1x][irqlock:%1x]\n",
- ei_status.dmaing, ei_status.irqlock);
+ netdev_err(dev, "DMAing conflict in dma_block_input."
+ "[DMAstat:%1x][irqlock:%1x]\n",
+ ei_status.dmaing, ei_status.irqlock);
return;
}
ei_status.dmaing |= 0x01;
@@ -1183,7 +1186,8 @@ static void dma_block_input(struct net_device *dev, int count,
/* This was for the ALPHA version only, but enough people have been
encountering problems that it is still here. */
#ifdef PCMCIA_DEBUG
- if (ei_debug > 4) { /* DMA termination address check... */
+ /* DMA termination address check... */
+ if (netif_msg_rx_status(ei_local)) {
int addr, tries = 20;
do {
/* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here
@@ -1196,8 +1200,8 @@ static void dma_block_input(struct net_device *dev, int count,
} while (--tries > 0);
if (tries <= 0)
netdev_notice(dev, "RX transfer address mismatch,"
- "%#4.4x (expected) vs. %#4.4x (actual).\n",
- ring_offset + xfer_count, addr);
+ "%#4.4x (expected) vs. %#4.4x (actual).\n",
+ ring_offset + xfer_count, addr);
}
#endif
outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
@@ -1213,12 +1217,12 @@ static void dma_block_output(struct net_device *dev, int count,
pcnet_dev_t *info = PRIV(dev);
#ifdef PCMCIA_DEBUG
int retries = 0;
+ struct ei_device *ei_local = netdev_priv(dev);
#endif
u_long dma_start;
#ifdef PCMCIA_DEBUG
- if (ei_debug > 4)
- netdev_dbg(dev, "[bo=%d]\n", count);
+ netif_dbg(ei_local, tx_queued, dev, "[bo=%d]\n", count);
#endif
/* Round the count up for word writes. Do we need to do this?
@@ -1227,9 +1231,9 @@ static void dma_block_output(struct net_device *dev, int count,
if (count & 0x01)
count++;
if (ei_status.dmaing) {
- netdev_notice(dev, "DMAing conflict in dma_block_output."
- "[DMAstat:%1x][irqlock:%1x]\n",
- ei_status.dmaing, ei_status.irqlock);
+ netdev_err(dev, "DMAing conflict in dma_block_output."
+ "[DMAstat:%1x][irqlock:%1x]\n",
+ ei_status.dmaing, ei_status.irqlock);
return;
}
ei_status.dmaing |= 0x01;
@@ -1256,7 +1260,8 @@ static void dma_block_output(struct net_device *dev, int count,
#ifdef PCMCIA_DEBUG
/* This was for the ALPHA version only, but enough people have been
encountering problems that it is still here. */
- if (ei_debug > 4) { /* DMA termination address check... */
+ /* DMA termination address check... */
+ if (netif_msg_tx_queued(ei_local)) {
int addr, tries = 20;
do {
int high = inb_p(nic_base + EN0_RSARHI);
@@ -1267,8 +1272,8 @@ static void dma_block_output(struct net_device *dev, int count,
} while (--tries > 0);
if (tries <= 0) {
netdev_notice(dev, "Tx packet transfer address mismatch,"
- "%#4.4x (expected) vs. %#4.4x (actual).\n",
- (start_page << 8) + count, addr);
+ "%#4.4x (expected) vs. %#4.4x (actual).\n",
+ (start_page << 8) + count, addr);
if (retries++ == 0)
goto retry;
}
@@ -1277,10 +1282,10 @@ static void dma_block_output(struct net_device *dev, int count,
while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0)
if (time_after(jiffies, dma_start + PCNET_RDC_TIMEOUT)) {
- netdev_notice(dev, "timeout waiting for Tx RDC.\n");
- pcnet_reset_8390(dev);
- NS8390_init(dev, 1);
- break;
+ netdev_warn(dev, "timeout waiting for Tx RDC.\n");
+ pcnet_reset_8390(dev);
+ NS8390_init(dev, 1);
+ break;
}
outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
diff --git a/drivers/net/ethernet/8390/smc-ultra.c b/drivers/net/ethernet/8390/smc-ultra.c
index b0fbce39661a..139385dcdaa7 100644
--- a/drivers/net/ethernet/8390/smc-ultra.c
+++ b/drivers/net/ethernet/8390/smc-ultra.c
@@ -111,6 +111,7 @@ static struct isapnp_device_id ultra_device_ids[] __initdata = {
MODULE_DEVICE_TABLE(isapnp, ultra_device_ids);
#endif
+static u32 ultra_msg_enable;
#define START_PG 0x00 /* First page of TX buffer */
@@ -211,6 +212,7 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
unsigned char num_pages, irqreg, addr, piomode;
unsigned char idreg = inb(ioaddr + 7);
unsigned char reg4 = inb(ioaddr + 4) & 0x7f;
+ struct ei_device *ei_local = netdev_priv(dev);
if (!request_region(ioaddr, ULTRA_IO_EXTENT, DRV_NAME))
return -EBUSY;
@@ -232,16 +234,16 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
goto out;
}
- if (ei_debug && version_printed++ == 0)
- printk(version);
+ if ((ultra_msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0))
+ netdev_info(dev, "%s", version);
model_name = (idreg & 0xF0) == 0x20 ? "SMC Ultra" : "SMC EtherEZ";
for (i = 0; i < 6; i++)
dev->dev_addr[i] = inb(ioaddr + 8 + i);
- printk("%s: %s at %#3x, %pM", dev->name, model_name,
- ioaddr, dev->dev_addr);
+ netdev_info(dev, "%s at %#3x, %pM", model_name,
+ ioaddr, dev->dev_addr);
/* Switch from the station address to the alternate register set and
read the useful registers there. */
@@ -265,7 +267,7 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
irq = irqmap[((irqreg & 0x40) >> 4) + ((irqreg & 0x0c) >> 2)];
if (irq == 0) {
- printk(", failed to detect IRQ line.\n");
+ pr_cont(", failed to detect IRQ line.\n");
retval = -EAGAIN;
goto out;
}
@@ -296,7 +298,7 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
ei_status.mem = ioremap(dev->mem_start, (ei_status.stop_page - START_PG)*256);
if (!ei_status.mem) {
- printk(", failed to ioremap.\n");
+ pr_cont(", failed to ioremap.\n");
retval = -ENOMEM;
goto out;
}
@@ -304,14 +306,15 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
dev->mem_end = dev->mem_start + (ei_status.stop_page - START_PG)*256;
if (piomode) {
- printk(",%s IRQ %d programmed-I/O mode.\n",
- eeprom_irq ? "EEPROM" : "assigned ", dev->irq);
+ pr_cont(", %s IRQ %d programmed-I/O mode.\n",
+ eeprom_irq ? "EEPROM" : "assigned ", dev->irq);
ei_status.block_input = &ultra_pio_input;
ei_status.block_output = &ultra_pio_output;
ei_status.get_8390_hdr = &ultra_pio_get_hdr;
} else {
- printk(",%s IRQ %d memory %#lx-%#lx.\n", eeprom_irq ? "" : "assigned ",
- dev->irq, dev->mem_start, dev->mem_end-1);
+ pr_cont(", %s IRQ %d memory %#lx-%#lx.\n",
+ eeprom_irq ? "" : "assigned ", dev->irq, dev->mem_start,
+ dev->mem_end-1);
ei_status.block_input = &ultra_block_input;
ei_status.block_output = &ultra_block_output;
ei_status.get_8390_hdr = &ultra_get_8390_hdr;
@@ -320,6 +323,7 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
dev->netdev_ops = &ultra_netdev_ops;
NS8390_init(dev, 0);
+ ei_local->msg_enable = ultra_msg_enable;
retval = register_netdev(dev);
if (retval)
@@ -356,12 +360,15 @@ static int __init ultra_probe_isapnp(struct net_device *dev)
/* found it */
dev->base_addr = pnp_port_start(idev, 0);
dev->irq = pnp_irq(idev, 0);
- printk(KERN_INFO "smc-ultra.c: ISAPnP reports %s at i/o %#lx, irq %d.\n",
- (char *) ultra_device_ids[i].driver_data,
- dev->base_addr, dev->irq);
+ netdev_info(dev,
+ "smc-ultra.c: ISAPnP reports %s at i/o %#lx, irq %d.\n",
+ (char *) ultra_device_ids[i].driver_data,
+ dev->base_addr, dev->irq);
if (ultra_probe1(dev, dev->base_addr) != 0) { /* Shouldn't happen. */
- printk(KERN_ERR "smc-ultra.c: Probe of ISAPnP card at %#lx failed.\n", dev->base_addr);
- pnp_device_detach(idev);
+ netdev_err(dev,
+ "smc-ultra.c: Probe of ISAPnP card at %#lx failed.\n",
+ dev->base_addr);
+ pnp_device_detach(idev);
return -ENXIO;
}
ei_status.priv = (unsigned long)idev;
@@ -412,9 +419,10 @@ static void
ultra_reset_8390(struct net_device *dev)
{
int cmd_port = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC base addr */
+ struct ei_device *ei_local = netdev_priv(dev);
outb(ULTRA_RESET, cmd_port);
- if (ei_debug > 1) printk("resetting Ultra, t=%ld...", jiffies);
+ netif_dbg(ei_local, hw, dev, "resetting Ultra, t=%ld...\n", jiffies);
ei_status.txing = 0;
outb(0x00, cmd_port); /* Disable shared memory for safety. */
@@ -424,7 +432,7 @@ ultra_reset_8390(struct net_device *dev)
else
outb(0x01, cmd_port + 6); /* Enable interrupts and memory. */
- if (ei_debug > 1) printk("reset done\n");
+ netif_dbg(ei_local, hw, dev, "reset done\n");
}
/* Grab the 8390 specific header. Similar to the block_input routine, but
@@ -530,11 +538,11 @@ static int
ultra_close_card(struct net_device *dev)
{
int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* CMDREG */
+ struct ei_device *ei_local = netdev_priv(dev);
netif_stop_queue(dev);
- if (ei_debug > 1)
- printk("%s: Shutting down ethercard.\n", dev->name);
+ netif_dbg(ei_local, ifdown, dev, "Shutting down ethercard.\n");
outb(0x00, ioaddr + 6); /* Disable interrupts. */
free_irq(dev->irq, dev);
@@ -556,8 +564,10 @@ static int irq[MAX_ULTRA_CARDS];
module_param_array(io, int, NULL, 0);
module_param_array(irq, int, NULL, 0);
+module_param_named(msg_enable, ultra_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH));
MODULE_PARM_DESC(io, "I/O base address(es)");
MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)");
+MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
MODULE_DESCRIPTION("SMC Ultra/EtherEZ ISA/PnP Ethernet driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/8390/stnic.c b/drivers/net/ethernet/8390/stnic.c
index 8df4c4157230..aca957d4e121 100644
--- a/drivers/net/ethernet/8390/stnic.c
+++ b/drivers/net/ethernet/8390/stnic.c
@@ -69,6 +69,11 @@ static void stnic_block_output (struct net_device *dev, int count,
static void stnic_init (struct net_device *dev);
+static u32 stnic_msg_enable;
+
+module_param_named(msg_enable, stnic_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH));
+MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
+
/* SH7750 specific read/write io. */
static inline void
STNIC_DELAY (void)
@@ -100,6 +105,7 @@ static int __init stnic_probe(void)
{
struct net_device *dev;
int i, err;
+ struct ei_device *ei_local;
/* If we are not running on a SolutionEngine, give up now */
if (! MACH_SE)
@@ -125,10 +131,10 @@ static int __init stnic_probe(void)
share and the board will usually be enabled. */
err = request_irq (dev->irq, ei_interrupt, 0, DRV_NAME, dev);
if (err) {
- printk (KERN_EMERG " unable to get IRQ %d.\n", dev->irq);
- free_netdev(dev);
- return err;
- }
+ netdev_emerg(dev, "unable to get IRQ %d.\n", dev->irq);
+ free_netdev(dev);
+ return err;
+ }
ei_status.name = dev->name;
ei_status.word16 = 1;
@@ -147,6 +153,8 @@ static int __init stnic_probe(void)
ei_status.block_output = &stnic_block_output;
stnic_init (dev);
+ ei_local = netdev_priv(dev);
+ ei_local->msg_enable = stnic_msg_enable;
err = register_netdev(dev);
if (err) {
@@ -156,7 +164,7 @@ static int __init stnic_probe(void)
}
stnic_dev = dev;
- printk (KERN_INFO "NS ST-NIC 83902A\n");
+ netdev_info(dev, "NS ST-NIC 83902A\n");
return 0;
}
@@ -164,10 +172,11 @@ static int __init stnic_probe(void)
static void
stnic_reset (struct net_device *dev)
{
+ struct ei_device *ei_local = netdev_priv(dev);
+
*(vhalf *) PA_83902_RST = 0;
udelay (5);
- if (ei_debug > 1)
- printk (KERN_WARNING "8390 reset done (%ld).\n", jiffies);
+ netif_warn(ei_local, hw, dev, "8390 reset done (%ld).\n", jiffies);
*(vhalf *) PA_83902_RST = ~0;
udelay (5);
}
@@ -176,6 +185,8 @@ static void
stnic_get_hdr (struct net_device *dev, struct e8390_pkt_hdr *hdr,
int ring_page)
{
+ struct ei_device *ei_local = netdev_priv(dev);
+
half buf[2];
STNIC_WRITE (PG0_RSAR0, 0);
@@ -196,8 +207,7 @@ stnic_get_hdr (struct net_device *dev, struct e8390_pkt_hdr *hdr,
hdr->count = ((buf[1] >> 8) & 0xff) | (buf[1] << 8);
#endif
- if (ei_debug > 1)
- printk (KERN_DEBUG "ring %x status %02x next %02x count %04x.\n",
+ netif_dbg(ei_local, probe, dev, "ring %x status %02x next %02x count %04x.\n",
ring_page, hdr->status, hdr->next, hdr->count);
STNIC_WRITE (STNIC_CR, CR_RDMA | CR_PG0 | CR_STA);
diff --git a/drivers/net/ethernet/8390/wd.c b/drivers/net/ethernet/8390/wd.c
index 03eb3eed49fa..dd7d816bde52 100644
--- a/drivers/net/ethernet/8390/wd.c
+++ b/drivers/net/ethernet/8390/wd.c
@@ -60,6 +60,7 @@ static void wd_block_output(struct net_device *dev, int count,
const unsigned char *buf, int start_page);
static int wd_close(struct net_device *dev);
+static u32 wd_msg_enable;
#define WD_START_PG 0x00 /* First page of TX buffer */
#define WD03_STOP_PG 0x20 /* Last page +1 of RX ring */
@@ -170,6 +171,7 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
int word16 = 0; /* 0 = 8 bit, 1 = 16 bit */
const char *model_name;
static unsigned version_printed;
+ struct ei_device *ei_local = netdev_priv(dev);
for (i = 0; i < 8; i++)
checksum += inb(ioaddr + 8 + i);
@@ -180,19 +182,19 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
/* Check for semi-valid mem_start/end values if supplied. */
if ((dev->mem_start % 0x2000) || (dev->mem_end % 0x2000)) {
- printk(KERN_WARNING "wd.c: user supplied mem_start or mem_end not on 8kB boundary - ignored.\n");
+ netdev_warn(dev,
+ "wd.c: user supplied mem_start or mem_end not on 8kB boundary - ignored.\n");
dev->mem_start = 0;
dev->mem_end = 0;
}
- if (ei_debug && version_printed++ == 0)
- printk(version);
+ if ((wd_msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0))
+ netdev_info(dev, "%s", version);
for (i = 0; i < 6; i++)
dev->dev_addr[i] = inb(ioaddr + 8 + i);
- printk("%s: WD80x3 at %#3x, %pM",
- dev->name, ioaddr, dev->dev_addr);
+ netdev_info(dev, "WD80x3 at %#3x, %pM", ioaddr, dev->dev_addr);
/* The following PureData probe code was contributed by
Mike Jagdis <jaggy@purplet.demon.co.uk>. Puredata does software
@@ -244,8 +246,9 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
}
#ifndef final_version
if ( !ancient && (inb(ioaddr+1) & 0x01) != (word16 & 0x01))
- printk("\nWD80?3: Bus width conflict, %d (probe) != %d (reg report).",
- word16 ? 16 : 8, (inb(ioaddr+1) & 0x01) ? 16 : 8);
+ pr_cont("\nWD80?3: Bus width conflict, %d (probe) != %d (reg report).",
+ word16 ? 16 : 8,
+ (inb(ioaddr+1) & 0x01) ? 16 : 8);
#endif
}
@@ -259,7 +262,7 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
if (reg0 == 0xff || reg0 == 0) {
/* Future plan: this could check a few likely locations first. */
dev->mem_start = 0xd0000;
- printk(" assigning address %#lx", dev->mem_start);
+ pr_cont(" assigning address %#lx", dev->mem_start);
} else {
int high_addr_bits = inb(ioaddr+WD_CMDREG5) & 0x1f;
/* Some boards don't have the register 5 -- it returns 0xff. */
@@ -297,8 +300,8 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
outb_p(0x00, nic_addr+EN0_IMR); /* Mask all intrs. again. */
- if (ei_debug > 2)
- printk(" autoirq is %d", dev->irq);
+ if (netif_msg_drv(ei_local))
+ pr_cont(" autoirq is %d", dev->irq);
if (dev->irq < 2)
dev->irq = word16 ? 10 : 5;
} else
@@ -310,7 +313,7 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
share and the board will usually be enabled. */
i = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev);
if (i) {
- printk (" unable to get IRQ %d.\n", dev->irq);
+ pr_cont(" unable to get IRQ %d.\n", dev->irq);
return i;
}
@@ -338,8 +341,8 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
return -ENOMEM;
}
- printk(" %s, IRQ %d, shared memory at %#lx-%#lx.\n",
- model_name, dev->irq, dev->mem_start, dev->mem_end-1);
+ pr_cont(" %s, IRQ %d, shared memory at %#lx-%#lx.\n",
+ model_name, dev->irq, dev->mem_start, dev->mem_end-1);
ei_status.reset_8390 = wd_reset_8390;
ei_status.block_input = wd_block_input;
@@ -348,6 +351,7 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
dev->netdev_ops = &wd_netdev_ops;
NS8390_init(dev, 0);
+ ei_local->msg_enable = wd_msg_enable;
#if 1
/* Enable interrupt generation on softconfig cards -- M.U */
@@ -385,9 +389,11 @@ static void
wd_reset_8390(struct net_device *dev)
{
int wd_cmd_port = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */
+ struct ei_device *ei_local = netdev_priv(dev);
outb(WD_RESET, wd_cmd_port);
- if (ei_debug > 1) printk("resetting the WD80x3 t=%lu...", jiffies);
+ netif_dbg(ei_local, hw, dev, "resetting the WD80x3 t=%lu...\n",
+ jiffies);
ei_status.txing = 0;
/* Set up the ASIC registers, just in case something changed them. */
@@ -395,7 +401,7 @@ wd_reset_8390(struct net_device *dev)
if (ei_status.word16)
outb(NIC16 | ((dev->mem_start>>19) & 0x1f), wd_cmd_port+WD_CMDREG5);
- if (ei_debug > 1) printk("reset done\n");
+ netif_dbg(ei_local, hw, dev, "reset done\n");
}
/* Grab the 8390 specific header. Similar to the block_input routine, but
@@ -474,9 +480,9 @@ static int
wd_close(struct net_device *dev)
{
int wd_cmdreg = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */
+ struct ei_device *ei_local = netdev_priv(dev);
- if (ei_debug > 1)
- printk("%s: Shutting down ethercard.\n", dev->name);
+ netif_dbg(ei_local, ifdown, dev, "Shutting down ethercard.\n");
ei_close(dev);
/* Change from 16-bit to 8-bit shared memory so reboot works. */
@@ -502,10 +508,12 @@ module_param_array(io, int, NULL, 0);
module_param_array(irq, int, NULL, 0);
module_param_array(mem, int, NULL, 0);
module_param_array(mem_end, int, NULL, 0);
+module_param_named(msg_enable, wd_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH));
MODULE_PARM_DESC(io, "I/O base address(es)");
MODULE_PARM_DESC(irq, "IRQ number(s) (ignored for PureData boards)");
MODULE_PARM_DESC(mem, "memory base address(es)(ignored for PureData boards)");
MODULE_PARM_DESC(mem_end, "memory end address(es)");
+MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
MODULE_DESCRIPTION("ISA Western Digital wd8003/wd8013 ; SMC Elite, Elite16 ethernet driver");
MODULE_LICENSE("GPL");
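For composing an msg_enable value by hand, the message classes used throughout these drivers come from the NETIF_MSG_* bits in <linux/netdevice.h>; abridged here for reference (values as found in the header of this era):

	/* Abridged excerpt: each bit enables one message class. */
	enum {
		NETIF_MSG_DRV		= 0x0001,
		NETIF_MSG_PROBE		= 0x0002,
		NETIF_MSG_LINK		= 0x0004,
		NETIF_MSG_IFDOWN	= 0x0010,
		NETIF_MSG_IFUP		= 0x0020,
		NETIF_MSG_RX_ERR	= 0x0040,
		NETIF_MSG_TX_ERR	= 0x0080,
		NETIF_MSG_TX_QUEUED	= 0x0100,
		NETIF_MSG_RX_STATUS	= 0x0800,
		NETIF_MSG_HW		= 0x2000,
		/* remaining bits omitted */
	};

Loading with msg_enable=0x2002, for example, would enable the probe and hw classes.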
diff --git a/drivers/net/ethernet/8390/zorro8390.c b/drivers/net/ethernet/8390/zorro8390.c
index ae2a12b7db62..8308728fad05 100644
--- a/drivers/net/ethernet/8390/zorro8390.c
+++ b/drivers/net/ethernet/8390/zorro8390.c
@@ -44,6 +44,8 @@
static const char version[] =
"8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+static u32 zorro8390_msg_enable;
+
#include "lib8390.c"
#define DRV_NAME "zorro8390"
@@ -86,9 +88,9 @@ static struct card_info {
static void zorro8390_reset_8390(struct net_device *dev)
{
unsigned long reset_start_time = jiffies;
+ struct ei_device *ei_local = netdev_priv(dev);
- if (ei_debug > 1)
- netdev_dbg(dev, "resetting - t=%ld...\n", jiffies);
+ netif_dbg(ei_local, hw, dev, "resetting - t=%ld...\n", jiffies);
z_writeb(z_readb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
@@ -119,8 +121,9 @@ static void zorro8390_get_8390_hdr(struct net_device *dev,
* If it does, it's the last thing you'll see
*/
if (ei_status.dmaing) {
- netdev_err(dev, "%s: DMAing conflict [DMAstat:%d][irqlock:%d]\n",
- __func__, ei_status.dmaing, ei_status.irqlock);
+ netdev_warn(dev,
+ "%s: DMAing conflict [DMAstat:%d][irqlock:%d]\n",
+ __func__, ei_status.dmaing, ei_status.irqlock);
return;
}
@@ -230,7 +233,7 @@ static void zorro8390_block_output(struct net_device *dev, int count,
while ((z_readb(NE_BASE + NE_EN0_ISR) & ENISR_RDC) == 0)
if (time_after(jiffies, dma_start + 2 * HZ / 100)) {
/* 20ms */
- netdev_err(dev, "timeout waiting for Tx RDC\n");
+ netdev_warn(dev, "timeout waiting for Tx RDC\n");
zorro8390_reset_8390(dev);
__NS8390_init(dev, 1);
break;
@@ -248,8 +251,9 @@ static int zorro8390_open(struct net_device *dev)
static int zorro8390_close(struct net_device *dev)
{
- if (ei_debug > 1)
- netdev_dbg(dev, "Shutting down ethercard\n");
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ netif_dbg(ei_local, ifdown, dev, "Shutting down ethercard\n");
__ei_close(dev);
return 0;
}
@@ -293,6 +297,7 @@ static int zorro8390_init(struct net_device *dev, unsigned long board,
int err;
unsigned char SA_prom[32];
int start_page, stop_page;
+ struct ei_device *ei_local = netdev_priv(dev);
static u32 zorro8390_offsets[16] = {
0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
@@ -383,6 +388,9 @@ static int zorro8390_init(struct net_device *dev, unsigned long board,
dev->netdev_ops = &zorro8390_netdev_ops;
__NS8390_init(dev, 0);
+
+ ei_local->msg_enable = zorro8390_msg_enable;
+
err = register_netdev(dev);
if (err) {
free_irq(IRQ_AMIGA_PORTS, dev);
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index 75fb1d20d6fd..c0f68dcd1dc1 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -667,8 +667,8 @@ static u32 bfin_select_phc_clock(u32 input_clk, unsigned int *shift_result)
return 1000000000UL / ppn;
}
-static int bfin_mac_hwtstamp_ioctl(struct net_device *netdev,
- struct ifreq *ifr, int cmd)
+static int bfin_mac_hwtstamp_set(struct net_device *netdev,
+ struct ifreq *ifr)
{
struct hwtstamp_config config;
struct bfin_mac_local *lp = netdev_priv(netdev);
@@ -824,6 +824,16 @@ static int bfin_mac_hwtstamp_ioctl(struct net_device *netdev,
-EFAULT : 0;
}
+static int bfin_mac_hwtstamp_get(struct net_device *netdev,
+ struct ifreq *ifr)
+{
+ struct bfin_mac_local *lp = netdev_priv(netdev);
+
+ return copy_to_user(ifr->ifr_data, &lp->stamp_cfg,
+ sizeof(lp->stamp_cfg)) ?
+ -EFAULT : 0;
+}
+
static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
{
struct bfin_mac_local *lp = netdev_priv(netdev);
@@ -1062,7 +1072,8 @@ static void bfin_phc_release(struct bfin_mac_local *lp)
#else
# define bfin_mac_hwtstamp_is_none(cfg) 0
# define bfin_mac_hwtstamp_init(dev)
-# define bfin_mac_hwtstamp_ioctl(dev, ifr, cmd) (-EOPNOTSUPP)
+# define bfin_mac_hwtstamp_set(dev, ifr) (-EOPNOTSUPP)
+# define bfin_mac_hwtstamp_get(dev, ifr) (-EOPNOTSUPP)
# define bfin_rx_hwtstamp(dev, skb)
# define bfin_tx_hwtstamp(dev, skb)
# define bfin_phc_init(netdev, dev) 0
@@ -1496,7 +1507,9 @@ static int bfin_mac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
switch (cmd) {
case SIOCSHWTSTAMP:
- return bfin_mac_hwtstamp_ioctl(netdev, ifr, cmd);
+ return bfin_mac_hwtstamp_set(netdev, ifr);
+ case SIOCGHWTSTAMP:
+ return bfin_mac_hwtstamp_get(netdev, ifr);
default:
if (lp->phydev)
return phy_mii_ioctl(lp->phydev, ifr, cmd);
@@ -1544,7 +1557,6 @@ static int bfin_mac_open(struct net_device *dev)
return ret;
phy_start(lp->phydev);
- phy_write(lp->phydev, MII_BMCR, BMCR_RESET);
setup_system_regs(dev);
setup_mac_addr(dev->dev_addr);
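The bfin_mac change splits the old SIOCSHWTSTAMP-only handler so the new SIOCGHWTSTAMP ioctl can report the saved stamp_cfg back to userspace. A minimal userspace sketch of such a query, assuming a device whose driver implements the ioctl (error handling abbreviated):

	/* Userspace sketch (not from the patch): query the current hardware
	 * timestamping configuration through SIOCGHWTSTAMP.
	 */
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>

	static int get_hwtstamp(int sock, const char *ifname,
				struct hwtstamp_config *cfg)
	{
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		memset(cfg, 0, sizeof(*cfg));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (char *)cfg;	/* driver copies its stamp_cfg here */

		return ioctl(sock, SIOCGHWTSTAMP, &ifr);	/* 0 on success */
	}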
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index e06694555144..c5d75e7aeeb6 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -25,7 +25,6 @@
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/uaccess.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -1361,7 +1360,7 @@ static int greth_mdio_init(struct greth_private *greth)
timeout = jiffies + 6*HZ;
while (!phy_aneg_done(greth->phy) && time_before(jiffies, timeout)) {
}
- genphy_read_status(greth->phy);
+ phy_read_status(greth->phy);
greth_link_change(greth->netdev);
}
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 46dfb1378c17..0cc21437478c 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -16,7 +16,6 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/gpio.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/mii.h>
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 219be1bf3cfc..1517e9df5ba1 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -61,7 +61,6 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/highmem.h>
diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c
index 65926a956575..18e542f7853d 100644
--- a/drivers/net/ethernet/amd/7990.c
+++ b/drivers/net/ethernet/amd/7990.c
@@ -17,7 +17,6 @@
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -42,9 +41,9 @@
#include "7990.h"
-#define WRITERAP(lp,x) out_be16(lp->base + LANCE_RAP, (x))
-#define WRITERDP(lp,x) out_be16(lp->base + LANCE_RDP, (x))
-#define READRDP(lp) in_be16(lp->base + LANCE_RDP)
+#define WRITERAP(lp, x) out_be16(lp->base + LANCE_RAP, (x))
+#define WRITERDP(lp, x) out_be16(lp->base + LANCE_RDP, (x))
+#define READRDP(lp) in_be16(lp->base + LANCE_RDP)
#if defined(CONFIG_HPLANCE) || defined(CONFIG_HPLANCE_MODULE)
#include "hplance.h"
@@ -56,9 +55,9 @@
#if defined(CONFIG_MVME147_NET) || defined(CONFIG_MVME147_NET_MODULE)
/* Lossage Factor Nine, Mr Sulu. */
-#define WRITERAP(lp,x) (lp->writerap(lp,x))
-#define WRITERDP(lp,x) (lp->writerdp(lp,x))
-#define READRDP(lp) (lp->readrdp(lp))
+#define WRITERAP(lp, x) (lp->writerap(lp, x))
+#define WRITERDP(lp, x) (lp->writerdp(lp, x))
+#define READRDP(lp) (lp->readrdp(lp))
#else
@@ -94,428 +93,436 @@ static inline __u16 READRDP(struct lance_private *lp)
#ifdef UNDEF
#define PRINT_RINGS() \
do { \
- int t; \
- for (t=0; t < RX_RING_SIZE; t++) { \
- printk("R%d: @(%02X %04X) len %04X, mblen %04X, bits %02X\n",\
- t, ib->brx_ring[t].rmd1_hadr, ib->brx_ring[t].rmd0,\
- ib->brx_ring[t].length,\
- ib->brx_ring[t].mblength, ib->brx_ring[t].rmd1_bits);\
- }\
- for (t=0; t < TX_RING_SIZE; t++) { \
- printk("T%d: @(%02X %04X) len %04X, misc %04X, bits %02X\n",\
- t, ib->btx_ring[t].tmd1_hadr, ib->btx_ring[t].tmd0,\
- ib->btx_ring[t].length,\
- ib->btx_ring[t].misc, ib->btx_ring[t].tmd1_bits);\
- }\
+ int t; \
+ for (t = 0; t < RX_RING_SIZE; t++) { \
+ printk("R%d: @(%02X %04X) len %04X, mblen %04X, bits %02X\n", \
+ t, ib->brx_ring[t].rmd1_hadr, ib->brx_ring[t].rmd0, \
+ ib->brx_ring[t].length, \
+ ib->brx_ring[t].mblength, ib->brx_ring[t].rmd1_bits); \
+ } \
+ for (t = 0; t < TX_RING_SIZE; t++) { \
+ printk("T%d: @(%02X %04X) len %04X, misc %04X, bits %02X\n", \
+ t, ib->btx_ring[t].tmd1_hadr, ib->btx_ring[t].tmd0, \
+ ib->btx_ring[t].length, \
+ ib->btx_ring[t].misc, ib->btx_ring[t].tmd1_bits); \
+ } \
} while (0)
#else
#define PRINT_RINGS()
#endif
/* Load the CSR registers. The LANCE has to be STOPped when we do this! */
-static void load_csrs (struct lance_private *lp)
+static void load_csrs(struct lance_private *lp)
{
- volatile struct lance_init_block *aib = lp->lance_init_block;
- int leptr;
+ volatile struct lance_init_block *aib = lp->lance_init_block;
+ int leptr;
- leptr = LANCE_ADDR (aib);
+ leptr = LANCE_ADDR(aib);
- WRITERAP(lp, LE_CSR1); /* load address of init block */
- WRITERDP(lp, leptr & 0xFFFF);
- WRITERAP(lp, LE_CSR2);
- WRITERDP(lp, leptr >> 16);
- WRITERAP(lp, LE_CSR3);
- WRITERDP(lp, lp->busmaster_regval); /* set byteswap/ALEctrl/byte ctrl */
+ WRITERAP(lp, LE_CSR1); /* load address of init block */
+ WRITERDP(lp, leptr & 0xFFFF);
+ WRITERAP(lp, LE_CSR2);
+ WRITERDP(lp, leptr >> 16);
+ WRITERAP(lp, LE_CSR3);
+ WRITERDP(lp, lp->busmaster_regval); /* set byteswap/ALEctrl/byte ctrl */
- /* Point back to csr0 */
- WRITERAP(lp, LE_CSR0);
+ /* Point back to csr0 */
+ WRITERAP(lp, LE_CSR0);
}
/* #define to 0 or 1 appropriately */
#define DEBUG_IRING 0
/* Set up the Lance Rx and Tx rings and the init block */
-static void lance_init_ring (struct net_device *dev)
+static void lance_init_ring(struct net_device *dev)
{
- struct lance_private *lp = netdev_priv(dev);
- volatile struct lance_init_block *ib = lp->init_block;
- volatile struct lance_init_block *aib; /* for LANCE_ADDR computations */
- int leptr;
- int i;
-
- aib = lp->lance_init_block;
-
- lp->rx_new = lp->tx_new = 0;
- lp->rx_old = lp->tx_old = 0;
-
- ib->mode = LE_MO_PROM; /* normal, enable Tx & Rx */
-
- /* Copy the ethernet address to the lance init block
- * Notice that we do a byteswap if we're big endian.
- * [I think this is the right criterion; at least, sunlance,
- * a2065 and atarilance do the byteswap and lance.c (PC) doesn't.
- * However, the datasheet says that the BSWAP bit doesn't affect
- * the init block, so surely it should be low byte first for
- * everybody? Um.]
- * We could define the ib->physaddr as three 16bit values and
- * use (addr[1] << 8) | addr[0] & co, but this is more efficient.
- */
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile struct lance_init_block *aib; /* for LANCE_ADDR computations */
+ int leptr;
+ int i;
+
+ aib = lp->lance_init_block;
+
+ lp->rx_new = lp->tx_new = 0;
+ lp->rx_old = lp->tx_old = 0;
+
+ ib->mode = LE_MO_PROM; /* normal, enable Tx & Rx */
+
+ /* Copy the ethernet address to the lance init block
+ * Notice that we do a byteswap if we're big endian.
+ * [I think this is the right criterion; at least, sunlance,
+ * a2065 and atarilance do the byteswap and lance.c (PC) doesn't.
+ * However, the datasheet says that the BSWAP bit doesn't affect
+ * the init block, so surely it should be low byte first for
+ * everybody? Um.]
+ * We could define the ib->physaddr as three 16bit values and
+ * use (addr[1] << 8) | addr[0] & co, but this is more efficient.
+ */
#ifdef __BIG_ENDIAN
- ib->phys_addr [0] = dev->dev_addr [1];
- ib->phys_addr [1] = dev->dev_addr [0];
- ib->phys_addr [2] = dev->dev_addr [3];
- ib->phys_addr [3] = dev->dev_addr [2];
- ib->phys_addr [4] = dev->dev_addr [5];
- ib->phys_addr [5] = dev->dev_addr [4];
+ ib->phys_addr[0] = dev->dev_addr[1];
+ ib->phys_addr[1] = dev->dev_addr[0];
+ ib->phys_addr[2] = dev->dev_addr[3];
+ ib->phys_addr[3] = dev->dev_addr[2];
+ ib->phys_addr[4] = dev->dev_addr[5];
+ ib->phys_addr[5] = dev->dev_addr[4];
#else
- for (i=0; i<6; i++)
- ib->phys_addr[i] = dev->dev_addr[i];
+ for (i = 0; i < 6; i++)
+ ib->phys_addr[i] = dev->dev_addr[i];
#endif
- if (DEBUG_IRING)
- printk ("TX rings:\n");
+ if (DEBUG_IRING)
+ printk("TX rings:\n");
lp->tx_full = 0;
- /* Setup the Tx ring entries */
- for (i = 0; i < (1<<lp->lance_log_tx_bufs); i++) {
- leptr = LANCE_ADDR(&aib->tx_buf[i][0]);
- ib->btx_ring [i].tmd0 = leptr;
- ib->btx_ring [i].tmd1_hadr = leptr >> 16;
- ib->btx_ring [i].tmd1_bits = 0;
- ib->btx_ring [i].length = 0xf000; /* The ones required by tmd2 */
- ib->btx_ring [i].misc = 0;
- if (DEBUG_IRING)
- printk ("%d: 0x%8.8x\n", i, leptr);
- }
-
- /* Setup the Rx ring entries */
- if (DEBUG_IRING)
- printk ("RX rings:\n");
- for (i = 0; i < (1<<lp->lance_log_rx_bufs); i++) {
- leptr = LANCE_ADDR(&aib->rx_buf[i][0]);
-
- ib->brx_ring [i].rmd0 = leptr;
- ib->brx_ring [i].rmd1_hadr = leptr >> 16;
- ib->brx_ring [i].rmd1_bits = LE_R1_OWN;
- /* 0xf000 == bits that must be one (reserved, presumably) */
- ib->brx_ring [i].length = -RX_BUFF_SIZE | 0xf000;
- ib->brx_ring [i].mblength = 0;
- if (DEBUG_IRING)
- printk ("%d: 0x%8.8x\n", i, leptr);
- }
-
- /* Setup the initialization block */
-
- /* Setup rx descriptor pointer */
- leptr = LANCE_ADDR(&aib->brx_ring);
- ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16);
- ib->rx_ptr = leptr;
- if (DEBUG_IRING)
- printk ("RX ptr: %8.8x\n", leptr);
-
- /* Setup tx descriptor pointer */
- leptr = LANCE_ADDR(&aib->btx_ring);
- ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16);
- ib->tx_ptr = leptr;
- if (DEBUG_IRING)
- printk ("TX ptr: %8.8x\n", leptr);
-
- /* Clear the multicast filter */
- ib->filter [0] = 0;
- ib->filter [1] = 0;
- PRINT_RINGS();
+ /* Setup the Tx ring entries */
+ for (i = 0; i < (1 << lp->lance_log_tx_bufs); i++) {
+ leptr = LANCE_ADDR(&aib->tx_buf[i][0]);
+ ib->btx_ring[i].tmd0 = leptr;
+ ib->btx_ring[i].tmd1_hadr = leptr >> 16;
+ ib->btx_ring[i].tmd1_bits = 0;
+ ib->btx_ring[i].length = 0xf000; /* The ones required by tmd2 */
+ ib->btx_ring[i].misc = 0;
+ if (DEBUG_IRING)
+ printk("%d: 0x%8.8x\n", i, leptr);
+ }
+
+ /* Setup the Rx ring entries */
+ if (DEBUG_IRING)
+ printk("RX rings:\n");
+ for (i = 0; i < (1 << lp->lance_log_rx_bufs); i++) {
+ leptr = LANCE_ADDR(&aib->rx_buf[i][0]);
+
+ ib->brx_ring[i].rmd0 = leptr;
+ ib->brx_ring[i].rmd1_hadr = leptr >> 16;
+ ib->brx_ring[i].rmd1_bits = LE_R1_OWN;
+ /* 0xf000 == bits that must be one (reserved, presumably) */
+ ib->brx_ring[i].length = -RX_BUFF_SIZE | 0xf000;
+ ib->brx_ring[i].mblength = 0;
+ if (DEBUG_IRING)
+ printk("%d: 0x%8.8x\n", i, leptr);
+ }
+
+ /* Setup the initialization block */
+
+ /* Setup rx descriptor pointer */
+ leptr = LANCE_ADDR(&aib->brx_ring);
+ ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16);
+ ib->rx_ptr = leptr;
+ if (DEBUG_IRING)
+ printk("RX ptr: %8.8x\n", leptr);
+
+ /* Setup tx descriptor pointer */
+ leptr = LANCE_ADDR(&aib->btx_ring);
+ ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16);
+ ib->tx_ptr = leptr;
+ if (DEBUG_IRING)
+ printk("TX ptr: %8.8x\n", leptr);
+
+ /* Clear the multicast filter */
+ ib->filter[0] = 0;
+ ib->filter[1] = 0;
+ PRINT_RINGS();
}
/* LANCE must be STOPped before we do this, too... */
-static int init_restart_lance (struct lance_private *lp)
+static int init_restart_lance(struct lance_private *lp)
{
- int i;
+ int i;
- WRITERAP(lp, LE_CSR0);
- WRITERDP(lp, LE_C0_INIT);
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_INIT);
- /* Need a hook here for sunlance ledma stuff */
+ /* Need a hook here for sunlance ledma stuff */
- /* Wait for the lance to complete initialization */
- for (i = 0; (i < 100) && !(READRDP(lp) & (LE_C0_ERR | LE_C0_IDON)); i++)
- barrier();
- if ((i == 100) || (READRDP(lp) & LE_C0_ERR)) {
- printk ("LANCE unopened after %d ticks, csr0=%4.4x.\n", i, READRDP(lp));
- return -1;
- }
+ /* Wait for the lance to complete initialization */
+ for (i = 0; (i < 100) && !(READRDP(lp) & (LE_C0_ERR | LE_C0_IDON)); i++)
+ barrier();
+ if ((i == 100) || (READRDP(lp) & LE_C0_ERR)) {
+ printk("LANCE unopened after %d ticks, csr0=%4.4x.\n", i, READRDP(lp));
+ return -1;
+ }
- /* Clear IDON by writing a "1", enable interrupts and start lance */
- WRITERDP(lp, LE_C0_IDON);
- WRITERDP(lp, LE_C0_INEA | LE_C0_STRT);
+ /* Clear IDON by writing a "1", enable interrupts and start lance */
+ WRITERDP(lp, LE_C0_IDON);
+ WRITERDP(lp, LE_C0_INEA | LE_C0_STRT);
- return 0;
+ return 0;
}
-static int lance_reset (struct net_device *dev)
+static int lance_reset(struct net_device *dev)
{
- struct lance_private *lp = netdev_priv(dev);
- int status;
+ struct lance_private *lp = netdev_priv(dev);
+ int status;
- /* Stop the lance */
- WRITERAP(lp, LE_CSR0);
- WRITERDP(lp, LE_C0_STOP);
+ /* Stop the lance */
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_STOP);
- load_csrs (lp);
- lance_init_ring (dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
- status = init_restart_lance (lp);
+ load_csrs(lp);
+ lance_init_ring(dev);
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ status = init_restart_lance(lp);
#ifdef DEBUG_DRIVER
- printk ("Lance restart=%d\n", status);
+ printk("Lance restart=%d\n", status);
#endif
- return status;
+ return status;
}
-static int lance_rx (struct net_device *dev)
+static int lance_rx(struct net_device *dev)
{
- struct lance_private *lp = netdev_priv(dev);
- volatile struct lance_init_block *ib = lp->init_block;
- volatile struct lance_rx_desc *rd;
- unsigned char bits;
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile struct lance_rx_desc *rd;
+ unsigned char bits;
#ifdef TEST_HITS
- int i;
+ int i;
#endif
#ifdef TEST_HITS
- printk ("[");
- for (i = 0; i < RX_RING_SIZE; i++) {
- if (i == lp->rx_new)
- printk ("%s",
- ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "_" : "X");
- else
- printk ("%s",
- ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "." : "1");
- }
- printk ("]");
+ printk("[");
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ if (i == lp->rx_new)
+ printk("%s",
+ ib->brx_ring[i].rmd1_bits & LE_R1_OWN ? "_" : "X");
+ else
+ printk("%s",
+ ib->brx_ring[i].rmd1_bits & LE_R1_OWN ? "." : "1");
+ }
+ printk("]");
#endif
#ifdef CONFIG_HP300
blinken_leds(0x40, 0);
#endif
- WRITERDP(lp, LE_C0_RINT | LE_C0_INEA); /* ack Rx int, reenable ints */
- for (rd = &ib->brx_ring [lp->rx_new]; /* For each Rx ring we own... */
- !((bits = rd->rmd1_bits) & LE_R1_OWN);
- rd = &ib->brx_ring [lp->rx_new]) {
-
- /* We got an incomplete frame? */
- if ((bits & LE_R1_POK) != LE_R1_POK) {
- dev->stats.rx_over_errors++;
- dev->stats.rx_errors++;
- continue;
- } else if (bits & LE_R1_ERR) {
- /* Count only the end frame as a rx error,
- * not the beginning
- */
- if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++;
- if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++;
- if (bits & LE_R1_OFL) dev->stats.rx_over_errors++;
- if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++;
- if (bits & LE_R1_EOP) dev->stats.rx_errors++;
- } else {
+ WRITERDP(lp, LE_C0_RINT | LE_C0_INEA); /* ack Rx int, reenable ints */
+ for (rd = &ib->brx_ring[lp->rx_new]; /* For each Rx ring we own... */
+ !((bits = rd->rmd1_bits) & LE_R1_OWN);
+ rd = &ib->brx_ring[lp->rx_new]) {
+
+ /* We got an incomplete frame? */
+ if ((bits & LE_R1_POK) != LE_R1_POK) {
+ dev->stats.rx_over_errors++;
+ dev->stats.rx_errors++;
+ continue;
+ } else if (bits & LE_R1_ERR) {
+ /* Count only the end frame as a rx error,
+ * not the beginning
+ */
+ if (bits & LE_R1_BUF)
+ dev->stats.rx_fifo_errors++;
+ if (bits & LE_R1_CRC)
+ dev->stats.rx_crc_errors++;
+ if (bits & LE_R1_OFL)
+ dev->stats.rx_over_errors++;
+ if (bits & LE_R1_FRA)
+ dev->stats.rx_frame_errors++;
+ if (bits & LE_R1_EOP)
+ dev->stats.rx_errors++;
+ } else {
int len = (rd->mblength & 0xfff) - 4;
struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);
- if (!skb) {
- dev->stats.rx_dropped++;
- rd->mblength = 0;
- rd->rmd1_bits = LE_R1_OWN;
- lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
- return 0;
- }
-
- skb_reserve (skb, 2); /* 16 byte align */
- skb_put (skb, len); /* make room */
- skb_copy_to_linear_data(skb,
- (unsigned char *)&(ib->rx_buf [lp->rx_new][0]),
- len);
- skb->protocol = eth_type_trans (skb, dev);
- netif_rx (skb);
+ if (!skb) {
+ dev->stats.rx_dropped++;
+ rd->mblength = 0;
+ rd->rmd1_bits = LE_R1_OWN;
+ lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
+ return 0;
+ }
+
+ skb_reserve(skb, 2); /* 16 byte align */
+ skb_put(skb, len); /* make room */
+ skb_copy_to_linear_data(skb,
+ (unsigned char *)&(ib->rx_buf[lp->rx_new][0]),
+ len);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
- }
-
- /* Return the packet to the pool */
- rd->mblength = 0;
- rd->rmd1_bits = LE_R1_OWN;
- lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
- }
- return 0;
+ }
+
+ /* Return the packet to the pool */
+ rd->mblength = 0;
+ rd->rmd1_bits = LE_R1_OWN;
+ lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
+ }
+ return 0;
}
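
lance_rx() above is a producer/consumer handshake on the descriptor OWN bit: the host only walks descriptors whose LE_R1_OWN bit is clear, and hands each buffer back to the chip by setting OWN again. A toy user-space rendering of the same protocol (hypothetical types, not driver code):

#include <stdio.h>

#define OWN	0x80			/* the device owns this descriptor */
#define RING	8			/* must be a power of two */

struct desc {
	unsigned char bits;
	int len;
};

static void consume(struct desc *ring, int *next)
{
	/* Stop at the first descriptor still owned by the device. */
	while (!(ring[*next].bits & OWN)) {
		printf("frame of %d bytes in slot %d\n",
		       ring[*next].len, *next);
		ring[*next].bits = OWN;	/* return the buffer to the device */
		*next = (*next + 1) & (RING - 1);
	}
}

int main(void)
{
	struct desc ring[RING];
	int i, next = 0;

	for (i = 0; i < RING; i++)
		ring[i].bits = OWN;	/* device owns everything at first */

	/* Pretend the device completed two receives. */
	ring[0].bits = 0; ring[0].len = 60;
	ring[1].bits = 0; ring[1].len = 1514;

	consume(ring, &next);
	return 0;
}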
-static int lance_tx (struct net_device *dev)
+static int lance_tx(struct net_device *dev)
{
- struct lance_private *lp = netdev_priv(dev);
- volatile struct lance_init_block *ib = lp->init_block;
- volatile struct lance_tx_desc *td;
- int i, j;
- int status;
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile struct lance_tx_desc *td;
+ int i, j;
+ int status;
#ifdef CONFIG_HP300
blinken_leds(0x80, 0);
#endif
- /* csr0 is 2f3 */
- WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
- /* csr0 is 73 */
-
- j = lp->tx_old;
- for (i = j; i != lp->tx_new; i = j) {
- td = &ib->btx_ring [i];
-
- /* If we hit a packet not owned by us, stop */
- if (td->tmd1_bits & LE_T1_OWN)
- break;
-
- if (td->tmd1_bits & LE_T1_ERR) {
- status = td->misc;
-
- dev->stats.tx_errors++;
- if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++;
- if (status & LE_T3_LCOL) dev->stats.tx_window_errors++;
-
- if (status & LE_T3_CLOS) {
- dev->stats.tx_carrier_errors++;
- if (lp->auto_select) {
- lp->tpe = 1 - lp->tpe;
- printk("%s: Carrier Lost, trying %s\n",
- dev->name, lp->tpe?"TPE":"AUI");
- /* Stop the lance */
- WRITERAP(lp, LE_CSR0);
- WRITERDP(lp, LE_C0_STOP);
- lance_init_ring (dev);
- load_csrs (lp);
- init_restart_lance (lp);
- return 0;
- }
- }
-
- /* buffer errors and underflows turn off the transmitter */
- /* Restart the adapter */
- if (status & (LE_T3_BUF|LE_T3_UFL)) {
- dev->stats.tx_fifo_errors++;
-
- printk ("%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
- dev->name);
- /* Stop the lance */
- WRITERAP(lp, LE_CSR0);
- WRITERDP(lp, LE_C0_STOP);
- lance_init_ring (dev);
- load_csrs (lp);
- init_restart_lance (lp);
- return 0;
- }
- } else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
- /*
- * So we don't count the packet more than once.
- */
- td->tmd1_bits &= ~(LE_T1_POK);
-
- /* One collision before packet was sent. */
- if (td->tmd1_bits & LE_T1_EONE)
- dev->stats.collisions++;
-
- /* More than one collision, be optimistic. */
- if (td->tmd1_bits & LE_T1_EMORE)
- dev->stats.collisions += 2;
-
- dev->stats.tx_packets++;
- }
-
- j = (j + 1) & lp->tx_ring_mod_mask;
- }
- lp->tx_old = j;
- WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
- return 0;
+ /* csr0 is 2f3 */
+ WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
+ /* csr0 is 73 */
+
+ j = lp->tx_old;
+ for (i = j; i != lp->tx_new; i = j) {
+ td = &ib->btx_ring[i];
+
+ /* If we hit a packet not owned by us, stop */
+ if (td->tmd1_bits & LE_T1_OWN)
+ break;
+
+ if (td->tmd1_bits & LE_T1_ERR) {
+ status = td->misc;
+
+ dev->stats.tx_errors++;
+ if (status & LE_T3_RTY)
+ dev->stats.tx_aborted_errors++;
+ if (status & LE_T3_LCOL)
+ dev->stats.tx_window_errors++;
+
+ if (status & LE_T3_CLOS) {
+ dev->stats.tx_carrier_errors++;
+ if (lp->auto_select) {
+ lp->tpe = 1 - lp->tpe;
+ printk("%s: Carrier Lost, trying %s\n",
+ dev->name,
+ lp->tpe ? "TPE" : "AUI");
+ /* Stop the lance */
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_STOP);
+ lance_init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ return 0;
+ }
+ }
+
+ /* buffer errors and underflows turn off the transmitter */
+ /* Restart the adapter */
+ if (status & (LE_T3_BUF|LE_T3_UFL)) {
+ dev->stats.tx_fifo_errors++;
+
+ printk("%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
+ dev->name);
+ /* Stop the lance */
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_STOP);
+ lance_init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ return 0;
+ }
+ } else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
+ /*
+ * So we don't count the packet more than once.
+ */
+ td->tmd1_bits &= ~(LE_T1_POK);
+
+ /* One collision before packet was sent. */
+ if (td->tmd1_bits & LE_T1_EONE)
+ dev->stats.collisions++;
+
+ /* More than one collision, be optimistic. */
+ if (td->tmd1_bits & LE_T1_EMORE)
+ dev->stats.collisions += 2;
+
+ dev->stats.tx_packets++;
+ }
+
+ j = (j + 1) & lp->tx_ring_mod_mask;
+ }
+ lp->tx_old = j;
+ WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
+ return 0;
}
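
Both ring walkers depend on the ring sizes being powers of two, so advancing an index is (i + 1) & mod_mask rather than a division-based modulo. A quick demonstration of the equivalence, independent of the driver:

#include <stdio.h>

int main(void)
{
	const unsigned int size = 1 << 4;	/* any power of two */
	const unsigned int mask = size - 1;
	unsigned int i;

	for (i = 14; i < 18; i++)
		printf("%2u -> masked %2u, modulo %2u\n",
		       i, i & mask, i % size);
	return 0;
}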
static irqreturn_t
-lance_interrupt (int irq, void *dev_id)
+lance_interrupt(int irq, void *dev_id)
{
- struct net_device *dev = (struct net_device *)dev_id;
- struct lance_private *lp = netdev_priv(dev);
- int csr0;
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct lance_private *lp = netdev_priv(dev);
+ int csr0;
- spin_lock (&lp->devlock);
+ spin_lock(&lp->devlock);
- WRITERAP(lp, LE_CSR0); /* LANCE Controller Status */
- csr0 = READRDP(lp);
+ WRITERAP(lp, LE_CSR0); /* LANCE Controller Status */
+ csr0 = READRDP(lp);
- PRINT_RINGS();
+ PRINT_RINGS();
- if (!(csr0 & LE_C0_INTR)) { /* Check if any interrupt has */
- spin_unlock (&lp->devlock);
- return IRQ_NONE; /* been generated by the Lance. */
+ if (!(csr0 & LE_C0_INTR)) { /* Check if any interrupt has */
+ spin_unlock(&lp->devlock);
+ return IRQ_NONE; /* been generated by the Lance. */
}
- /* Acknowledge all the interrupt sources ASAP */
- WRITERDP(lp, csr0 & ~(LE_C0_INEA|LE_C0_TDMD|LE_C0_STOP|LE_C0_STRT|LE_C0_INIT));
-
- if ((csr0 & LE_C0_ERR)) {
- /* Clear the error condition */
- WRITERDP(lp, LE_C0_BABL|LE_C0_ERR|LE_C0_MISS|LE_C0_INEA);
- }
-
- if (csr0 & LE_C0_RINT)
- lance_rx (dev);
-
- if (csr0 & LE_C0_TINT)
- lance_tx (dev);
-
- /* Log misc errors. */
- if (csr0 & LE_C0_BABL)
- dev->stats.tx_errors++; /* Tx babble. */
- if (csr0 & LE_C0_MISS)
- dev->stats.rx_errors++; /* Missed a Rx frame. */
- if (csr0 & LE_C0_MERR) {
- printk("%s: Bus master arbitration failure, status %4.4x.\n",
- dev->name, csr0);
- /* Restart the chip. */
- WRITERDP(lp, LE_C0_STRT);
- }
-
- if (lp->tx_full && netif_queue_stopped(dev) && (TX_BUFFS_AVAIL >= 0)) {
+ /* Acknowledge all the interrupt sources ASAP */
+ WRITERDP(lp, csr0 & ~(LE_C0_INEA|LE_C0_TDMD|LE_C0_STOP|LE_C0_STRT|LE_C0_INIT));
+
+ if ((csr0 & LE_C0_ERR)) {
+ /* Clear the error condition */
+ WRITERDP(lp, LE_C0_BABL|LE_C0_ERR|LE_C0_MISS|LE_C0_INEA);
+ }
+
+ if (csr0 & LE_C0_RINT)
+ lance_rx(dev);
+
+ if (csr0 & LE_C0_TINT)
+ lance_tx(dev);
+
+ /* Log misc errors. */
+ if (csr0 & LE_C0_BABL)
+ dev->stats.tx_errors++; /* Tx babble. */
+ if (csr0 & LE_C0_MISS)
+ dev->stats.rx_errors++; /* Missed a Rx frame. */
+ if (csr0 & LE_C0_MERR) {
+ printk("%s: Bus master arbitration failure, status %4.4x.\n",
+ dev->name, csr0);
+ /* Restart the chip. */
+ WRITERDP(lp, LE_C0_STRT);
+ }
+
+ if (lp->tx_full && netif_queue_stopped(dev) && (TX_BUFFS_AVAIL >= 0)) {
lp->tx_full = 0;
- netif_wake_queue (dev);
- }
+ netif_wake_queue(dev);
+ }
- WRITERAP(lp, LE_CSR0);
- WRITERDP(lp, LE_C0_BABL|LE_C0_CERR|LE_C0_MISS|LE_C0_MERR|LE_C0_IDON|LE_C0_INEA);
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_BABL|LE_C0_CERR|LE_C0_MISS|LE_C0_MERR|LE_C0_IDON|LE_C0_INEA);
- spin_unlock (&lp->devlock);
+ spin_unlock(&lp->devlock);
return IRQ_HANDLED;
}
-int lance_open (struct net_device *dev)
+int lance_open(struct net_device *dev)
{
- struct lance_private *lp = netdev_priv(dev);
+ struct lance_private *lp = netdev_priv(dev);
int res;
- /* Install the Interrupt handler. Or we could shunt this out to specific drivers? */
- if (request_irq(lp->irq, lance_interrupt, IRQF_SHARED, lp->name, dev))
- return -EAGAIN;
+ /* Install the Interrupt handler. Or we could shunt this out to specific drivers? */
+ if (request_irq(lp->irq, lance_interrupt, IRQF_SHARED, lp->name, dev))
+ return -EAGAIN;
- res = lance_reset(dev);
+ res = lance_reset(dev);
spin_lock_init(&lp->devlock);
- netif_start_queue (dev);
+ netif_start_queue(dev);
return res;
}
EXPORT_SYMBOL_GPL(lance_open);
-int lance_close (struct net_device *dev)
+int lance_close(struct net_device *dev)
{
- struct lance_private *lp = netdev_priv(dev);
+ struct lance_private *lp = netdev_priv(dev);
- netif_stop_queue (dev);
+ netif_stop_queue(dev);
- /* Stop the LANCE */
- WRITERAP(lp, LE_CSR0);
- WRITERDP(lp, LE_C0_STOP);
+ /* Stop the LANCE */
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_STOP);
- free_irq(lp->irq, dev);
+ free_irq(lp->irq, dev);
- return 0;
+ return 0;
}
EXPORT_SYMBOL_GPL(lance_close);
@@ -524,122 +531,122 @@ void lance_tx_timeout(struct net_device *dev)
printk("lance_tx_timeout\n");
lance_reset(dev);
dev->trans_start = jiffies; /* prevent tx timeout */
- netif_wake_queue (dev);
+ netif_wake_queue(dev);
}
EXPORT_SYMBOL_GPL(lance_tx_timeout);
-int lance_start_xmit (struct sk_buff *skb, struct net_device *dev)
+int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct lance_private *lp = netdev_priv(dev);
- volatile struct lance_init_block *ib = lp->init_block;
- int entry, skblen, len;
- static int outs;
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ int entry, skblen, len;
+ static int outs;
unsigned long flags;
- if (!TX_BUFFS_AVAIL)
- return NETDEV_TX_LOCKED;
+ if (!TX_BUFFS_AVAIL)
+ return NETDEV_TX_LOCKED;
- netif_stop_queue (dev);
+ netif_stop_queue(dev);
- skblen = skb->len;
+ skblen = skb->len;
#ifdef DEBUG_DRIVER
- /* dump the packet */
- {
- int i;
-
- for (i = 0; i < 64; i++) {
- if ((i % 16) == 0)
- printk ("\n");
- printk ("%2.2x ", skb->data [i]);
- }
- }
+ /* dump the packet */
+ {
+ int i;
+
+ for (i = 0; i < 64; i++) {
+ if ((i % 16) == 0)
+ printk("\n");
+ printk("%2.2x ", skb->data[i]);
+ }
+ }
#endif
- len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen;
- entry = lp->tx_new & lp->tx_ring_mod_mask;
- ib->btx_ring [entry].length = (-len) | 0xf000;
- ib->btx_ring [entry].misc = 0;
+ len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen;
+ entry = lp->tx_new & lp->tx_ring_mod_mask;
+ ib->btx_ring[entry].length = (-len) | 0xf000;
+ ib->btx_ring[entry].misc = 0;
if (skb->len < ETH_ZLEN)
memset((void *)&ib->tx_buf[entry][0], 0, ETH_ZLEN);
- skb_copy_from_linear_data(skb, (void *)&ib->tx_buf[entry][0], skblen);
+ skb_copy_from_linear_data(skb, (void *)&ib->tx_buf[entry][0], skblen);
- /* Now, give the packet to the lance */
- ib->btx_ring [entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN);
- lp->tx_new = (lp->tx_new+1) & lp->tx_ring_mod_mask;
+ /* Now, give the packet to the lance */
+ ib->btx_ring[entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN);
+ lp->tx_new = (lp->tx_new + 1) & lp->tx_ring_mod_mask;
- outs++;
- /* Kick the lance: transmit now */
- WRITERDP(lp, LE_C0_INEA | LE_C0_TDMD);
- dev_kfree_skb (skb);
+ outs++;
+ /* Kick the lance: transmit now */
+ WRITERDP(lp, LE_C0_INEA | LE_C0_TDMD);
+ dev_kfree_skb(skb);
- spin_lock_irqsave (&lp->devlock, flags);
- if (TX_BUFFS_AVAIL)
- netif_start_queue (dev);
+ spin_lock_irqsave(&lp->devlock, flags);
+ if (TX_BUFFS_AVAIL)
+ netif_start_queue(dev);
else
lp->tx_full = 1;
- spin_unlock_irqrestore (&lp->devlock, flags);
+ spin_unlock_irqrestore(&lp->devlock, flags);
- return NETDEV_TX_OK;
+ return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(lance_start_xmit);
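
The (-len) | 0xf000 in lance_start_xmit() follows the 7990's buffer-length convention: the length field holds the two's complement of the byte count, with the top four bits forced to ones. A standalone arithmetic check for a minimum-size frame (not driver code):

#include <stdio.h>

int main(void)
{
	int len = 64;			/* an ETH_ZLEN-sized frame, say */
	unsigned short bcnt = (-len) | 0xf000;

	printf("len=%d -> bcnt=0x%04x\n", len, bcnt);

	/* Recover the length: negate in 16-bit arithmetic and mask off
	 * the forced-ones top nibble. */
	printf("recovered=%d\n", (unsigned short)-bcnt & 0x0fff);
	return 0;
}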
/* taken from the depca driver via a2065.c */
-static void lance_load_multicast (struct net_device *dev)
+static void lance_load_multicast(struct net_device *dev)
{
- struct lance_private *lp = netdev_priv(dev);
- volatile struct lance_init_block *ib = lp->init_block;
- volatile u16 *mcast_table = (u16 *)&ib->filter;
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile u16 *mcast_table = (u16 *)&ib->filter;
struct netdev_hw_addr *ha;
- u32 crc;
-
- /* set all multicast bits */
- if (dev->flags & IFF_ALLMULTI){
- ib->filter [0] = 0xffffffff;
- ib->filter [1] = 0xffffffff;
- return;
- }
- /* clear the multicast filter */
- ib->filter [0] = 0;
- ib->filter [1] = 0;
-
- /* Add addresses */
+ u32 crc;
+
+ /* set all multicast bits */
+ if (dev->flags & IFF_ALLMULTI) {
+ ib->filter[0] = 0xffffffff;
+ ib->filter[1] = 0xffffffff;
+ return;
+ }
+ /* clear the multicast filter */
+ ib->filter[0] = 0;
+ ib->filter[1] = 0;
+
+ /* Add addresses */
netdev_for_each_mc_addr(ha, dev) {
crc = ether_crc_le(6, ha->addr);
- crc = crc >> 26;
- mcast_table [crc >> 4] |= 1 << (crc & 0xf);
- }
+ crc = crc >> 26;
+ mcast_table[crc >> 4] |= 1 << (crc & 0xf);
+ }
}
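
lance_load_multicast() hashes each multicast address with the little-endian Ethernet CRC, then uses the top six bits of the result to pick one of 64 filter bits: after the shift by 26, bits 5..4 select one of the four 16-bit filter words and bits 3..0 select the bit within it. A standalone sketch using a bitwise equivalent of the kernel's ether_crc_le() (illustrative only):

#include <stdio.h>

static unsigned int ether_crc_le(int len, const unsigned char *p)
{
	unsigned int crc = 0xffffffff;
	int i, bit;

	for (i = 0; i < len; i++) {
		crc ^= p[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;
}

int main(void)
{
	unsigned char mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	unsigned short filter[4] = { 0, 0, 0, 0 };
	unsigned int crc = ether_crc_le(6, mc) >> 26;	/* 6-bit hash */

	filter[crc >> 4] |= 1 << (crc & 0xf);
	printf("hash=%u -> word %u, bit %u\n", crc, crc >> 4, crc & 0xf);
	return 0;
}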
-void lance_set_multicast (struct net_device *dev)
+void lance_set_multicast(struct net_device *dev)
{
- struct lance_private *lp = netdev_priv(dev);
- volatile struct lance_init_block *ib = lp->init_block;
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
int stopped;
stopped = netif_queue_stopped(dev);
if (!stopped)
- netif_stop_queue (dev);
-
- while (lp->tx_old != lp->tx_new)
- schedule();
+ netif_stop_queue(dev);
- WRITERAP(lp, LE_CSR0);
- WRITERDP(lp, LE_C0_STOP);
- lance_init_ring (dev);
+ while (lp->tx_old != lp->tx_new)
+ schedule();
- if (dev->flags & IFF_PROMISC) {
- ib->mode |= LE_MO_PROM;
- } else {
- ib->mode &= ~LE_MO_PROM;
- lance_load_multicast (dev);
- }
- load_csrs (lp);
- init_restart_lance (lp);
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_STOP);
+ lance_init_ring(dev);
+
+ if (dev->flags & IFF_PROMISC) {
+ ib->mode |= LE_MO_PROM;
+ } else {
+ ib->mode &= ~LE_MO_PROM;
+ lance_load_multicast(dev);
+ }
+ load_csrs(lp);
+ init_restart_lance(lp);
if (!stopped)
- netif_start_queue (dev);
+ netif_start_queue(dev);
}
EXPORT_SYMBOL_GPL(lance_set_multicast);
@@ -648,10 +655,10 @@ void lance_poll(struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
- spin_lock (&lp->devlock);
+ spin_lock(&lp->devlock);
WRITERAP(lp, LE_CSR0);
WRITERDP(lp, LE_C0_STRT);
- spin_unlock (&lp->devlock);
+ spin_unlock(&lp->devlock);
lance_interrupt(dev->irq, dev);
}
#endif
diff --git a/drivers/net/ethernet/amd/7990.h b/drivers/net/ethernet/amd/7990.h
index ae33a99bf476..e9e0be313804 100644
--- a/drivers/net/ethernet/amd/7990.h
+++ b/drivers/net/ethernet/amd/7990.h
@@ -35,33 +35,32 @@
#define LANCE_LOG_RX_BUFFERS 3
#endif
-#define TX_RING_SIZE (1<<LANCE_LOG_TX_BUFFERS)
-#define RX_RING_SIZE (1<<LANCE_LOG_RX_BUFFERS)
-#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
-#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
-#define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29)
-#define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29)
-#define PKT_BUFF_SIZE (1544)
-#define RX_BUFF_SIZE PKT_BUFF_SIZE
-#define TX_BUFF_SIZE PKT_BUFF_SIZE
+#define TX_RING_SIZE (1 << LANCE_LOG_TX_BUFFERS)
+#define RX_RING_SIZE (1 << LANCE_LOG_RX_BUFFERS)
+#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
+#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
+#define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29)
+#define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29)
+#define PKT_BUFF_SIZE (1544)
+#define RX_BUFF_SIZE PKT_BUFF_SIZE
+#define TX_BUFF_SIZE PKT_BUFF_SIZE
/* Each receive buffer is described by a receive message descriptor (RMD) */
struct lance_rx_desc {
- volatile unsigned short rmd0; /* low address of packet */
- volatile unsigned char rmd1_bits; /* descriptor bits */
- volatile unsigned char rmd1_hadr; /* high address of packet */
- volatile short length; /* This length is 2s complement (negative)!
- * Buffer length
- */
- volatile unsigned short mblength; /* Actual number of bytes received */
+ volatile unsigned short rmd0; /* low address of packet */
+ volatile unsigned char rmd1_bits; /* descriptor bits */
+ volatile unsigned char rmd1_hadr; /* high address of packet */
+ volatile short length; /* This length is 2s complement (negative)!
+ * Buffer length */
+ volatile unsigned short mblength; /* Actual number of bytes received */
};
/* Ditto for TMD: */
struct lance_tx_desc {
- volatile unsigned short tmd0; /* low address of packet */
- volatile unsigned char tmd1_bits; /* descriptor bits */
- volatile unsigned char tmd1_hadr; /* high address of packet */
- volatile short length; /* Length is 2s complement (negative)! */
+ volatile unsigned short tmd0; /* low address of packet */
+ volatile unsigned char tmd1_bits; /* descriptor bits */
+ volatile unsigned char tmd1_hadr; /* high address of packet */
+ volatile short length; /* Length is 2s complement (negative)! */
volatile unsigned short misc;
};
@@ -71,181 +70,178 @@ struct lance_tx_desc {
 * init block, the Tx and Rx rings and the buffers together in memory:
*/
struct lance_init_block {
- volatile unsigned short mode; /* Pre-set mode (reg. 15) */
- volatile unsigned char phys_addr[6]; /* Physical ethernet address */
- volatile unsigned filter[2]; /* Multicast filter (64 bits) */
-
- /* Receive and transmit ring base, along with extra bits. */
- volatile unsigned short rx_ptr; /* receive descriptor addr */
- volatile unsigned short rx_len; /* receive len and high addr */
- volatile unsigned short tx_ptr; /* transmit descriptor addr */
- volatile unsigned short tx_len; /* transmit len and high addr */
-
- /* The Tx and Rx ring entries must be aligned on 8-byte boundaries.
- * This will be true if this whole struct is 8-byte aligned.
- */
- volatile struct lance_tx_desc btx_ring[TX_RING_SIZE];
- volatile struct lance_rx_desc brx_ring[RX_RING_SIZE];
-
- volatile char tx_buf [TX_RING_SIZE][TX_BUFF_SIZE];
- volatile char rx_buf [RX_RING_SIZE][RX_BUFF_SIZE];
- /* we use this just to make the struct big enough that we can move its startaddr
- * in order to force alignment to an eight byte boundary.
- */
+ volatile unsigned short mode; /* Pre-set mode (reg. 15) */
+ volatile unsigned char phys_addr[6]; /* Physical ethernet address */
+ volatile unsigned filter[2]; /* Multicast filter (64 bits) */
+
+ /* Receive and transmit ring base, along with extra bits. */
+ volatile unsigned short rx_ptr; /* receive descriptor addr */
+ volatile unsigned short rx_len; /* receive len and high addr */
+ volatile unsigned short tx_ptr; /* transmit descriptor addr */
+ volatile unsigned short tx_len; /* transmit len and high addr */
+
+ /* The Tx and Rx ring entries must be aligned on 8-byte boundaries.
+ * This will be true if this whole struct is 8-byte aligned.
+ */
+ volatile struct lance_tx_desc btx_ring[TX_RING_SIZE];
+ volatile struct lance_rx_desc brx_ring[RX_RING_SIZE];
+
+ volatile char tx_buf[TX_RING_SIZE][TX_BUFF_SIZE];
+ volatile char rx_buf[RX_RING_SIZE][RX_BUFF_SIZE];
+	/* we use this just to make the struct big enough that we can move its
+	 * start address in order to force alignment to an eight-byte boundary.
+	 */
};
/* This is where we keep all the stuff the driver needs to know about.
* I'm definitely unhappy about the mechanism for allowing specific
* drivers to add things...
*/
-struct lance_private
-{
- char *name;
+struct lance_private {
+ const char *name;
unsigned long base;
- volatile struct lance_init_block *init_block; /* CPU address of RAM */
- volatile struct lance_init_block *lance_init_block; /* LANCE address of RAM */
+ volatile struct lance_init_block *init_block; /* CPU address of RAM */
+ volatile struct lance_init_block *lance_init_block; /* LANCE address of RAM */
- int rx_new, tx_new;
- int rx_old, tx_old;
+ int rx_new, tx_new;
+ int rx_old, tx_old;
- int lance_log_rx_bufs, lance_log_tx_bufs;
- int rx_ring_mod_mask, tx_ring_mod_mask;
+ int lance_log_rx_bufs, lance_log_tx_bufs;
+ int rx_ring_mod_mask, tx_ring_mod_mask;
- int tpe; /* TPE is selected */
- int auto_select; /* cable-selection is by carrier */
- unsigned short busmaster_regval;
+ int tpe; /* TPE is selected */
+ int auto_select; /* cable-selection is by carrier */
+ unsigned short busmaster_regval;
- unsigned int irq; /* IRQ to register */
+ unsigned int irq; /* IRQ to register */
- /* This is because the HP LANCE is disgusting and you have to check
- * a DIO-specific register every time you read/write the LANCE regs :-<
- * [could we get away with making these some sort of macro?]
- */
- void (*writerap)(void *, unsigned short);
- void (*writerdp)(void *, unsigned short);
- unsigned short (*readrdp)(void *);
+ /* This is because the HP LANCE is disgusting and you have to check
+ * a DIO-specific register every time you read/write the LANCE regs :-<
+ * [could we get away with making these some sort of macro?]
+ */
+ void (*writerap)(void *, unsigned short);
+ void (*writerdp)(void *, unsigned short);
+ unsigned short (*readrdp)(void *);
spinlock_t devlock;
char tx_full;
};
/*
- * Am7990 Control and Status Registers
+ * Am7990 Control and Status Registers
*/
-#define LE_CSR0 0x0000 /* LANCE Controller Status */
-#define LE_CSR1 0x0001 /* IADR[15:0] (bit0==0 ie word aligned) */
-#define LE_CSR2 0x0002 /* IADR[23:16] (high bits reserved) */
-#define LE_CSR3 0x0003 /* Misc */
+#define LE_CSR0 0x0000 /* LANCE Controller Status */
+#define LE_CSR1 0x0001 /* IADR[15:0] (bit0==0 ie word aligned) */
+#define LE_CSR2 0x0002 /* IADR[23:16] (high bits reserved) */
+#define LE_CSR3 0x0003 /* Misc */
/*
* Bit definitions for CSR0 (LANCE Controller Status)
*/
-#define LE_C0_ERR 0x8000 /* Error = BABL | CERR | MISS | MERR */
-#define LE_C0_BABL 0x4000 /* Babble: Transmitted too many bits */
-#define LE_C0_CERR 0x2000 /* No Heartbeat (10BASE-T) */
-#define LE_C0_MISS 0x1000 /* Missed Frame (no rx buffer to put it in) */
-#define LE_C0_MERR 0x0800 /* Memory Error */
-#define LE_C0_RINT 0x0400 /* Receive Interrupt */
-#define LE_C0_TINT 0x0200 /* Transmit Interrupt */
-#define LE_C0_IDON 0x0100 /* Initialization Done */
-#define LE_C0_INTR 0x0080 /* Interrupt Flag
- = BABL | MISS | MERR | RINT | TINT | IDON */
-#define LE_C0_INEA 0x0040 /* Interrupt Enable */
-#define LE_C0_RXON 0x0020 /* Receive On */
-#define LE_C0_TXON 0x0010 /* Transmit On */
-#define LE_C0_TDMD 0x0008 /* Transmit Demand */
-#define LE_C0_STOP 0x0004 /* Stop */
-#define LE_C0_STRT 0x0002 /* Start */
-#define LE_C0_INIT 0x0001 /* Initialize */
+#define LE_C0_ERR 0x8000 /* Error = BABL | CERR | MISS | MERR */
+#define LE_C0_BABL 0x4000 /* Babble: Transmitted too many bits */
+#define LE_C0_CERR 0x2000 /* No Heartbeat (10BASE-T) */
+#define LE_C0_MISS 0x1000 /* Missed Frame (no rx buffer to put it in) */
+#define LE_C0_MERR 0x0800 /* Memory Error */
+#define LE_C0_RINT 0x0400 /* Receive Interrupt */
+#define LE_C0_TINT 0x0200 /* Transmit Interrupt */
+#define LE_C0_IDON 0x0100 /* Initialization Done */
+#define LE_C0_INTR 0x0080 /* Interrupt Flag
+ = BABL | MISS | MERR | RINT | TINT | IDON */
+#define LE_C0_INEA 0x0040 /* Interrupt Enable */
+#define LE_C0_RXON 0x0020 /* Receive On */
+#define LE_C0_TXON 0x0010 /* Transmit On */
+#define LE_C0_TDMD 0x0008 /* Transmit Demand */
+#define LE_C0_STOP 0x0004 /* Stop */
+#define LE_C0_STRT 0x0002 /* Start */
+#define LE_C0_INIT 0x0001 /* Initialize */
/*
* Bit definitions for CSR3
*/
-#define LE_C3_BSWP 0x0004 /* Byte Swap
- (on for big endian byte order) */
-#define LE_C3_ACON 0x0002 /* ALE Control
- (on for active low ALE) */
-#define LE_C3_BCON 0x0001 /* Byte Control */
+#define LE_C3_BSWP 0x0004 /* Byte Swap (on for big endian byte order) */
+#define LE_C3_ACON 0x0002 /* ALE Control (on for active low ALE) */
+#define LE_C3_BCON 0x0001 /* Byte Control */
/*
* Mode Flags
*/
-#define LE_MO_PROM 0x8000 /* Promiscuous Mode */
+#define LE_MO_PROM 0x8000 /* Promiscuous Mode */
/* these next ones 0x4000 -- 0x0080 are not available on the LANCE 7990,
* but they are in NetBSD's am7990.h, presumably for backwards-compatible chips
*/
-#define LE_MO_DRCVBC 0x4000 /* disable receive broadcast */
-#define LE_MO_DRCVPA 0x2000 /* disable physical address detection */
-#define LE_MO_DLNKTST 0x1000 /* disable link status */
-#define LE_MO_DAPC 0x0800 /* disable automatic polarity correction */
-#define LE_MO_MENDECL 0x0400 /* MENDEC loopback mode */
-#define LE_MO_LRTTSEL 0x0200 /* lower RX threshold / TX mode selection */
-#define LE_MO_PSEL1 0x0100 /* port selection bit1 */
-#define LE_MO_PSEL0 0x0080 /* port selection bit0 */
+#define LE_MO_DRCVBC 0x4000 /* disable receive broadcast */
+#define LE_MO_DRCVPA 0x2000 /* disable physical address detection */
+#define LE_MO_DLNKTST 0x1000 /* disable link status */
+#define LE_MO_DAPC 0x0800 /* disable automatic polarity correction */
+#define LE_MO_MENDECL 0x0400 /* MENDEC loopback mode */
+#define LE_MO_LRTTSEL 0x0200 /* lower RX threshold / TX mode selection */
+#define LE_MO_PSEL1 0x0100 /* port selection bit1 */
+#define LE_MO_PSEL0 0x0080 /* port selection bit0 */
/* and this one is from the C-LANCE data sheet... */
-#define LE_MO_EMBA 0x0080 /* Enable Modified Backoff Algorithm
- (C-LANCE, not original LANCE) */
-#define LE_MO_INTL 0x0040 /* Internal Loopback */
-#define LE_MO_DRTY 0x0020 /* Disable Retry */
-#define LE_MO_FCOLL 0x0010 /* Force Collision */
-#define LE_MO_DXMTFCS 0x0008 /* Disable Transmit CRC */
-#define LE_MO_LOOP 0x0004 /* Loopback Enable */
-#define LE_MO_DTX 0x0002 /* Disable Transmitter */
-#define LE_MO_DRX 0x0001 /* Disable Receiver */
+#define LE_MO_EMBA 0x0080 /* Enable Modified Backoff Algorithm
+ (C-LANCE, not original LANCE) */
+#define LE_MO_INTL 0x0040 /* Internal Loopback */
+#define LE_MO_DRTY 0x0020 /* Disable Retry */
+#define LE_MO_FCOLL 0x0010 /* Force Collision */
+#define LE_MO_DXMTFCS 0x0008 /* Disable Transmit CRC */
+#define LE_MO_LOOP 0x0004 /* Loopback Enable */
+#define LE_MO_DTX 0x0002 /* Disable Transmitter */
+#define LE_MO_DRX 0x0001 /* Disable Receiver */
/*
* Receive Flags
*/
-#define LE_R1_OWN 0x80 /* LANCE owns the descriptor */
-#define LE_R1_ERR 0x40 /* Error */
-#define LE_R1_FRA 0x20 /* Framing Error */
-#define LE_R1_OFL 0x10 /* Overflow Error */
-#define LE_R1_CRC 0x08 /* CRC Error */
-#define LE_R1_BUF 0x04 /* Buffer Error */
-#define LE_R1_SOP 0x02 /* Start of Packet */
-#define LE_R1_EOP 0x01 /* End of Packet */
-#define LE_R1_POK 0x03 /* Packet is complete: SOP + EOP */
+#define LE_R1_OWN 0x80 /* LANCE owns the descriptor */
+#define LE_R1_ERR 0x40 /* Error */
+#define LE_R1_FRA 0x20 /* Framing Error */
+#define LE_R1_OFL 0x10 /* Overflow Error */
+#define LE_R1_CRC 0x08 /* CRC Error */
+#define LE_R1_BUF 0x04 /* Buffer Error */
+#define LE_R1_SOP 0x02 /* Start of Packet */
+#define LE_R1_EOP 0x01 /* End of Packet */
+#define LE_R1_POK 0x03 /* Packet is complete: SOP + EOP */
/*
* Transmit Flags
*/
-#define LE_T1_OWN 0x80 /* LANCE owns the descriptor */
-#define LE_T1_ERR 0x40 /* Error */
-#define LE_T1_RES 0x20 /* Reserved, LANCE writes this with a zero */
-#define LE_T1_EMORE 0x10 /* More than one retry needed */
-#define LE_T1_EONE 0x08 /* One retry needed */
-#define LE_T1_EDEF 0x04 /* Deferred */
-#define LE_T1_SOP 0x02 /* Start of Packet */
-#define LE_T1_EOP 0x01 /* End of Packet */
-#define LE_T1_POK 0x03 /* Packet is complete: SOP + EOP */
+#define LE_T1_OWN 0x80 /* LANCE owns the descriptor */
+#define LE_T1_ERR 0x40 /* Error */
+#define LE_T1_RES 0x20 /* Reserved, LANCE writes this with a zero */
+#define LE_T1_EMORE 0x10 /* More than one retry needed */
+#define LE_T1_EONE 0x08 /* One retry needed */
+#define LE_T1_EDEF 0x04 /* Deferred */
+#define LE_T1_SOP 0x02 /* Start of Packet */
+#define LE_T1_EOP 0x01 /* End of Packet */
+#define LE_T1_POK 0x03 /* Packet is complete: SOP + EOP */
/*
* Error Flags
*/
-#define LE_T3_BUF 0x8000 /* Buffer Error */
-#define LE_T3_UFL 0x4000 /* Underflow Error */
-#define LE_T3_LCOL 0x1000 /* Late Collision */
-#define LE_T3_CLOS 0x0800 /* Loss of Carrier */
-#define LE_T3_RTY 0x0400 /* Retry Error */
-#define LE_T3_TDR 0x03ff /* Time Domain Reflectometry */
+#define LE_T3_BUF 0x8000 /* Buffer Error */
+#define LE_T3_UFL 0x4000 /* Underflow Error */
+#define LE_T3_LCOL 0x1000 /* Late Collision */
+#define LE_T3_CLOS 0x0800 /* Loss of Carrier */
+#define LE_T3_RTY 0x0400 /* Retry Error */
+#define LE_T3_TDR 0x03ff /* Time Domain Reflectometry */
/* Miscellaneous useful macros */
-#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
- lp->tx_old+lp->tx_ring_mod_mask-lp->tx_new:\
- lp->tx_old - lp->tx_new-1)
+#define TX_BUFFS_AVAIL ((lp->tx_old <= lp->tx_new) ? \
+ lp->tx_old + lp->tx_ring_mod_mask - lp->tx_new : \
+ lp->tx_old - lp->tx_new - 1)
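
The reformatted TX_BUFFS_AVAIL macro computes the number of free slots in the circular TX ring from the old/new indices; the asymmetric subtraction keeps one slot in reserve, so a completely full ring is never confused with an empty one. A worked check with hypothetical indices and an 8-entry ring (mask 7):

#include <stdio.h>

#define MOD_MASK 7	/* ring size 8 */
#define AVAIL(old, new) ((old) <= (new) ?		\
			 (old) + MOD_MASK - (new) :	\
			 (old) - (new) - 1)

int main(void)
{
	printf("empty, old=new=0: %d\n", AVAIL(0, 0));	/* 7 */
	printf("one queued (0,1): %d\n", AVAIL(0, 1));	/* 6 */
	printf("wrapped (5,2):    %d\n", AVAIL(5, 2));	/* 2 */
	return 0;
}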
/* The LANCE only uses 24 bit addresses. This does the obvious thing. */
#define LANCE_ADDR(x) ((int)(x) & ~0xff000000)
/* Now the prototypes we export */
int lance_open(struct net_device *dev);
-int lance_close (struct net_device *dev);
-int lance_start_xmit (struct sk_buff *skb, struct net_device *dev);
-void lance_set_multicast (struct net_device *dev);
+int lance_close(struct net_device *dev);
+int lance_start_xmit(struct sk_buff *skb, struct net_device *dev);
+void lance_set_multicast(struct net_device *dev);
void lance_tx_timeout(struct net_device *dev);
#ifdef CONFIG_NET_POLL_CONTROLLER
void lance_poll(struct net_device *dev);
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index d042511bdc13..2061b471fd16 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -24,9 +24,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
Module Name:
@@ -74,7 +72,6 @@ Revision History:
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
diff --git a/drivers/net/ethernet/amd/amd8111e.h b/drivers/net/ethernet/amd/amd8111e.h
index 8baa3527ba74..a75092d584cc 100644
--- a/drivers/net/ethernet/amd/amd8111e.h
+++ b/drivers/net/ethernet/amd/amd8111e.h
@@ -13,9 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
Module Name:
@@ -753,7 +751,7 @@ struct amd8111e_priv{
const char *name;
struct pci_dev *pci_dev; /* Ptr to the associated pci_dev */
struct net_device* amd8111e_net_dev; /* ptr to associated net_device */
- /* Transmit and recive skbs */
+ /* Transmit and receive skbs */
struct sk_buff *tx_skbuff[NUM_TX_BUFFERS];
struct sk_buff *rx_skbuff[NUM_RX_BUFFERS];
/* Transmit and receive dma mapped addr */
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 427c148bb643..a2bd91e3d302 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -27,8 +27,7 @@
* for more details.
*
* You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
*
* ########################################################################
*
@@ -48,7 +47,6 @@
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
diff --git a/drivers/net/ethernet/amd/au1000_eth.h b/drivers/net/ethernet/amd/au1000_eth.h
index 4b7f7ad62bb8..ca53024f017f 100644
--- a/drivers/net/ethernet/amd/au1000_eth.h
+++ b/drivers/net/ethernet/amd/au1000_eth.h
@@ -18,8 +18,7 @@
* for more details.
*
* You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
*
* ########################################################################
*
diff --git a/drivers/net/ethernet/amd/hplance.c b/drivers/net/ethernet/amd/hplance.c
index 0c61fd50d882..47ce57c2c893 100644
--- a/drivers/net/ethernet/amd/hplance.c
+++ b/drivers/net/ethernet/amd/hplance.c
@@ -127,41 +127,41 @@ static void hplance_remove_one(struct dio_dev *d)
/* Initialise a single lance board at the given DIO device */
static void hplance_init(struct net_device *dev, struct dio_dev *d)
{
- unsigned long va = (d->resource.start + DIO_VIRADDRBASE);
- struct hplance_private *lp;
- int i;
-
- /* reset the board */
- out_8(va+DIO_IDOFF, 0xff);
- udelay(100); /* ariba! ariba! udelay! udelay! */
-
- /* Fill the dev fields */
- dev->base_addr = va;
- dev->netdev_ops = &hplance_netdev_ops;
- dev->dma = 0;
-
- for (i=0; i<6; i++) {
- /* The NVRAM holds our ethernet address, one nibble per byte,
- * at bytes NVRAMOFF+1,3,5,7,9...
- */
- dev->dev_addr[i] = ((in_8(va + HPLANCE_NVRAMOFF + i*4 + 1) & 0xF) << 4)
- | (in_8(va + HPLANCE_NVRAMOFF + i*4 + 3) & 0xF);
- }
-
- lp = netdev_priv(dev);
- lp->lance.name = (char*)d->name; /* discards const, shut up gcc */
- lp->lance.base = va;
- lp->lance.init_block = (struct lance_init_block *)(va + HPLANCE_MEMOFF); /* CPU addr */
- lp->lance.lance_init_block = NULL; /* LANCE addr of same RAM */
- lp->lance.busmaster_regval = LE_C3_BSWP; /* we're bigendian */
- lp->lance.irq = d->ipl;
- lp->lance.writerap = hplance_writerap;
- lp->lance.writerdp = hplance_writerdp;
- lp->lance.readrdp = hplance_readrdp;
- lp->lance.lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS;
- lp->lance.lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS;
- lp->lance.rx_ring_mod_mask = RX_RING_MOD_MASK;
- lp->lance.tx_ring_mod_mask = TX_RING_MOD_MASK;
+ unsigned long va = (d->resource.start + DIO_VIRADDRBASE);
+ struct hplance_private *lp;
+ int i;
+
+ /* reset the board */
+ out_8(va + DIO_IDOFF, 0xff);
+ udelay(100); /* ariba! ariba! udelay! udelay! */
+
+ /* Fill the dev fields */
+ dev->base_addr = va;
+ dev->netdev_ops = &hplance_netdev_ops;
+ dev->dma = 0;
+
+ for (i = 0; i < 6; i++) {
+ /* The NVRAM holds our ethernet address, one nibble per byte,
+ * at bytes NVRAMOFF+1,3,5,7,9...
+ */
+ dev->dev_addr[i] = ((in_8(va + HPLANCE_NVRAMOFF + i*4 + 1) & 0xF) << 4)
+ | (in_8(va + HPLANCE_NVRAMOFF + i*4 + 3) & 0xF);
+ }
+
+ lp = netdev_priv(dev);
+ lp->lance.name = d->name;
+ lp->lance.base = va;
+ lp->lance.init_block = (struct lance_init_block *)(va + HPLANCE_MEMOFF); /* CPU addr */
+ lp->lance.lance_init_block = NULL; /* LANCE addr of same RAM */
+	lp->lance.busmaster_regval = LE_C3_BSWP;	/* we're big-endian */
+ lp->lance.irq = d->ipl;
+ lp->lance.writerap = hplance_writerap;
+ lp->lance.writerdp = hplance_writerdp;
+ lp->lance.readrdp = hplance_readrdp;
+ lp->lance.lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS;
+ lp->lance.lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS;
+ lp->lance.rx_ring_mod_mask = RX_RING_MOD_MASK;
+ lp->lance.tx_ring_mod_mask = TX_RING_MOD_MASK;
}
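
The NVRAM loop in hplance_init() above rebuilds each MAC byte from two nibbles stored four bytes apart in the board's NVRAM. The recombination in isolation, with made-up NVRAM contents:

#include <stdio.h>

int main(void)
{
	/* Each stored byte carries one useful nibble in its low half. */
	unsigned char nv_hi = 0x40;	/* high nibble of the MAC byte */
	unsigned char nv_lo = 0x98;	/* low nibble of the MAC byte */

	unsigned char mac_byte = ((nv_hi & 0xF) << 4) | (nv_lo & 0xF);

	printf("mac byte = 0x%02x\n", mac_byte);	/* prints 0x08 */
	return 0;
}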
/* This is disgusting. We have to check the DIO status register for ack every
@@ -195,25 +195,25 @@ static unsigned short hplance_readrdp(void *priv)
static int hplance_open(struct net_device *dev)
{
- int status;
- struct lance_private *lp = netdev_priv(dev);
+ int status;
+ struct lance_private *lp = netdev_priv(dev);
- status = lance_open(dev); /* call generic lance open code */
- if (status)
- return status;
- /* enable interrupts at board level. */
- out_8(lp->base + HPLANCE_STATUS, LE_IE);
+ status = lance_open(dev); /* call generic lance open code */
+ if (status)
+ return status;
+ /* enable interrupts at board level. */
+ out_8(lp->base + HPLANCE_STATUS, LE_IE);
- return 0;
+ return 0;
}
static int hplance_close(struct net_device *dev)
{
- struct lance_private *lp = netdev_priv(dev);
+ struct lance_private *lp = netdev_priv(dev);
- out_8(lp->base + HPLANCE_STATUS, 0); /* disable interrupts at boardlevel */
- lance_close(dev);
- return 0;
+	out_8(lp->base + HPLANCE_STATUS, 0);	/* disable interrupts at board level */
+ lance_close(dev);
+ return 0;
}
static int __init hplance_init_module(void)
@@ -223,7 +223,7 @@ static int __init hplance_init_module(void)
static void __exit hplance_cleanup_module(void)
{
- dio_unregister_driver(&hplance_driver);
+ dio_unregister_driver(&hplance_driver);
}
module_init(hplance_init_module);
diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c
index e108e911da05..0e8399dec054 100644
--- a/drivers/net/ethernet/amd/mvme147.c
+++ b/drivers/net/ethernet/amd/mvme147.c
@@ -94,33 +94,31 @@ struct net_device * __init mvme147lance_probe(int unit)
dev->netdev_ops = &lance_netdev_ops;
dev->dma = 0;
- addr=(u_long *)ETHERNET_ADDRESS;
+ addr = (u_long *)ETHERNET_ADDRESS;
address = *addr;
- dev->dev_addr[0]=0x08;
- dev->dev_addr[1]=0x00;
- dev->dev_addr[2]=0x3e;
- address=address>>8;
- dev->dev_addr[5]=address&0xff;
- address=address>>8;
- dev->dev_addr[4]=address&0xff;
- address=address>>8;
- dev->dev_addr[3]=address&0xff;
-
- printk("%s: MVME147 at 0x%08lx, irq %d, "
- "Hardware Address %pM\n",
+ dev->dev_addr[0] = 0x08;
+ dev->dev_addr[1] = 0x00;
+ dev->dev_addr[2] = 0x3e;
+ address = address >> 8;
+ dev->dev_addr[5] = address&0xff;
+ address = address >> 8;
+ dev->dev_addr[4] = address&0xff;
+ address = address >> 8;
+ dev->dev_addr[3] = address&0xff;
+
+ printk("%s: MVME147 at 0x%08lx, irq %d, Hardware Address %pM\n",
dev->name, dev->base_addr, MVME147_LANCE_IRQ,
dev->dev_addr);
lp = netdev_priv(dev);
lp->ram = __get_dma_pages(GFP_ATOMIC, 3); /* 16K */
- if (!lp->ram)
- {
+ if (!lp->ram) {
printk("%s: No memory for LANCE buffers\n", dev->name);
free_netdev(dev);
return ERR_PTR(-ENOMEM);
}
- lp->lance.name = (char*)name; /* discards const, shut up gcc */
+ lp->lance.name = name;
lp->lance.base = dev->base_addr;
lp->lance.init_block = (struct lance_init_block *)(lp->ram); /* CPU addr */
lp->lance.lance_init_block = (struct lance_init_block *)(lp->ram); /* LANCE addr of same RAM */
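
mvme147lance_probe() above derives the low three MAC bytes by shifting a 32-bit word read at ETHERNET_ADDRESS right eight bits at a time, behind the fixed 08:00:3e prefix. A compact standalone equivalent with an invented serial value:

#include <stdio.h>

int main(void)
{
	unsigned long address = 0x00123456;	/* pretend serial-number word */
	unsigned char dev_addr[6] = { 0x08, 0x00, 0x3e };	/* fixed prefix */

	address >>= 8;
	dev_addr[5] = address & 0xff;
	address >>= 8;
	dev_addr[4] = address & 0xff;
	address >>= 8;
	dev_addr[3] = address & 0xff;

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       dev_addr[0], dev_addr[1], dev_addr[2],
	       dev_addr[3], dev_addr[4], dev_addr[5]);
	return 0;
}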
@@ -167,8 +165,8 @@ static int m147lance_open(struct net_device *dev)
if (status)
return status;
/* enable interrupts at board level. */
- m147_pcc->lan_cntrl=0; /* clear the interrupts (if any) */
- m147_pcc->lan_cntrl=0x08 | 0x04; /* Enable irq 4 */
+ m147_pcc->lan_cntrl = 0; /* clear the interrupts (if any) */
+ m147_pcc->lan_cntrl = 0x08 | 0x04; /* Enable irq 4 */
return 0;
}
@@ -176,7 +174,7 @@ static int m147lance_open(struct net_device *dev)
static int m147lance_close(struct net_device *dev)
{
	/* disable interrupts at board level */
- m147_pcc->lan_cntrl=0x0; /* disable interrupts */
+ m147_pcc->lan_cntrl = 0x0; /* disable interrupts */
lance_close(dev);
return 0;
}
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index d4ed89130c52..08569fe2b182 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -132,7 +132,6 @@ Include Files
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/string.h>
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 38492e0b704e..9339cccfe05a 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1668,7 +1668,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
for (i = 0; i < ETH_ALEN; i++)
promaddr[i] = inb(ioaddr + i);
- if (memcmp(promaddr, dev->dev_addr, ETH_ALEN) ||
+ if (!ether_addr_equal(promaddr, dev->dev_addr) ||
!is_valid_ether_addr(dev->dev_addr)) {
if (is_valid_ether_addr(promaddr)) {
if (pcnet32_debug & NETIF_MSG_PROBE) {
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index ece56831a647..5e4273b7aa27 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -80,7 +80,6 @@ static char lancestr[] = "LANCE";
#include <linux/in.h>
#include <linux/string.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/errno.h>
#include <linux/socket.h> /* Used for the temporal inet entries and routing */
diff --git a/drivers/net/ethernet/arc/emac.h b/drivers/net/ethernet/arc/emac.h
index dc08678bf9a4..928fac6dd10a 100644
--- a/drivers/net/ethernet/arc/emac.h
+++ b/drivers/net/ethernet/arc/emac.h
@@ -122,7 +122,6 @@ struct buffer_state {
* @link: PHY's last seen link state.
* @duplex: PHY's last set duplex mode.
* @speed: PHY's last set speed.
- * @max_speed: Maximum supported by current system network data-rate.
*/
struct arc_emac_priv {
/* Devices */
@@ -152,7 +151,6 @@ struct arc_emac_priv {
unsigned int link;
unsigned int duplex;
unsigned int speed;
- unsigned int max_speed;
};
/**
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 248baf6273fb..eeecc29cf5b7 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -381,17 +381,7 @@ static int arc_emac_open(struct net_device *ndev)
phy_dev->autoneg = AUTONEG_ENABLE;
phy_dev->speed = 0;
phy_dev->duplex = 0;
- phy_dev->advertising = phy_dev->supported;
-
- if (priv->max_speed > 100) {
- phy_dev->advertising &= PHY_GBIT_FEATURES;
- } else if (priv->max_speed <= 100) {
- phy_dev->advertising &= PHY_BASIC_FEATURES;
- if (priv->max_speed <= 10) {
- phy_dev->advertising &= ~SUPPORTED_100baseT_Half;
- phy_dev->advertising &= ~SUPPORTED_100baseT_Full;
- }
- }
+ phy_dev->advertising &= phy_dev->supported;
priv->last_rx_bd = 0;
@@ -704,14 +694,6 @@ static int arc_emac_probe(struct platform_device *pdev)
/* Set poll rate so that it polls every 1 ms */
arc_reg_set(priv, R_POLLRATE, clock_frequency / 1000000);
- /* Get max speed of operation from device tree */
- if (of_property_read_u32(pdev->dev.of_node, "max-speed",
- &priv->max_speed)) {
- dev_err(&pdev->dev, "failed to retrieve <max-speed> from device tree\n");
- err = -EINVAL;
- goto out;
- }
-
ndev->irq = irq;
dev_info(&pdev->dev, "IRQ is %d\n", ndev->irq);
diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h
index d71103dbf2cd..8fc93c5f6abc 100644
--- a/drivers/net/ethernet/atheros/alx/alx.h
+++ b/drivers/net/ethernet/atheros/alx/alx.h
@@ -106,6 +106,9 @@ struct alx_priv {
u16 msg_enable;
bool msi;
+
+ /* protects hw.stats */
+ spinlock_t stats_lock;
};
extern const struct ethtool_ops alx_ethtool_ops;
diff --git a/drivers/net/ethernet/atheros/alx/ethtool.c b/drivers/net/ethernet/atheros/alx/ethtool.c
index 45b36507abc1..08e22df2a300 100644
--- a/drivers/net/ethernet/atheros/alx/ethtool.c
+++ b/drivers/net/ethernet/atheros/alx/ethtool.c
@@ -46,6 +46,66 @@
#include "reg.h"
#include "hw.h"
+/* The order of these strings must match the order of the fields in
+ * struct alx_hw_stats; see hw.h.
+ */
+static const char alx_gstrings_stats[][ETH_GSTRING_LEN] = {
+ "rx_packets",
+ "rx_bcast_packets",
+ "rx_mcast_packets",
+ "rx_pause_packets",
+ "rx_ctrl_packets",
+ "rx_fcs_errors",
+ "rx_length_errors",
+ "rx_bytes",
+ "rx_runt_packets",
+ "rx_fragments",
+ "rx_64B_or_less_packets",
+ "rx_65B_to_127B_packets",
+ "rx_128B_to_255B_packets",
+ "rx_256B_to_511B_packets",
+ "rx_512B_to_1023B_packets",
+ "rx_1024B_to_1518B_packets",
+ "rx_1519B_to_mtu_packets",
+ "rx_oversize_packets",
+ "rx_rxf_ov_drop_packets",
+ "rx_rrd_ov_drop_packets",
+ "rx_align_errors",
+ "rx_bcast_bytes",
+ "rx_mcast_bytes",
+ "rx_address_errors",
+ "tx_packets",
+ "tx_bcast_packets",
+ "tx_mcast_packets",
+ "tx_pause_packets",
+ "tx_exc_defer_packets",
+ "tx_ctrl_packets",
+ "tx_defer_packets",
+ "tx_bytes",
+ "tx_64B_or_less_packets",
+ "tx_65B_to_127B_packets",
+ "tx_128B_to_255B_packets",
+ "tx_256B_to_511B_packets",
+ "tx_512B_to_1023B_packets",
+ "tx_1024B_to_1518B_packets",
+ "tx_1519B_to_mtu_packets",
+ "tx_single_collision",
+ "tx_multiple_collisions",
+ "tx_late_collision",
+ "tx_abort_collision",
+ "tx_underrun",
+ "tx_trd_eop",
+ "tx_length_errors",
+ "tx_trunc_packets",
+ "tx_bcast_bytes",
+ "tx_mcast_bytes",
+ "tx_update",
+};
+
+#define ALX_NUM_STATS ARRAY_SIZE(alx_gstrings_stats)
+
+
static u32 alx_get_supported_speeds(struct alx_hw *hw)
{
u32 supported = SUPPORTED_10baseT_Half |
@@ -201,6 +261,44 @@ static void alx_set_msglevel(struct net_device *netdev, u32 data)
alx->msg_enable = data;
}
+static void alx_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *estats, u64 *data)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+ struct alx_hw *hw = &alx->hw;
+
+ spin_lock(&alx->stats_lock);
+
+ alx_update_hw_stats(hw);
+ BUILD_BUG_ON(sizeof(hw->stats) - offsetof(struct alx_hw_stats, rx_ok) <
+ ALX_NUM_STATS * sizeof(u64));
+ memcpy(data, &hw->stats.rx_ok, ALX_NUM_STATS * sizeof(u64));
+
+ spin_unlock(&alx->stats_lock);
+}
+
+static void alx_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(buf, &alx_gstrings_stats, sizeof(alx_gstrings_stats));
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+static int alx_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ALX_NUM_STATS;
+ default:
+ return -EINVAL;
+ }
+}
+
const struct ethtool_ops alx_ethtool_ops = {
.get_settings = alx_get_settings,
.set_settings = alx_set_settings,
@@ -209,4 +307,7 @@ const struct ethtool_ops alx_ethtool_ops = {
.get_msglevel = alx_get_msglevel,
.set_msglevel = alx_set_msglevel,
.get_link = ethtool_op_get_link,
+ .get_strings = alx_get_strings,
+ .get_sset_count = alx_get_sset_count,
+ .get_ethtool_stats = alx_get_ethtool_stats,
};
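
alx_get_ethtool_stats() copies the whole counter structure out as a flat array of u64 starting at rx_ok, and the BUILD_BUG_ON turns a string/field mismatch into a compile error. The same offsetof-based pattern in miniature (hypothetical struct, user space):

#include <stdio.h>
#include <string.h>
#include <stddef.h>
#include <stdint.h>

struct stats {
	uint64_t rx_ok, rx_err, tx_ok;	/* nothing but consecutive u64s */
};

#define NUM_STATS 3

int main(void)
{
	struct stats s = { 10, 2, 7 };
	uint64_t out[NUM_STATS];

	/* Compile-time guard, same idea as the BUILD_BUG_ON above. */
	_Static_assert(sizeof(s) - offsetof(struct stats, rx_ok) >=
		       NUM_STATS * sizeof(uint64_t), "layout mismatch");

	memcpy(out, (char *)&s + offsetof(struct stats, rx_ok),
	       NUM_STATS * sizeof(uint64_t));

	printf("%llu %llu %llu\n", (unsigned long long)out[0],
	       (unsigned long long)out[1], (unsigned long long)out[2]);
	return 0;
}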
diff --git a/drivers/net/ethernet/atheros/alx/hw.c b/drivers/net/ethernet/atheros/alx/hw.c
index 1e8c24a3cb4e..7712f068f6d4 100644
--- a/drivers/net/ethernet/atheros/alx/hw.c
+++ b/drivers/net/ethernet/atheros/alx/hw.c
@@ -1050,3 +1050,61 @@ bool alx_get_phy_info(struct alx_hw *hw)
return true;
}
+
+void alx_update_hw_stats(struct alx_hw *hw)
+{
+ /* RX stats */
+ hw->stats.rx_ok += alx_read_mem32(hw, ALX_MIB_RX_OK);
+ hw->stats.rx_bcast += alx_read_mem32(hw, ALX_MIB_RX_BCAST);
+ hw->stats.rx_mcast += alx_read_mem32(hw, ALX_MIB_RX_MCAST);
+ hw->stats.rx_pause += alx_read_mem32(hw, ALX_MIB_RX_PAUSE);
+ hw->stats.rx_ctrl += alx_read_mem32(hw, ALX_MIB_RX_CTRL);
+ hw->stats.rx_fcs_err += alx_read_mem32(hw, ALX_MIB_RX_FCS_ERR);
+ hw->stats.rx_len_err += alx_read_mem32(hw, ALX_MIB_RX_LEN_ERR);
+ hw->stats.rx_byte_cnt += alx_read_mem32(hw, ALX_MIB_RX_BYTE_CNT);
+ hw->stats.rx_runt += alx_read_mem32(hw, ALX_MIB_RX_RUNT);
+ hw->stats.rx_frag += alx_read_mem32(hw, ALX_MIB_RX_FRAG);
+ hw->stats.rx_sz_64B += alx_read_mem32(hw, ALX_MIB_RX_SZ_64B);
+ hw->stats.rx_sz_127B += alx_read_mem32(hw, ALX_MIB_RX_SZ_127B);
+ hw->stats.rx_sz_255B += alx_read_mem32(hw, ALX_MIB_RX_SZ_255B);
+ hw->stats.rx_sz_511B += alx_read_mem32(hw, ALX_MIB_RX_SZ_511B);
+ hw->stats.rx_sz_1023B += alx_read_mem32(hw, ALX_MIB_RX_SZ_1023B);
+ hw->stats.rx_sz_1518B += alx_read_mem32(hw, ALX_MIB_RX_SZ_1518B);
+ hw->stats.rx_sz_max += alx_read_mem32(hw, ALX_MIB_RX_SZ_MAX);
+ hw->stats.rx_ov_sz += alx_read_mem32(hw, ALX_MIB_RX_OV_SZ);
+ hw->stats.rx_ov_rxf += alx_read_mem32(hw, ALX_MIB_RX_OV_RXF);
+ hw->stats.rx_ov_rrd += alx_read_mem32(hw, ALX_MIB_RX_OV_RRD);
+ hw->stats.rx_align_err += alx_read_mem32(hw, ALX_MIB_RX_ALIGN_ERR);
+ hw->stats.rx_bc_byte_cnt += alx_read_mem32(hw, ALX_MIB_RX_BCCNT);
+ hw->stats.rx_mc_byte_cnt += alx_read_mem32(hw, ALX_MIB_RX_MCCNT);
+ hw->stats.rx_err_addr += alx_read_mem32(hw, ALX_MIB_RX_ERRADDR);
+
+ /* TX stats */
+ hw->stats.tx_ok += alx_read_mem32(hw, ALX_MIB_TX_OK);
+ hw->stats.tx_bcast += alx_read_mem32(hw, ALX_MIB_TX_BCAST);
+ hw->stats.tx_mcast += alx_read_mem32(hw, ALX_MIB_TX_MCAST);
+ hw->stats.tx_pause += alx_read_mem32(hw, ALX_MIB_TX_PAUSE);
+ hw->stats.tx_exc_defer += alx_read_mem32(hw, ALX_MIB_TX_EXC_DEFER);
+ hw->stats.tx_ctrl += alx_read_mem32(hw, ALX_MIB_TX_CTRL);
+ hw->stats.tx_defer += alx_read_mem32(hw, ALX_MIB_TX_DEFER);
+ hw->stats.tx_byte_cnt += alx_read_mem32(hw, ALX_MIB_TX_BYTE_CNT);
+ hw->stats.tx_sz_64B += alx_read_mem32(hw, ALX_MIB_TX_SZ_64B);
+ hw->stats.tx_sz_127B += alx_read_mem32(hw, ALX_MIB_TX_SZ_127B);
+ hw->stats.tx_sz_255B += alx_read_mem32(hw, ALX_MIB_TX_SZ_255B);
+ hw->stats.tx_sz_511B += alx_read_mem32(hw, ALX_MIB_TX_SZ_511B);
+ hw->stats.tx_sz_1023B += alx_read_mem32(hw, ALX_MIB_TX_SZ_1023B);
+ hw->stats.tx_sz_1518B += alx_read_mem32(hw, ALX_MIB_TX_SZ_1518B);
+ hw->stats.tx_sz_max += alx_read_mem32(hw, ALX_MIB_TX_SZ_MAX);
+ hw->stats.tx_single_col += alx_read_mem32(hw, ALX_MIB_TX_SINGLE_COL);
+ hw->stats.tx_multi_col += alx_read_mem32(hw, ALX_MIB_TX_MULTI_COL);
+ hw->stats.tx_late_col += alx_read_mem32(hw, ALX_MIB_TX_LATE_COL);
+ hw->stats.tx_abort_col += alx_read_mem32(hw, ALX_MIB_TX_ABORT_COL);
+ hw->stats.tx_underrun += alx_read_mem32(hw, ALX_MIB_TX_UNDERRUN);
+ hw->stats.tx_trd_eop += alx_read_mem32(hw, ALX_MIB_TX_TRD_EOP);
+ hw->stats.tx_len_err += alx_read_mem32(hw, ALX_MIB_TX_LEN_ERR);
+ hw->stats.tx_trunc += alx_read_mem32(hw, ALX_MIB_TX_TRUNC);
+ hw->stats.tx_bc_byte_cnt += alx_read_mem32(hw, ALX_MIB_TX_BCCNT);
+ hw->stats.tx_mc_byte_cnt += alx_read_mem32(hw, ALX_MIB_TX_MCCNT);
+
+ hw->stats.update += alx_read_mem32(hw, ALX_MIB_UPDATE);
+}
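
alx_update_hw_stats() relies on the MIB counters clearing on read, so every read returns a delta that can simply be added into the wider software counters. The pattern in isolation, with a simulated read-to-clear register:

#include <stdio.h>
#include <stdint.h>

static uint32_t mib_rx_ok = 1234;	/* stand-in hardware counter */

static uint32_t read_mib(void)
{
	uint32_t v = mib_rx_ok;

	mib_rx_ok = 0;			/* hardware clears on read */
	return v;
}

int main(void)
{
	uint64_t rx_ok = 0;

	rx_ok += read_mib();		/* first poll picks up 1234 */
	mib_rx_ok = 42;			/* more traffic arrives */
	rx_ok += read_mib();		/* second poll adds the delta */

	printf("rx_ok = %llu\n", (unsigned long long)rx_ok);	/* 1276 */
	return 0;
}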
diff --git a/drivers/net/ethernet/atheros/alx/hw.h b/drivers/net/ethernet/atheros/alx/hw.h
index 96f3b4381e17..15548802d6f8 100644
--- a/drivers/net/ethernet/atheros/alx/hw.h
+++ b/drivers/net/ethernet/atheros/alx/hw.h
@@ -381,6 +381,73 @@ struct alx_rrd {
ALX_ISR_RX_Q6 | \
ALX_ISR_RX_Q7)
+/* Statistics counters collected by the MAC
+ *
+ * The order of the fields must match the strings in alx_gstrings_stats
+ * All stats fields should be u64
+ * See ethtool.c
+ */
+struct alx_hw_stats {
+ /* rx */
+ u64 rx_ok; /* good RX packets */
+ u64 rx_bcast; /* good RX broadcast packets */
+ u64 rx_mcast; /* good RX multicast packets */
+ u64 rx_pause; /* RX pause frames */
+ u64 rx_ctrl; /* RX control packets other than pause frames */
+ u64 rx_fcs_err; /* RX packets with bad FCS */
+ u64 rx_len_err; /* RX packets with length != actual size */
+ u64 rx_byte_cnt; /* good bytes received. FCS is NOT included */
+ u64 rx_runt; /* RX packets < 64 bytes with good FCS */
+ u64 rx_frag; /* RX packets < 64 bytes with bad FCS */
+ u64 rx_sz_64B; /* 64 byte RX packets */
+ u64 rx_sz_127B; /* 65-127 byte RX packets */
+ u64 rx_sz_255B; /* 128-255 byte RX packets */
+ u64 rx_sz_511B; /* 256-511 byte RX packets */
+ u64 rx_sz_1023B; /* 512-1023 byte RX packets */
+ u64 rx_sz_1518B; /* 1024-1518 byte RX packets */
+ u64 rx_sz_max; /* 1519 byte to MTU RX packets */
+ u64 rx_ov_sz; /* truncated RX packets, size > MTU */
+ u64 rx_ov_rxf; /* frames dropped due to RX FIFO overflow */
+ u64 rx_ov_rrd; /* frames dropped due to RRD overflow */
+ u64 rx_align_err; /* alignment errors */
+ u64 rx_bc_byte_cnt; /* RX broadcast bytes, excluding FCS */
+ u64 rx_mc_byte_cnt; /* RX multicast bytes, excluding FCS */
+ u64 rx_err_addr; /* packets dropped due to address filtering */
+
+ /* tx */
+ u64 tx_ok; /* good TX packets */
+ u64 tx_bcast; /* good TX broadcast packets */
+ u64 tx_mcast; /* good TX multicast packets */
+ u64 tx_pause; /* TX pause frames */
+ u64 tx_exc_defer; /* TX packets deferred excessively */
+ u64 tx_ctrl; /* TX control frames, excluding pause frames */
+ u64 tx_defer; /* TX packets deferred */
+ u64 tx_byte_cnt; /* bytes transmitted, FCS is NOT included */
+ u64 tx_sz_64B; /* 64 byte TX packets */
+ u64 tx_sz_127B; /* 65-127 byte TX packets */
+ u64 tx_sz_255B; /* 128-255 byte TX packets */
+ u64 tx_sz_511B; /* 256-511 byte TX packets */
+ u64 tx_sz_1023B; /* 512-1023 byte TX packets */
+ u64 tx_sz_1518B; /* 1024-1518 byte TX packets */
+ u64 tx_sz_max; /* 1519 byte to MTU TX packets */
+ u64 tx_single_col; /* packets TX after a single collision */
+ u64 tx_multi_col; /* packets TX after multiple collisions */
+ u64 tx_late_col; /* TX packets with late collisions */
+ u64 tx_abort_col; /* TX packets aborted w/excessive collisions */
+ u64 tx_underrun; /* TX packets aborted due to TX FIFO underrun
+ * or TRD FIFO underrun
+ */
+	u64 tx_trd_eop;		/* reads beyond the EOP into the next frame
+				 * when the TRD was not written in time
+				 */
+ u64 tx_len_err; /* TX packets where length != actual size */
+ u64 tx_trunc; /* TX packets truncated due to size > MTU */
+ u64 tx_bc_byte_cnt; /* broadcast bytes transmitted, excluding FCS */
+ u64 tx_mc_byte_cnt; /* multicast bytes transmitted, excluding FCS */
+ u64 update;
+};
+
+
/* maximum interrupt vectors for msix */
#define ALX_MAX_MSIX_INTRS 16
@@ -424,6 +491,9 @@ struct alx_hw {
/* PHY link patch flag */
bool lnk_patch;
+
+	/* accumulated stats from the hardware (registers are cleared on read) */
+ struct alx_hw_stats stats;
};
static inline int alx_hw_revision(struct alx_hw *hw)
@@ -491,6 +561,7 @@ bool alx_phy_configured(struct alx_hw *hw);
void alx_configure_basic(struct alx_hw *hw);
void alx_disable_rss(struct alx_hw *hw);
bool alx_get_phy_info(struct alx_hw *hw);
+void alx_update_hw_stats(struct alx_hw *hw);
static inline u32 alx_speed_to_ethadv(int speed, u8 duplex)
{
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index c3c4c266b846..e92ffd6e1c15 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1166,10 +1166,60 @@ static void alx_poll_controller(struct net_device *netdev)
}
#endif
+static struct rtnl_link_stats64 *alx_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *net_stats)
+{
+ struct alx_priv *alx = netdev_priv(dev);
+ struct alx_hw_stats *hw_stats = &alx->hw.stats;
+
+ spin_lock(&alx->stats_lock);
+
+ alx_update_hw_stats(&alx->hw);
+
+ net_stats->tx_bytes = hw_stats->tx_byte_cnt;
+ net_stats->rx_bytes = hw_stats->rx_byte_cnt;
+ net_stats->multicast = hw_stats->rx_mcast;
+ net_stats->collisions = hw_stats->tx_single_col +
+ hw_stats->tx_multi_col +
+ hw_stats->tx_late_col +
+ hw_stats->tx_abort_col;
+
+ net_stats->rx_errors = hw_stats->rx_frag +
+ hw_stats->rx_fcs_err +
+ hw_stats->rx_len_err +
+ hw_stats->rx_ov_sz +
+ hw_stats->rx_ov_rrd +
+ hw_stats->rx_align_err +
+ hw_stats->rx_ov_rxf;
+
+ net_stats->rx_fifo_errors = hw_stats->rx_ov_rxf;
+ net_stats->rx_length_errors = hw_stats->rx_len_err;
+ net_stats->rx_crc_errors = hw_stats->rx_fcs_err;
+ net_stats->rx_frame_errors = hw_stats->rx_align_err;
+ net_stats->rx_dropped = hw_stats->rx_ov_rrd;
+
+ net_stats->tx_errors = hw_stats->tx_late_col +
+ hw_stats->tx_abort_col +
+ hw_stats->tx_underrun +
+ hw_stats->tx_trunc;
+
+ net_stats->tx_aborted_errors = hw_stats->tx_abort_col;
+ net_stats->tx_fifo_errors = hw_stats->tx_underrun;
+ net_stats->tx_window_errors = hw_stats->tx_late_col;
+
+ net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors;
+ net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors;
+
+ spin_unlock(&alx->stats_lock);
+
+ return net_stats;
+}
+
static const struct net_device_ops alx_netdev_ops = {
.ndo_open = alx_open,
.ndo_stop = alx_stop,
.ndo_start_xmit = alx_start_xmit,
+ .ndo_get_stats64 = alx_get_stats64,
.ndo_set_rx_mode = alx_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = alx_set_mac_address,
diff --git a/drivers/net/ethernet/atheros/alx/reg.h b/drivers/net/ethernet/atheros/alx/reg.h
index e4358c98bc4e..af006b44b2a6 100644
--- a/drivers/net/ethernet/atheros/alx/reg.h
+++ b/drivers/net/ethernet/atheros/alx/reg.h
@@ -404,15 +404,59 @@
/* MIB */
#define ALX_MIB_BASE 0x1700
+
#define ALX_MIB_RX_OK (ALX_MIB_BASE + 0)
+#define ALX_MIB_RX_BCAST (ALX_MIB_BASE + 4)
+#define ALX_MIB_RX_MCAST (ALX_MIB_BASE + 8)
+#define ALX_MIB_RX_PAUSE (ALX_MIB_BASE + 12)
+#define ALX_MIB_RX_CTRL (ALX_MIB_BASE + 16)
+#define ALX_MIB_RX_FCS_ERR (ALX_MIB_BASE + 20)
+#define ALX_MIB_RX_LEN_ERR (ALX_MIB_BASE + 24)
+#define ALX_MIB_RX_BYTE_CNT (ALX_MIB_BASE + 28)
+#define ALX_MIB_RX_RUNT (ALX_MIB_BASE + 32)
+#define ALX_MIB_RX_FRAG (ALX_MIB_BASE + 36)
+#define ALX_MIB_RX_SZ_64B (ALX_MIB_BASE + 40)
+#define ALX_MIB_RX_SZ_127B (ALX_MIB_BASE + 44)
+#define ALX_MIB_RX_SZ_255B (ALX_MIB_BASE + 48)
+#define ALX_MIB_RX_SZ_511B (ALX_MIB_BASE + 52)
+#define ALX_MIB_RX_SZ_1023B (ALX_MIB_BASE + 56)
+#define ALX_MIB_RX_SZ_1518B (ALX_MIB_BASE + 60)
+#define ALX_MIB_RX_SZ_MAX (ALX_MIB_BASE + 64)
+#define ALX_MIB_RX_OV_SZ (ALX_MIB_BASE + 68)
+#define ALX_MIB_RX_OV_RXF (ALX_MIB_BASE + 72)
+#define ALX_MIB_RX_OV_RRD (ALX_MIB_BASE + 76)
+#define ALX_MIB_RX_ALIGN_ERR (ALX_MIB_BASE + 80)
+#define ALX_MIB_RX_BCCNT (ALX_MIB_BASE + 84)
+#define ALX_MIB_RX_MCCNT (ALX_MIB_BASE + 88)
#define ALX_MIB_RX_ERRADDR (ALX_MIB_BASE + 92)
+
#define ALX_MIB_TX_OK (ALX_MIB_BASE + 96)
+#define ALX_MIB_TX_BCAST (ALX_MIB_BASE + 100)
+#define ALX_MIB_TX_MCAST (ALX_MIB_BASE + 104)
+#define ALX_MIB_TX_PAUSE (ALX_MIB_BASE + 108)
+#define ALX_MIB_TX_EXC_DEFER (ALX_MIB_BASE + 112)
+#define ALX_MIB_TX_CTRL (ALX_MIB_BASE + 116)
+#define ALX_MIB_TX_DEFER (ALX_MIB_BASE + 120)
+#define ALX_MIB_TX_BYTE_CNT (ALX_MIB_BASE + 124)
+#define ALX_MIB_TX_SZ_64B (ALX_MIB_BASE + 128)
+#define ALX_MIB_TX_SZ_127B (ALX_MIB_BASE + 132)
+#define ALX_MIB_TX_SZ_255B (ALX_MIB_BASE + 136)
+#define ALX_MIB_TX_SZ_511B (ALX_MIB_BASE + 140)
+#define ALX_MIB_TX_SZ_1023B (ALX_MIB_BASE + 144)
+#define ALX_MIB_TX_SZ_1518B (ALX_MIB_BASE + 148)
+#define ALX_MIB_TX_SZ_MAX (ALX_MIB_BASE + 152)
+#define ALX_MIB_TX_SINGLE_COL (ALX_MIB_BASE + 156)
+#define ALX_MIB_TX_MULTI_COL (ALX_MIB_BASE + 160)
+#define ALX_MIB_TX_LATE_COL (ALX_MIB_BASE + 164)
+#define ALX_MIB_TX_ABORT_COL (ALX_MIB_BASE + 168)
+#define ALX_MIB_TX_UNDERRUN (ALX_MIB_BASE + 172)
+#define ALX_MIB_TX_TRD_EOP (ALX_MIB_BASE + 176)
+#define ALX_MIB_TX_LEN_ERR (ALX_MIB_BASE + 180)
+#define ALX_MIB_TX_TRUNC (ALX_MIB_BASE + 184)
+#define ALX_MIB_TX_BCCNT (ALX_MIB_BASE + 188)
#define ALX_MIB_TX_MCCNT (ALX_MIB_BASE + 192)
+#define ALX_MIB_UPDATE (ALX_MIB_BASE + 196)
-#define ALX_RX_STATS_BIN ALX_MIB_RX_OK
-#define ALX_RX_STATS_END ALX_MIB_RX_ERRADDR
-#define ALX_TX_STATS_BIN ALX_MIB_TX_OK
-#define ALX_TX_STATS_END ALX_MIB_TX_MCCNT
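The defines above lay the MIB counters out as consecutive u32 registers at a 4-byte stride from ALX_MIB_BASE, which is what makes the old BIN/END range markers redundant. A compile-time check of that spacing (plain C11; constants copied from the hunk above):

	#define ALX_MIB_BASE	0x1700
	#define ALX_MIB_RX_OK	(ALX_MIB_BASE + 0)
	#define ALX_MIB_TX_OK	(ALX_MIB_BASE + 96)
	#define ALX_MIB_UPDATE	(ALX_MIB_BASE + 196)

	/* 24 u32 RX counters (offsets 0..92) precede TX_OK,
	 * and 49 counters precede the UPDATE register. */
	_Static_assert(ALX_MIB_TX_OK - ALX_MIB_RX_OK == 24 * 4, "RX block");
	_Static_assert(ALX_MIB_UPDATE - ALX_MIB_RX_OK == 49 * 4, "UPDATE offset");

	int main(void) { return 0; }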
#define ALX_ISR 0x1600
#define ALX_ISR_DIS BIT(31)
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h
index 7f9369a3b378..b9203d928938 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c.h
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h
@@ -22,7 +22,6 @@
#ifndef _ATL1C_H_
#define _ATL1C_H_
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/errno.h>
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 29801750f239..4d3258dd0a88 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -1500,31 +1500,40 @@ static struct net_device_stats *atl1c_get_stats(struct net_device *netdev)
struct net_device_stats *net_stats = &netdev->stats;
atl1c_update_hw_stats(adapter);
- net_stats->rx_packets = hw_stats->rx_ok;
- net_stats->tx_packets = hw_stats->tx_ok;
net_stats->rx_bytes = hw_stats->rx_byte_cnt;
net_stats->tx_bytes = hw_stats->tx_byte_cnt;
net_stats->multicast = hw_stats->rx_mcast;
net_stats->collisions = hw_stats->tx_1_col +
- hw_stats->tx_2_col * 2 +
- hw_stats->tx_late_col + hw_stats->tx_abort_col;
- net_stats->rx_errors = hw_stats->rx_frag + hw_stats->rx_fcs_err +
- hw_stats->rx_len_err + hw_stats->rx_sz_ov +
- hw_stats->rx_rrd_ov + hw_stats->rx_align_err;
+ hw_stats->tx_2_col +
+ hw_stats->tx_late_col +
+ hw_stats->tx_abort_col;
+
+ net_stats->rx_errors = hw_stats->rx_frag +
+ hw_stats->rx_fcs_err +
+ hw_stats->rx_len_err +
+ hw_stats->rx_sz_ov +
+ hw_stats->rx_rrd_ov +
+ hw_stats->rx_align_err +
+ hw_stats->rx_rxf_ov;
+
net_stats->rx_fifo_errors = hw_stats->rx_rxf_ov;
net_stats->rx_length_errors = hw_stats->rx_len_err;
net_stats->rx_crc_errors = hw_stats->rx_fcs_err;
net_stats->rx_frame_errors = hw_stats->rx_align_err;
- net_stats->rx_over_errors = hw_stats->rx_rrd_ov + hw_stats->rx_rxf_ov;
+ net_stats->rx_dropped = hw_stats->rx_rrd_ov;
- net_stats->rx_missed_errors = hw_stats->rx_rrd_ov + hw_stats->rx_rxf_ov;
+ net_stats->tx_errors = hw_stats->tx_late_col +
+ hw_stats->tx_abort_col +
+ hw_stats->tx_underrun +
+ hw_stats->tx_trunc;
- net_stats->tx_errors = hw_stats->tx_late_col + hw_stats->tx_abort_col +
- hw_stats->tx_underrun + hw_stats->tx_trunc;
net_stats->tx_fifo_errors = hw_stats->tx_underrun;
net_stats->tx_aborted_errors = hw_stats->tx_abort_col;
net_stats->tx_window_errors = hw_stats->tx_late_col;
+ net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors;
+ net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors;
+
return net_stats;
}
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e.h b/drivers/net/ethernet/atheros/atl1e/atl1e.h
index 1b0fe2d04a0e..0212dac7e23a 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e.h
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e.h
@@ -23,7 +23,6 @@
#ifndef _ATL1E_H_
#define _ATL1E_H_
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/errno.h>
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 7a73f3a9fcb5..d5c2d3e912e5 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1177,32 +1177,40 @@ static struct net_device_stats *atl1e_get_stats(struct net_device *netdev)
struct atl1e_hw_stats *hw_stats = &adapter->hw_stats;
struct net_device_stats *net_stats = &netdev->stats;
- net_stats->rx_packets = hw_stats->rx_ok;
- net_stats->tx_packets = hw_stats->tx_ok;
net_stats->rx_bytes = hw_stats->rx_byte_cnt;
net_stats->tx_bytes = hw_stats->tx_byte_cnt;
net_stats->multicast = hw_stats->rx_mcast;
net_stats->collisions = hw_stats->tx_1_col +
- hw_stats->tx_2_col * 2 +
- hw_stats->tx_late_col + hw_stats->tx_abort_col;
+ hw_stats->tx_2_col +
+ hw_stats->tx_late_col +
+ hw_stats->tx_abort_col;
+
+ net_stats->rx_errors = hw_stats->rx_frag +
+ hw_stats->rx_fcs_err +
+ hw_stats->rx_len_err +
+ hw_stats->rx_sz_ov +
+ hw_stats->rx_rrd_ov +
+ hw_stats->rx_align_err +
+ hw_stats->rx_rxf_ov;
- net_stats->rx_errors = hw_stats->rx_frag + hw_stats->rx_fcs_err +
- hw_stats->rx_len_err + hw_stats->rx_sz_ov +
- hw_stats->rx_rrd_ov + hw_stats->rx_align_err;
net_stats->rx_fifo_errors = hw_stats->rx_rxf_ov;
net_stats->rx_length_errors = hw_stats->rx_len_err;
net_stats->rx_crc_errors = hw_stats->rx_fcs_err;
net_stats->rx_frame_errors = hw_stats->rx_align_err;
- net_stats->rx_over_errors = hw_stats->rx_rrd_ov + hw_stats->rx_rxf_ov;
+ net_stats->rx_dropped = hw_stats->rx_rrd_ov;
- net_stats->rx_missed_errors = hw_stats->rx_rrd_ov + hw_stats->rx_rxf_ov;
+ net_stats->tx_errors = hw_stats->tx_late_col +
+ hw_stats->tx_abort_col +
+ hw_stats->tx_underrun +
+ hw_stats->tx_trunc;
- net_stats->tx_errors = hw_stats->tx_late_col + hw_stats->tx_abort_col +
- hw_stats->tx_underrun + hw_stats->tx_trunc;
net_stats->tx_fifo_errors = hw_stats->tx_underrun;
net_stats->tx_aborted_errors = hw_stats->tx_abort_col;
net_stats->tx_window_errors = hw_stats->tx_late_col;
+ net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors;
+ net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors;
+
return net_stats;
}
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 538211d6f7d9..287272dd69da 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -1678,33 +1678,42 @@ static void atl1_inc_smb(struct atl1_adapter *adapter)
struct net_device *netdev = adapter->netdev;
struct stats_msg_block *smb = adapter->smb.smb;
+ u64 new_rx_errors = smb->rx_frag +
+ smb->rx_fcs_err +
+ smb->rx_len_err +
+ smb->rx_sz_ov +
+ smb->rx_rxf_ov +
+ smb->rx_rrd_ov +
+ smb->rx_align_err;
+ u64 new_tx_errors = smb->tx_late_col +
+ smb->tx_abort_col +
+ smb->tx_underrun +
+ smb->tx_trunc;
+
/* Fill out the OS statistics structure */
- adapter->soft_stats.rx_packets += smb->rx_ok;
- adapter->soft_stats.tx_packets += smb->tx_ok;
+ adapter->soft_stats.rx_packets += smb->rx_ok + new_rx_errors;
+ adapter->soft_stats.tx_packets += smb->tx_ok + new_tx_errors;
adapter->soft_stats.rx_bytes += smb->rx_byte_cnt;
adapter->soft_stats.tx_bytes += smb->tx_byte_cnt;
adapter->soft_stats.multicast += smb->rx_mcast;
- adapter->soft_stats.collisions += (smb->tx_1_col + smb->tx_2_col * 2 +
- smb->tx_late_col + smb->tx_abort_col * adapter->hw.max_retry);
+ adapter->soft_stats.collisions += smb->tx_1_col +
+ smb->tx_2_col +
+ smb->tx_late_col +
+ smb->tx_abort_col;
/* Rx Errors */
- adapter->soft_stats.rx_errors += (smb->rx_frag + smb->rx_fcs_err +
- smb->rx_len_err + smb->rx_sz_ov + smb->rx_rxf_ov +
- smb->rx_rrd_ov + smb->rx_align_err);
+ adapter->soft_stats.rx_errors += new_rx_errors;
adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov;
adapter->soft_stats.rx_length_errors += smb->rx_len_err;
adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err;
adapter->soft_stats.rx_frame_errors += smb->rx_align_err;
- adapter->soft_stats.rx_missed_errors += (smb->rx_rrd_ov +
- smb->rx_rxf_ov);
adapter->soft_stats.rx_pause += smb->rx_pause;
adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov;
adapter->soft_stats.rx_trunc += smb->rx_sz_ov;
/* Tx Errors */
- adapter->soft_stats.tx_errors += (smb->tx_late_col +
- smb->tx_abort_col + smb->tx_underrun + smb->tx_trunc);
+ adapter->soft_stats.tx_errors += new_tx_errors;
adapter->soft_stats.tx_fifo_errors += smb->tx_underrun;
adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col;
adapter->soft_stats.tx_window_errors += smb->tx_late_col;
@@ -1718,23 +1727,18 @@ static void atl1_inc_smb(struct atl1_adapter *adapter)
adapter->soft_stats.tx_trunc += smb->tx_trunc;
adapter->soft_stats.tx_pause += smb->tx_pause;
- netdev->stats.rx_packets = adapter->soft_stats.rx_packets;
- netdev->stats.tx_packets = adapter->soft_stats.tx_packets;
netdev->stats.rx_bytes = adapter->soft_stats.rx_bytes;
netdev->stats.tx_bytes = adapter->soft_stats.tx_bytes;
netdev->stats.multicast = adapter->soft_stats.multicast;
netdev->stats.collisions = adapter->soft_stats.collisions;
netdev->stats.rx_errors = adapter->soft_stats.rx_errors;
- netdev->stats.rx_over_errors =
- adapter->soft_stats.rx_missed_errors;
netdev->stats.rx_length_errors =
adapter->soft_stats.rx_length_errors;
netdev->stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors;
netdev->stats.rx_frame_errors =
adapter->soft_stats.rx_frame_errors;
netdev->stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors;
- netdev->stats.rx_missed_errors =
- adapter->soft_stats.rx_missed_errors;
+ netdev->stats.rx_dropped = adapter->soft_stats.rx_rrd_ov;
netdev->stats.tx_errors = adapter->soft_stats.tx_errors;
netdev->stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors;
netdev->stats.tx_aborted_errors =
@@ -1743,6 +1747,9 @@ static void atl1_inc_smb(struct atl1_adapter *adapter)
adapter->soft_stats.tx_window_errors;
netdev->stats.tx_carrier_errors =
adapter->soft_stats.tx_carrier_errors;
+
+ netdev->stats.rx_packets = adapter->soft_stats.rx_packets;
+ netdev->stats.tx_packets = adapter->soft_stats.tx_packets;
}
static void atl1_update_mailbox(struct atl1_adapter *adapter)
@@ -1872,7 +1879,7 @@ static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
adapter->rx_buffer_len);
if (unlikely(!skb)) {
/* Better luck next round */
- adapter->netdev->stats.rx_dropped++;
+ adapter->soft_stats.rx_dropped++;
break;
}
@@ -3122,7 +3129,8 @@ static void atl1_remove(struct pci_dev *pdev)
* from the BIOS during POST. If we've been messing with the MAC
* address, we need to save the permanent one.
*/
- if (memcmp(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, ETH_ALEN)) {
+ if (!ether_addr_equal_unaligned(adapter->hw.mac_addr,
+ adapter->hw.perm_mac_addr)) {
memcpy(adapter->hw.mac_addr, adapter->hw.perm_mac_addr,
ETH_ALEN);
atl1_set_mac_addr(&adapter->hw);
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.h b/drivers/net/ethernet/atheros/atlx/atl1.h
index 3bf79a56220d..34a58cd846a0 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.h
+++ b/drivers/net/ethernet/atheros/atlx/atl1.h
@@ -666,6 +666,7 @@ struct atl1_sft_stats {
u64 rx_errors;
u64 rx_length_errors;
u64 rx_crc_errors;
+ u64 rx_dropped;
u64 rx_frame_errors;
u64 rx_fifo_errors;
u64 rx_missed_errors;
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 2fa5b86f139d..3f97d9fd0a71 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -23,6 +23,7 @@ config B44
depends on SSB_POSSIBLE && HAS_DMA
select SSB
select MII
+ select PHYLIB
---help---
If you have a network (Ethernet) controller of this type, say Y
or M and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 90e54d5488dc..1f7b5aa114fa 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -6,6 +6,7 @@
* Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
* Copyright (C) 2006 Broadcom Corporation.
* Copyright (C) 2007 Michael Buesch <m@bues.ch>
+ * Copyright (C) 2013 Hauke Mehrtens <hauke@hauke-m.de>
*
* Distribute under GPL.
*/
@@ -29,6 +30,7 @@
#include <linux/dma-mapping.h>
#include <linux/ssb/ssb.h>
#include <linux/slab.h>
+#include <linux/phy.h>
#include <asm/uaccess.h>
#include <asm/io.h>
@@ -284,7 +286,7 @@ static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
- if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY)
return 0;
return __b44_readphy(bp, bp->phy_addr, reg, val);
@@ -292,14 +294,14 @@ static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
{
- if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY)
return 0;
return __b44_writephy(bp, bp->phy_addr, reg, val);
}
/* miilib interface */
-static int b44_mii_read(struct net_device *dev, int phy_id, int location)
+static int b44_mdio_read_mii(struct net_device *dev, int phy_id, int location)
{
u32 val;
struct b44 *bp = netdev_priv(dev);
@@ -309,19 +311,36 @@ static int b44_mii_read(struct net_device *dev, int phy_id, int location)
return val;
}
-static void b44_mii_write(struct net_device *dev, int phy_id, int location,
- int val)
+static void b44_mdio_write_mii(struct net_device *dev, int phy_id, int location,
+ int val)
{
struct b44 *bp = netdev_priv(dev);
__b44_writephy(bp, phy_id, location, val);
}
+static int b44_mdio_read_phylib(struct mii_bus *bus, int phy_id, int location)
+{
+ u32 val;
+ struct b44 *bp = bus->priv;
+ int rc = __b44_readphy(bp, phy_id, location, &val);
+ if (rc)
+ return 0xffffffff;
+ return val;
+}
+
+static int b44_mdio_write_phylib(struct mii_bus *bus, int phy_id, int location,
+ u16 val)
+{
+ struct b44 *bp = bus->priv;
+ return __b44_writephy(bp, phy_id, location, val);
+}
+
static int b44_phy_reset(struct b44 *bp)
{
u32 val;
int err;
- if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY)
return 0;
err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
if (err)
@@ -423,7 +442,7 @@ static int b44_setup_phy(struct b44 *bp)
b44_wap54g10_workaround(bp);
- if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY)
return 0;
if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
goto out;
@@ -521,12 +540,14 @@ static void b44_check_phy(struct b44 *bp)
{
u32 bmsr, aux;
- if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
bp->flags |= B44_FLAG_100_BASE_T;
- bp->flags |= B44_FLAG_FULL_DUPLEX;
if (!netif_carrier_ok(bp->dev)) {
u32 val = br32(bp, B44_TX_CTRL);
- val |= TX_CTRL_DUPLEX;
+ if (bp->flags & B44_FLAG_FULL_DUPLEX)
+ val |= TX_CTRL_DUPLEX;
+ else
+ val &= ~TX_CTRL_DUPLEX;
bw32(bp, B44_TX_CTRL, val);
netif_carrier_on(bp->dev);
b44_link_report(bp);
@@ -1315,7 +1336,7 @@ static void b44_chip_reset(struct b44 *bp, int reset_kind)
if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
br32(bp, B44_ENET_CTRL);
- bp->flags &= ~B44_FLAG_INTERNAL_PHY;
+ bp->flags |= B44_FLAG_EXTERNAL_PHY;
} else {
u32 val = br32(bp, B44_DEVCTRL);
@@ -1324,7 +1345,7 @@ static void b44_chip_reset(struct b44 *bp, int reset_kind)
br32(bp, B44_DEVCTRL);
udelay(100);
}
- bp->flags |= B44_FLAG_INTERNAL_PHY;
+ bp->flags &= ~B44_FLAG_EXTERNAL_PHY;
}
}
@@ -1339,7 +1360,10 @@ static void b44_halt(struct b44 *bp)
bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
/* now reset the chip, but without enabling the MAC&PHY
* part of it. This has to be done _after_ we shut down the PHY */
- b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY)
+ b44_chip_reset(bp, B44_CHIP_RESET_FULL);
+ else
+ b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
}
/* bp->lock is held. */
@@ -1805,6 +1829,11 @@ static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct b44 *bp = netdev_priv(dev);
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
+ BUG_ON(!bp->phydev);
+ return phy_ethtool_gset(bp->phydev, cmd);
+ }
+
cmd->supported = (SUPPORTED_Autoneg);
cmd->supported |= (SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full |
@@ -1828,8 +1857,8 @@ static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
DUPLEX_FULL : DUPLEX_HALF;
cmd->port = 0;
cmd->phy_address = bp->phy_addr;
- cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
- XCVR_INTERNAL : XCVR_EXTERNAL;
+ cmd->transceiver = (bp->flags & B44_FLAG_EXTERNAL_PHY) ?
+ XCVR_EXTERNAL : XCVR_INTERNAL;
cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
AUTONEG_DISABLE : AUTONEG_ENABLE;
if (cmd->autoneg == AUTONEG_ENABLE)
@@ -1846,7 +1875,23 @@ static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct b44 *bp = netdev_priv(dev);
- u32 speed = ethtool_cmd_speed(cmd);
+ u32 speed;
+ int ret;
+
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
+ BUG_ON(!bp->phydev);
+ spin_lock_irq(&bp->lock);
+ if (netif_running(dev))
+ b44_setup_phy(bp);
+
+ ret = phy_ethtool_sset(bp->phydev, cmd);
+
+ spin_unlock_irq(&bp->lock);
+
+ return ret;
+ }
+
+ speed = ethtool_cmd_speed(cmd);
/* We do not support gigabit. */
if (cmd->autoneg == AUTONEG_ENABLE) {
@@ -2076,7 +2121,6 @@ static const struct ethtool_ops b44_ethtool_ops = {
static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
- struct mii_ioctl_data *data = if_mii(ifr);
struct b44 *bp = netdev_priv(dev);
int err = -EINVAL;
@@ -2084,7 +2128,12 @@ static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
goto out;
spin_lock_irq(&bp->lock);
- err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
+ BUG_ON(!bp->phydev);
+ err = phy_mii_ioctl(bp->phydev, ifr, cmd);
+ } else {
+ err = generic_mii_ioctl(&bp->mii_if, if_mii(ifr), cmd, NULL);
+ }
spin_unlock_irq(&bp->lock);
out:
return err;
@@ -2146,6 +2195,141 @@ static const struct net_device_ops b44_netdev_ops = {
#endif
};
+static void b44_adjust_link(struct net_device *dev)
+{
+ struct b44 *bp = netdev_priv(dev);
+ struct phy_device *phydev = bp->phydev;
+ bool status_changed = false;
+
+ BUG_ON(!phydev);
+
+ if (bp->old_link != phydev->link) {
+ status_changed = true;
+ bp->old_link = phydev->link;
+ }
+
+ /* reflect duplex change */
+ if (phydev->link) {
+ if ((phydev->duplex == DUPLEX_HALF) &&
+ (bp->flags & B44_FLAG_FULL_DUPLEX)) {
+ status_changed = true;
+ bp->flags &= ~B44_FLAG_FULL_DUPLEX;
+ } else if ((phydev->duplex == DUPLEX_FULL) &&
+ !(bp->flags & B44_FLAG_FULL_DUPLEX)) {
+ status_changed = true;
+ bp->flags |= B44_FLAG_FULL_DUPLEX;
+ }
+ }
+
+ if (status_changed) {
+ b44_check_phy(bp);
+ phy_print_status(phydev);
+ }
+}
+
+static int b44_register_phy_one(struct b44 *bp)
+{
+ struct mii_bus *mii_bus;
+ struct ssb_device *sdev = bp->sdev;
+ struct phy_device *phydev;
+ char bus_id[MII_BUS_ID_SIZE + 3];
+ struct ssb_sprom *sprom = &sdev->bus->sprom;
+ int err;
+
+ mii_bus = mdiobus_alloc();
+ if (!mii_bus) {
+ dev_err(sdev->dev, "mdiobus_alloc() failed\n");
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ mii_bus->priv = bp;
+ mii_bus->read = b44_mdio_read_phylib;
+ mii_bus->write = b44_mdio_write_phylib;
+ mii_bus->name = "b44_eth_mii";
+ mii_bus->parent = sdev->dev;
+ mii_bus->phy_mask = ~(1 << bp->phy_addr);
+ snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%x", instance);
+ mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ if (!mii_bus->irq) {
+ dev_err(sdev->dev, "mii_bus irq allocation failed\n");
+ err = -ENOMEM;
+ goto err_out_mdiobus;
+ }
+
+ memset(mii_bus->irq, PHY_POLL, sizeof(int) * PHY_MAX_ADDR);
+
+ bp->mii_bus = mii_bus;
+
+ err = mdiobus_register(mii_bus);
+ if (err) {
+ dev_err(sdev->dev, "failed to register MII bus\n");
+ goto err_out_mdiobus_irq;
+ }
+
+ if (!bp->mii_bus->phy_map[bp->phy_addr] &&
+ (sprom->boardflags_lo & (B44_BOARDFLAG_ROBO | B44_BOARDFLAG_ADM))) {
+
+ dev_info(sdev->dev,
+ "could not find PHY at %i, use fixed one\n",
+ bp->phy_addr);
+
+ bp->phy_addr = 0;
+ snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, "fixed-0",
+ bp->phy_addr);
+ } else {
+ snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id,
+ bp->phy_addr);
+ }
+
+ phydev = phy_connect(bp->dev, bus_id, &b44_adjust_link,
+ PHY_INTERFACE_MODE_MII);
+ if (IS_ERR(phydev)) {
+ dev_err(sdev->dev, "could not attach PHY at %i\n",
+ bp->phy_addr);
+ err = PTR_ERR(phydev);
+ goto err_out_mdiobus_unregister;
+ }
+
+ /* mask with MAC supported features */
+ phydev->supported &= (SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_MII);
+ phydev->advertising = phydev->supported;
+
+ bp->phydev = phydev;
+ bp->old_link = 0;
+ bp->phy_addr = phydev->addr;
+
+ dev_info(sdev->dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
+ phydev->drv->name, dev_name(&phydev->dev));
+
+ return 0;
+
+err_out_mdiobus_unregister:
+ mdiobus_unregister(mii_bus);
+
+err_out_mdiobus_irq:
+ kfree(mii_bus->irq);
+
+err_out_mdiobus:
+ mdiobus_free(mii_bus);
+
+err_out:
+ return err;
+}
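One detail in the function above: mii_bus->phy_mask = ~(1 << bp->phy_addr) restricts the bus scan to the single expected address, because a set bit tells the MDIO core to skip that address during probing. A standalone check of the mask arithmetic (illustrative, not kernel code):

	#include <assert.h>
	#include <stdint.h>

	#define PHY_MAX_ADDR 32

	int main(void)
	{
		int phy_addr = 30;
		uint32_t phy_mask = ~(1u << phy_addr);	/* set bit = skip */
		int a;

		for (a = 0; a < PHY_MAX_ADDR; a++)
			assert(!(phy_mask & (1u << a)) == (a == phy_addr));
		return 0;
	}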
+
+static void b44_unregister_phy_one(struct b44 *bp)
+{
+ struct mii_bus *mii_bus = bp->mii_bus;
+
+ phy_disconnect(bp->phydev);
+ mdiobus_unregister(mii_bus);
+ kfree(mii_bus->irq);
+ mdiobus_free(mii_bus);
+}
+
static int b44_init_one(struct ssb_device *sdev,
const struct ssb_device_id *ent)
{
@@ -2206,9 +2390,15 @@ static int b44_init_one(struct ssb_device *sdev,
goto err_out_powerdown;
}
+ if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
+ dev_err(sdev->dev, "No PHY present on this MAC, aborting\n");
+ err = -ENODEV;
+ goto err_out_powerdown;
+ }
+
bp->mii_if.dev = dev;
- bp->mii_if.mdio_read = b44_mii_read;
- bp->mii_if.mdio_write = b44_mii_write;
+ bp->mii_if.mdio_read = b44_mdio_read_mii;
+ bp->mii_if.mdio_write = b44_mdio_write_mii;
bp->mii_if.phy_id = bp->phy_addr;
bp->mii_if.phy_id_mask = 0x1f;
bp->mii_if.reg_num_mask = 0x1f;
@@ -2236,13 +2426,26 @@ static int b44_init_one(struct ssb_device *sdev,
b44_chip_reset(bp, B44_CHIP_RESET_FULL);
/* do a phy reset to test if there is an active phy */
- if (b44_phy_reset(bp) < 0)
- bp->phy_addr = B44_PHY_ADDR_NO_PHY;
+ err = b44_phy_reset(bp);
+ if (err < 0) {
+ dev_err(sdev->dev, "phy reset failed\n");
+ goto err_out_unregister_netdev;
+ }
+
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
+ err = b44_register_phy_one(bp);
+ if (err) {
+ dev_err(sdev->dev, "Cannot register PHY, aborting\n");
+ goto err_out_unregister_netdev;
+ }
+ }
netdev_info(dev, "%s %pM\n", DRV_DESCRIPTION, dev->dev_addr);
return 0;
+err_out_unregister_netdev:
+ unregister_netdev(dev);
err_out_powerdown:
ssb_bus_may_powerdown(sdev->bus);
@@ -2256,8 +2459,11 @@ out:
static void b44_remove_one(struct ssb_device *sdev)
{
struct net_device *dev = ssb_get_drvdata(sdev);
+ struct b44 *bp = netdev_priv(dev);
unregister_netdev(dev);
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY)
+ b44_unregister_phy_one(bp);
ssb_device_disable(sdev, 0);
ssb_bus_may_powerdown(sdev->bus);
free_netdev(dev);
diff --git a/drivers/net/ethernet/broadcom/b44.h b/drivers/net/ethernet/broadcom/b44.h
index 8993d72f0420..3e9c3fc7591b 100644
--- a/drivers/net/ethernet/broadcom/b44.h
+++ b/drivers/net/ethernet/broadcom/b44.h
@@ -280,9 +280,10 @@ struct ring_info {
dma_addr_t mapping;
};
-#define B44_MCAST_TABLE_SIZE 32
-#define B44_PHY_ADDR_NO_PHY 30
-#define B44_MDC_RATIO 5000000
+#define B44_MCAST_TABLE_SIZE 32
+#define B44_PHY_ADDR_NO_LOCAL_PHY 30 /* no local phy regs */
+#define B44_PHY_ADDR_NO_PHY 31 /* no phy present at all */
+#define B44_MDC_RATIO 5000000
#define B44_STAT_REG_DECLARE \
_B44(tx_good_octets) \
@@ -344,6 +345,9 @@ B44_STAT_REG_DECLARE
struct u64_stats_sync syncp;
};
+#define B44_BOARDFLAG_ROBO 0x0010 /* Board has robo switch */
+#define B44_BOARDFLAG_ADM 0x0080 /* Board has ADMtek switch */
+
struct ssb_device;
struct b44 {
@@ -376,7 +380,7 @@ struct b44 {
#define B44_FLAG_ADV_10FULL 0x02000000
#define B44_FLAG_ADV_100HALF 0x04000000
#define B44_FLAG_ADV_100FULL 0x08000000
-#define B44_FLAG_INTERNAL_PHY 0x10000000
+#define B44_FLAG_EXTERNAL_PHY 0x10000000
#define B44_FLAG_RX_RING_HACK 0x20000000
#define B44_FLAG_TX_RING_HACK 0x40000000
#define B44_FLAG_WOL_ENABLE 0x80000000
@@ -396,6 +400,9 @@ struct b44 {
u32 tx_pending;
u8 phy_addr;
u8 force_copybreak;
+ struct phy_device *phydev;
+ struct mii_bus *mii_bus;
+ int old_link;
struct mii_if_info mii_if;
};
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index e2aa09ce6af7..0297a79a38e1 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -96,6 +96,19 @@ static void bgmac_dma_tx_enable(struct bgmac *bgmac,
u32 ctl;
ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL);
+ if (bgmac->core->id.rev >= 4) {
+ ctl &= ~BGMAC_DMA_TX_BL_MASK;
+ ctl |= BGMAC_DMA_TX_BL_128 << BGMAC_DMA_TX_BL_SHIFT;
+
+ ctl &= ~BGMAC_DMA_TX_MR_MASK;
+ ctl |= BGMAC_DMA_TX_MR_2 << BGMAC_DMA_TX_MR_SHIFT;
+
+ ctl &= ~BGMAC_DMA_TX_PC_MASK;
+ ctl |= BGMAC_DMA_TX_PC_16 << BGMAC_DMA_TX_PC_SHIFT;
+
+ ctl &= ~BGMAC_DMA_TX_PT_MASK;
+ ctl |= BGMAC_DMA_TX_PT_8 << BGMAC_DMA_TX_PT_SHIFT;
+ }
ctl |= BGMAC_DMA_TX_ENABLE;
ctl |= BGMAC_DMA_TX_PARITY_DISABLE;
bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl);
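Each of the new rev >= 4 DMA fields is programmed with the same idiom: clear the field with its mask, then OR in an enumerated value shifted into place. A self-contained sketch using the TX burst-length field (constants copied from bgmac.h later in this patch; set_field() is illustrative):

	#include <assert.h>
	#include <stdint.h>

	#define BGMAC_DMA_TX_BL_MASK	0x001C0000
	#define BGMAC_DMA_TX_BL_SHIFT	18
	#define BGMAC_DMA_TX_BL_128	3

	static uint32_t set_field(uint32_t ctl, uint32_t mask,
				  unsigned int shift, uint32_t val)
	{
		return (ctl & ~mask) | (val << shift);
	}

	int main(void)
	{
		uint32_t ctl = 0xffffffff;	/* arbitrary starting value */

		ctl = set_field(ctl, BGMAC_DMA_TX_BL_MASK,
				BGMAC_DMA_TX_BL_SHIFT, BGMAC_DMA_TX_BL_128);
		assert(((ctl & BGMAC_DMA_TX_BL_MASK) >> BGMAC_DMA_TX_BL_SHIFT)
		       == BGMAC_DMA_TX_BL_128);
		return 0;
	}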
@@ -240,6 +253,16 @@ static void bgmac_dma_rx_enable(struct bgmac *bgmac,
u32 ctl;
ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);
+ if (bgmac->core->id.rev >= 4) {
+ ctl &= ~BGMAC_DMA_RX_BL_MASK;
+ ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT;
+
+ ctl &= ~BGMAC_DMA_RX_PC_MASK;
+ ctl |= BGMAC_DMA_RX_PC_8 << BGMAC_DMA_RX_PC_SHIFT;
+
+ ctl &= ~BGMAC_DMA_RX_PT_MASK;
+ ctl |= BGMAC_DMA_RX_PT_1 << BGMAC_DMA_RX_PT_SHIFT;
+ }
ctl &= BGMAC_DMA_RX_ADDREXT_MASK;
ctl |= BGMAC_DMA_RX_ENABLE;
ctl |= BGMAC_DMA_RX_PARITY_DISABLE;
@@ -682,70 +705,6 @@ static int bgmac_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg, u16 value)
return 0;
}
-/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyforce */
-static void bgmac_phy_force(struct bgmac *bgmac)
-{
- u16 ctl;
- u16 mask = ~(BGMAC_PHY_CTL_SPEED | BGMAC_PHY_CTL_SPEED_MSB |
- BGMAC_PHY_CTL_ANENAB | BGMAC_PHY_CTL_DUPLEX);
-
- if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
- return;
-
- if (bgmac->autoneg)
- return;
-
- ctl = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL);
- ctl &= mask;
- if (bgmac->full_duplex)
- ctl |= BGMAC_PHY_CTL_DUPLEX;
- if (bgmac->speed == BGMAC_SPEED_100)
- ctl |= BGMAC_PHY_CTL_SPEED_100;
- else if (bgmac->speed == BGMAC_SPEED_1000)
- ctl |= BGMAC_PHY_CTL_SPEED_1000;
- bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL, ctl);
-}
-
-/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyadvertise */
-static void bgmac_phy_advertise(struct bgmac *bgmac)
-{
- u16 adv;
-
- if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
- return;
-
- if (!bgmac->autoneg)
- return;
-
- /* Adv selected 10/100 speeds */
- adv = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV);
- adv &= ~(BGMAC_PHY_ADV_10HALF | BGMAC_PHY_ADV_10FULL |
- BGMAC_PHY_ADV_100HALF | BGMAC_PHY_ADV_100FULL);
- if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_10)
- adv |= BGMAC_PHY_ADV_10HALF;
- if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_100)
- adv |= BGMAC_PHY_ADV_100HALF;
- if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_10)
- adv |= BGMAC_PHY_ADV_10FULL;
- if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_100)
- adv |= BGMAC_PHY_ADV_100FULL;
- bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV, adv);
-
- /* Adv selected 1000 speeds */
- adv = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV2);
- adv &= ~(BGMAC_PHY_ADV2_1000HALF | BGMAC_PHY_ADV2_1000FULL);
- if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_1000)
- adv |= BGMAC_PHY_ADV2_1000HALF;
- if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_1000)
- adv |= BGMAC_PHY_ADV2_1000FULL;
- bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV2, adv);
-
- /* Restart */
- bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL,
- bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL) |
- BGMAC_PHY_CTL_RESTART);
-}
-
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */
static void bgmac_phy_init(struct bgmac *bgmac)
{
@@ -789,11 +748,9 @@ static void bgmac_phy_reset(struct bgmac *bgmac)
if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
return;
- bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL,
- BGMAC_PHY_CTL_RESET);
+ bgmac_phy_write(bgmac, bgmac->phyaddr, MII_BMCR, BMCR_RESET);
udelay(100);
- if (bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL) &
- BGMAC_PHY_CTL_RESET)
+ if (bgmac_phy_read(bgmac, bgmac->phyaddr, MII_BMCR) & BMCR_RESET)
bgmac_err(bgmac, "PHY reset failed\n");
bgmac_phy_init(bgmac);
}
@@ -811,13 +768,13 @@ static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set,
u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
u32 new_val = (cmdcfg & mask) | set;
- bgmac_set(bgmac, BGMAC_CMDCFG, BGMAC_CMDCFG_SR);
+ bgmac_set(bgmac, BGMAC_CMDCFG, BGMAC_CMDCFG_SR(bgmac->core->id.rev));
udelay(2);
if (new_val != cmdcfg || force)
bgmac_write(bgmac, BGMAC_CMDCFG, new_val);
- bgmac_mask(bgmac, BGMAC_CMDCFG, ~BGMAC_CMDCFG_SR);
+ bgmac_mask(bgmac, BGMAC_CMDCFG, ~BGMAC_CMDCFG_SR(bgmac->core->id.rev));
udelay(2);
}
@@ -876,31 +833,56 @@ static void bgmac_clear_mib(struct bgmac *bgmac)
}
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_speed */
-static void bgmac_speed(struct bgmac *bgmac, int speed)
+static void bgmac_mac_speed(struct bgmac *bgmac)
{
u32 mask = ~(BGMAC_CMDCFG_ES_MASK | BGMAC_CMDCFG_HD);
u32 set = 0;
- if (speed & BGMAC_SPEED_10)
+ switch (bgmac->mac_speed) {
+ case SPEED_10:
set |= BGMAC_CMDCFG_ES_10;
- if (speed & BGMAC_SPEED_100)
+ break;
+ case SPEED_100:
set |= BGMAC_CMDCFG_ES_100;
- if (speed & BGMAC_SPEED_1000)
+ break;
+ case SPEED_1000:
set |= BGMAC_CMDCFG_ES_1000;
- if (!bgmac->full_duplex)
+ break;
+ case SPEED_2500:
+ set |= BGMAC_CMDCFG_ES_2500;
+ break;
+ default:
+ bgmac_err(bgmac, "Unsupported speed: %d\n", bgmac->mac_speed);
+ }
+
+ if (bgmac->mac_duplex == DUPLEX_HALF)
set |= BGMAC_CMDCFG_HD;
+
bgmac_cmdcfg_maskset(bgmac, mask, set, true);
}
static void bgmac_miiconfig(struct bgmac *bgmac)
{
- u8 imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
- BGMAC_DS_MM_SHIFT;
- if (imode == 0 || imode == 1) {
- if (bgmac->autoneg)
- bgmac_speed(bgmac, BGMAC_SPEED_100);
- else
- bgmac_speed(bgmac, bgmac->speed);
+ struct bcma_device *core = bgmac->core;
+ struct bcma_chipinfo *ci = &core->bus->chipinfo;
+ u8 imode;
+
+ if (ci->id == BCMA_CHIP_ID_BCM4707 ||
+ ci->id == BCMA_CHIP_ID_BCM53018) {
+ bcma_awrite32(core, BCMA_IOCTL,
+ bcma_aread32(core, BCMA_IOCTL) | 0x40 |
+ BGMAC_BCMA_IOCTL_SW_CLKEN);
+ bgmac->mac_speed = SPEED_2500;
+ bgmac->mac_duplex = DUPLEX_FULL;
+ bgmac_mac_speed(bgmac);
+ } else {
+ imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) &
+ BGMAC_DS_MM_MASK) >> BGMAC_DS_MM_SHIFT;
+ if (imode == 0 || imode == 1) {
+ bgmac->mac_speed = SPEED_100;
+ bgmac->mac_duplex = DUPLEX_FULL;
+ bgmac_mac_speed(bgmac);
+ }
}
}
@@ -910,7 +892,7 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
struct bcma_device *core = bgmac->core;
struct bcma_bus *bus = core->bus;
struct bcma_chipinfo *ci = &bus->chipinfo;
- u32 flags = 0;
+ u32 flags;
u32 iost;
int i;
@@ -933,26 +915,36 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
}
iost = bcma_aread32(core, BCMA_IOST);
- if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == 10) ||
+ if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
(ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
- (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == 9))
+ (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188))
iost &= ~BGMAC_BCMA_IOST_ATTACHED;
- if (iost & BGMAC_BCMA_IOST_ATTACHED) {
- flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
- if (!bgmac->has_robosw)
- flags |= BGMAC_BCMA_IOCTL_SW_RESET;
+ /* 3GMAC: for BCM4707, only do core reset at bgmac_probe() */
+ if (ci->id != BCMA_CHIP_ID_BCM4707) {
+ flags = 0;
+ if (iost & BGMAC_BCMA_IOST_ATTACHED) {
+ flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
+ if (!bgmac->has_robosw)
+ flags |= BGMAC_BCMA_IOCTL_SW_RESET;
+ }
+ bcma_core_enable(core, flags);
}
- bcma_core_enable(core, flags);
-
- if (core->id.rev > 2) {
- bgmac_set(bgmac, BCMA_CLKCTLST, 1 << 8);
- bgmac_wait_value(bgmac->core, BCMA_CLKCTLST, 1 << 24, 1 << 24,
+ /* Request Misc PLL for corerev > 2 */
+ if (core->id.rev > 2 &&
+ ci->id != BCMA_CHIP_ID_BCM4707 &&
+ ci->id != BCMA_CHIP_ID_BCM53018) {
+ bgmac_set(bgmac, BCMA_CLKCTLST,
+ BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ);
+ bgmac_wait_value(bgmac->core, BCMA_CLKCTLST,
+ BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
+ BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
1000);
}
- if (ci->id == BCMA_CHIP_ID_BCM5357 || ci->id == BCMA_CHIP_ID_BCM4749 ||
+ if (ci->id == BCMA_CHIP_ID_BCM5357 ||
+ ci->id == BCMA_CHIP_ID_BCM4749 ||
ci->id == BCMA_CHIP_ID_BCM53572) {
struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
u8 et_swtype = 0;
@@ -967,10 +959,11 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
et_swtype &= 0x0f;
et_swtype <<= 4;
sw_type = et_swtype;
- } else if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == 9) {
+ } else if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM5358) {
sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII;
- } else if ((ci->id != BCMA_CHIP_ID_BCM53572 && ci->pkg == 10) ||
- (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == 9)) {
+ } else if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
+ (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
+ (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188)) {
sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII |
BGMAC_CHIPCTL_1_SW_TYPE_RGMII;
}
@@ -1007,8 +1000,10 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
BGMAC_CMDCFG_PROM |
BGMAC_CMDCFG_NLC |
BGMAC_CMDCFG_CFE |
- BGMAC_CMDCFG_SR,
+ BGMAC_CMDCFG_SR(core->id.rev),
false);
+ bgmac->mac_speed = SPEED_UNKNOWN;
+ bgmac->mac_duplex = DUPLEX_UNKNOWN;
bgmac_clear_mib(bgmac);
if (core->id.id == BCMA_CORE_4706_MAC_GBIT)
@@ -1048,7 +1043,7 @@ static void bgmac_enable(struct bgmac *bgmac)
cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE),
- BGMAC_CMDCFG_SR, true);
+ BGMAC_CMDCFG_SR(bgmac->core->id.rev), true);
udelay(2);
cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE;
bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg);
@@ -1077,12 +1072,16 @@ static void bgmac_enable(struct bgmac *bgmac)
break;
}
- rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
- rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
- bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) / 1000000;
- mdp = (bp_clk * 128 / 1000) - 3;
- rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT);
- bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl);
+ if (ci->id != BCMA_CHIP_ID_BCM4707 &&
+ ci->id != BCMA_CHIP_ID_BCM53018) {
+ rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
+ rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
+ bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) /
+ 1000000;
+ mdp = (bp_clk * 128 / 1000) - 3;
+ rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT);
+ bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl);
+ }
}
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipinit */
@@ -1108,13 +1107,6 @@ static void bgmac_chip_init(struct bgmac *bgmac, bool full_init)
bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN);
- if (!bgmac->autoneg) {
- bgmac_speed(bgmac, bgmac->speed);
- bgmac_phy_force(bgmac);
- } else if (bgmac->speed) { /* if there is anything to adv */
- bgmac_phy_advertise(bgmac);
- }
-
if (full_init) {
bgmac_dma_init(bgmac);
if (1) /* FIXME: is there any case we don't want IRQs? */
@@ -1204,6 +1196,8 @@ static int bgmac_open(struct net_device *net_dev)
}
napi_enable(&bgmac->napi);
+ phy_start(bgmac->phy_dev);
+
netif_carrier_on(net_dev);
err_out:
@@ -1216,6 +1210,8 @@ static int bgmac_stop(struct net_device *net_dev)
netif_carrier_off(net_dev);
+ phy_stop(bgmac->phy_dev);
+
napi_disable(&bgmac->napi);
bgmac_chip_intrs_off(bgmac);
free_irq(bgmac->core->irq, net_dev);
@@ -1252,27 +1248,11 @@ static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
struct bgmac *bgmac = netdev_priv(net_dev);
- struct mii_ioctl_data *data = if_mii(ifr);
-
- switch (cmd) {
- case SIOCGMIIPHY:
- data->phy_id = bgmac->phyaddr;
- /* fallthru */
- case SIOCGMIIREG:
- if (!netif_running(net_dev))
- return -EAGAIN;
- data->val_out = bgmac_phy_read(bgmac, data->phy_id,
- data->reg_num & 0x1f);
- return 0;
- case SIOCSMIIREG:
- if (!netif_running(net_dev))
- return -EAGAIN;
- bgmac_phy_write(bgmac, data->phy_id, data->reg_num & 0x1f,
- data->val_in);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
+
+ if (!netif_running(net_dev))
+ return -EINVAL;
+
+ return phy_mii_ioctl(bgmac->phy_dev, ifr, cmd);
}
static const struct net_device_ops bgmac_netdev_ops = {
@@ -1294,61 +1274,16 @@ static int bgmac_get_settings(struct net_device *net_dev,
{
struct bgmac *bgmac = netdev_priv(net_dev);
- cmd->supported = SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_Autoneg;
-
- if (bgmac->autoneg) {
- WARN_ON(cmd->advertising);
- if (bgmac->full_duplex) {
- if (bgmac->speed & BGMAC_SPEED_10)
- cmd->advertising |= ADVERTISED_10baseT_Full;
- if (bgmac->speed & BGMAC_SPEED_100)
- cmd->advertising |= ADVERTISED_100baseT_Full;
- if (bgmac->speed & BGMAC_SPEED_1000)
- cmd->advertising |= ADVERTISED_1000baseT_Full;
- } else {
- if (bgmac->speed & BGMAC_SPEED_10)
- cmd->advertising |= ADVERTISED_10baseT_Half;
- if (bgmac->speed & BGMAC_SPEED_100)
- cmd->advertising |= ADVERTISED_100baseT_Half;
- if (bgmac->speed & BGMAC_SPEED_1000)
- cmd->advertising |= ADVERTISED_1000baseT_Half;
- }
- } else {
- switch (bgmac->speed) {
- case BGMAC_SPEED_10:
- ethtool_cmd_speed_set(cmd, SPEED_10);
- break;
- case BGMAC_SPEED_100:
- ethtool_cmd_speed_set(cmd, SPEED_100);
- break;
- case BGMAC_SPEED_1000:
- ethtool_cmd_speed_set(cmd, SPEED_1000);
- break;
- }
- }
-
- cmd->duplex = bgmac->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
-
- cmd->autoneg = bgmac->autoneg;
-
- return 0;
+ return phy_ethtool_gset(bgmac->phy_dev, cmd);
}
-#if 0
static int bgmac_set_settings(struct net_device *net_dev,
struct ethtool_cmd *cmd)
{
struct bgmac *bgmac = netdev_priv(net_dev);
- return -1;
+ return phy_ethtool_sset(bgmac->phy_dev, cmd);
}
-#endif
static void bgmac_get_drvinfo(struct net_device *net_dev,
struct ethtool_drvinfo *info)
@@ -1359,6 +1294,7 @@ static void bgmac_get_drvinfo(struct net_device *net_dev,
static const struct ethtool_ops bgmac_ethtool_ops = {
.get_settings = bgmac_get_settings,
+ .set_settings = bgmac_set_settings,
.get_drvinfo = bgmac_get_drvinfo,
};
@@ -1377,9 +1313,35 @@ static int bgmac_mii_write(struct mii_bus *bus, int mii_id, int regnum,
return bgmac_phy_write(bus->priv, mii_id, regnum, value);
}
+static void bgmac_adjust_link(struct net_device *net_dev)
+{
+ struct bgmac *bgmac = netdev_priv(net_dev);
+ struct phy_device *phy_dev = bgmac->phy_dev;
+ bool update = false;
+
+ if (phy_dev->link) {
+ if (phy_dev->speed != bgmac->mac_speed) {
+ bgmac->mac_speed = phy_dev->speed;
+ update = true;
+ }
+
+ if (phy_dev->duplex != bgmac->mac_duplex) {
+ bgmac->mac_duplex = phy_dev->duplex;
+ update = true;
+ }
+ }
+
+ if (update) {
+ bgmac_mac_speed(bgmac);
+ phy_print_status(phy_dev);
+ }
+}
+
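Both this callback and b44_adjust_link() earlier in the patch follow the usual phylib pattern: cache the speed/duplex the MAC was last programmed with, and only touch the hardware when the PHY reports something different, so repeated link notifications do not cause redundant MAC writes. A stripped-down sketch (plain C; reprogram() stands in for bgmac_mac_speed()):

	#include <stdbool.h>
	#include <stdio.h>

	struct mac_state { int speed; int duplex; };

	static void reprogram(const struct mac_state *s)
	{
		printf("MAC reprogrammed: %d Mb/s, duplex %d\n",
		       s->speed, s->duplex);
	}

	/* Called on every PHY state change; acts only when something moved. */
	static void adjust_link(struct mac_state *mac, bool link,
				int speed, int duplex)
	{
		bool update = false;

		if (link && speed != mac->speed) {
			mac->speed = speed;
			update = true;
		}
		if (link && duplex != mac->duplex) {
			mac->duplex = duplex;
			update = true;
		}
		if (update)
			reprogram(mac);
	}

	int main(void)
	{
		struct mac_state mac = { -1, -1 };

		adjust_link(&mac, true, 1000, 1);	/* first link-up: reprograms */
		adjust_link(&mac, true, 1000, 1);	/* no change: silent */
		return 0;
	}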
static int bgmac_mii_register(struct bgmac *bgmac)
{
struct mii_bus *mii_bus;
+ struct phy_device *phy_dev;
+ char bus_id[MII_BUS_ID_SIZE + 3];
int i, err = 0;
mii_bus = mdiobus_alloc();
@@ -1411,8 +1373,22 @@ static int bgmac_mii_register(struct bgmac *bgmac)
bgmac->mii_bus = mii_bus;
+ /* Connect to the PHY */
+ snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id,
+ bgmac->phyaddr);
+ phy_dev = phy_connect(bgmac->net_dev, bus_id, &bgmac_adjust_link,
+ PHY_INTERFACE_MODE_MII);
+ if (IS_ERR(phy_dev)) {
+ bgmac_err(bgmac, "PHY connecton failed\n");
+ err = PTR_ERR(phy_dev);
+ goto err_unregister_bus;
+ }
+ bgmac->phy_dev = phy_dev;
+
return err;
+err_unregister_bus:
+ mdiobus_unregister(mii_bus);
err_free_irq:
kfree(mii_bus->irq);
err_free_bus:
@@ -1467,9 +1443,6 @@ static int bgmac_probe(struct bcma_device *core)
bcma_set_drvdata(core, bgmac);
/* Defaults */
- bgmac->autoneg = true;
- bgmac->full_duplex = true;
- bgmac->speed = BGMAC_SPEED_10 | BGMAC_SPEED_100 | BGMAC_SPEED_1000;
memcpy(bgmac->net_dev->dev_addr, mac, ETH_ALEN);
/* On BCM4706 we need common core to access PHY */
@@ -1500,6 +1473,27 @@ static int bgmac_probe(struct bcma_device *core)
bgmac_chip_reset(bgmac);
+ /* For Northstar, we have to take all GMAC cores out of reset */
+ if (core->id.id == BCMA_CHIP_ID_BCM4707 ||
+ core->id.id == BCMA_CHIP_ID_BCM53018) {
+ struct bcma_device *ns_core;
+ int ns_gmac;
+
+ /* Northstar has 4 GMAC cores */
+ for (ns_gmac = 0; ns_gmac < 4; ns_gmac++) {
+ /* Northstar requires all GMAC cores to be reset
+ * before any of them is accessed. bgmac_chip_reset()
+ * calls bcma_core_enable() for this core only, so
+ * take the other three out of reset here.
+ */
+ ns_core = bcma_find_core_unit(core->bus,
+ BCMA_CORE_MAC_GBIT,
+ ns_gmac);
+ if (ns_core && !bcma_core_is_enabled(ns_core))
+ bcma_core_enable(ns_core, 0);
+ }
+ }
+
err = bgmac_dma_alloc(bgmac);
if (err) {
bgmac_err(bgmac, "Unable to alloc memory for DMA\n");
@@ -1524,14 +1518,12 @@ static int bgmac_probe(struct bcma_device *core)
err = bgmac_mii_register(bgmac);
if (err) {
bgmac_err(bgmac, "Cannot register MDIO\n");
- err = -ENOTSUPP;
goto err_dma_free;
}
err = register_netdev(bgmac->net_dev);
if (err) {
bgmac_err(bgmac, "Cannot register net device\n");
- err = -ENOTSUPP;
goto err_mii_unregister;
}
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 66c8afbdc8c7..89fa5bc69c51 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -95,7 +95,11 @@
#define BGMAC_RXQ_CTL_MDP_SHIFT 24
#define BGMAC_GPIO_SELECT 0x194
#define BGMAC_GPIO_OUTPUT_EN 0x198
-/* For 0x1e0 see BCMA_CLKCTLST */
+
+/* For 0x1e0 see BCMA_CLKCTLST. Below are BGMAC specific bits */
+#define BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ 0x00000100
+#define BGMAC_BCMA_CLKCTLST_MISC_PLL_ST 0x01000000
+
#define BGMAC_HW_WAR 0x1e4
#define BGMAC_PWR_CTL 0x1e8
#define BGMAC_DMA_BASE0 0x200 /* Tx and Rx controller */
@@ -185,6 +189,7 @@
#define BGMAC_CMDCFG_ES_10 0x00000000
#define BGMAC_CMDCFG_ES_100 0x00000004
#define BGMAC_CMDCFG_ES_1000 0x00000008
+#define BGMAC_CMDCFG_ES_2500 0x0000000C
#define BGMAC_CMDCFG_PROM 0x00000010 /* Set to activate promiscuous mode */
#define BGMAC_CMDCFG_PAD_EN 0x00000020
#define BGMAC_CMDCFG_CF 0x00000040
@@ -193,7 +198,9 @@
#define BGMAC_CMDCFG_TAI 0x00000200
#define BGMAC_CMDCFG_HD 0x00000400 /* Set if in half duplex mode */
#define BGMAC_CMDCFG_HD_SHIFT 10
-#define BGMAC_CMDCFG_SR 0x00000800 /* Set to reset mode */
+#define BGMAC_CMDCFG_SR_REV0 0x00000800 /* Set to reset mode, for other revs */
+#define BGMAC_CMDCFG_SR_REV4 0x00002000 /* Set to reset mode, only for core rev 4 */
+#define BGMAC_CMDCFG_SR(rev) ((rev == 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0)
#define BGMAC_CMDCFG_ML 0x00008000 /* Set to activate mac loopback mode */
#define BGMAC_CMDCFG_AE 0x00400000
#define BGMAC_CMDCFG_CFE 0x00800000
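Core rev 4 moved the software-reset bit of CMDCFG from bit 11 to bit 13, which is why every call site in this patch goes through BGMAC_CMDCFG_SR(rev) instead of a fixed constant. A two-assert compile-time check (plain C11, constants copied from the hunk above):

	#define BGMAC_CMDCFG_SR_REV0	0x00000800
	#define BGMAC_CMDCFG_SR_REV4	0x00002000
	#define BGMAC_CMDCFG_SR(rev)	((rev == 4) ? BGMAC_CMDCFG_SR_REV4 : \
					 BGMAC_CMDCFG_SR_REV0)

	_Static_assert(BGMAC_CMDCFG_SR(4) == (1 << 13), "rev 4 uses bit 13");
	_Static_assert(BGMAC_CMDCFG_SR(2) == (1 << 11), "older revs use bit 11");

	int main(void) { return 0; }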
@@ -216,27 +223,6 @@
#define BGMAC_RX_STATUS 0xb38
#define BGMAC_TX_STATUS 0xb3c
-#define BGMAC_PHY_CTL 0x00
-#define BGMAC_PHY_CTL_SPEED_MSB 0x0040
-#define BGMAC_PHY_CTL_DUPLEX 0x0100 /* duplex mode */
-#define BGMAC_PHY_CTL_RESTART 0x0200 /* restart autonegotiation */
-#define BGMAC_PHY_CTL_ANENAB 0x1000 /* enable autonegotiation */
-#define BGMAC_PHY_CTL_SPEED 0x2000
-#define BGMAC_PHY_CTL_LOOP 0x4000 /* loopback */
-#define BGMAC_PHY_CTL_RESET 0x8000 /* reset */
-/* Helpers */
-#define BGMAC_PHY_CTL_SPEED_10 0
-#define BGMAC_PHY_CTL_SPEED_100 BGMAC_PHY_CTL_SPEED
-#define BGMAC_PHY_CTL_SPEED_1000 BGMAC_PHY_CTL_SPEED_MSB
-#define BGMAC_PHY_ADV 0x04
-#define BGMAC_PHY_ADV_10HALF 0x0020 /* advertise 10MBits/s half duplex */
-#define BGMAC_PHY_ADV_10FULL 0x0040 /* advertise 10MBits/s full duplex */
-#define BGMAC_PHY_ADV_100HALF 0x0080 /* advertise 100MBits/s half duplex */
-#define BGMAC_PHY_ADV_100FULL 0x0100 /* advertise 100MBits/s full duplex */
-#define BGMAC_PHY_ADV2 0x09
-#define BGMAC_PHY_ADV2_1000HALF 0x0100 /* advertise 1000MBits/s half duplex */
-#define BGMAC_PHY_ADV2_1000FULL 0x0200 /* advertise 1000MBits/s full duplex */
-
/* BCMA GMAC core specific IO Control (BCMA_IOCTL) flags */
#define BGMAC_BCMA_IOCTL_SW_CLKEN 0x00000004 /* PHY Clock Enable */
#define BGMAC_BCMA_IOCTL_SW_RESET 0x00000008 /* PHY Reset */
@@ -254,9 +240,34 @@
#define BGMAC_DMA_TX_SUSPEND 0x00000002
#define BGMAC_DMA_TX_LOOPBACK 0x00000004
#define BGMAC_DMA_TX_FLUSH 0x00000010
+#define BGMAC_DMA_TX_MR_MASK 0x000000C0 /* Multiple outstanding reads */
+#define BGMAC_DMA_TX_MR_SHIFT 6
+#define BGMAC_DMA_TX_MR_1 0
+#define BGMAC_DMA_TX_MR_2 1
#define BGMAC_DMA_TX_PARITY_DISABLE 0x00000800
#define BGMAC_DMA_TX_ADDREXT_MASK 0x00030000
#define BGMAC_DMA_TX_ADDREXT_SHIFT 16
+#define BGMAC_DMA_TX_BL_MASK 0x001C0000 /* BurstLen bits */
+#define BGMAC_DMA_TX_BL_SHIFT 18
+#define BGMAC_DMA_TX_BL_16 0
+#define BGMAC_DMA_TX_BL_32 1
+#define BGMAC_DMA_TX_BL_64 2
+#define BGMAC_DMA_TX_BL_128 3
+#define BGMAC_DMA_TX_BL_256 4
+#define BGMAC_DMA_TX_BL_512 5
+#define BGMAC_DMA_TX_BL_1024 6
+#define BGMAC_DMA_TX_PC_MASK 0x00E00000 /* Prefetch control */
+#define BGMAC_DMA_TX_PC_SHIFT 21
+#define BGMAC_DMA_TX_PC_0 0
+#define BGMAC_DMA_TX_PC_4 1
+#define BGMAC_DMA_TX_PC_8 2
+#define BGMAC_DMA_TX_PC_16 3
+#define BGMAC_DMA_TX_PT_MASK 0x03000000 /* Prefetch threshold */
+#define BGMAC_DMA_TX_PT_SHIFT 24
+#define BGMAC_DMA_TX_PT_1 0
+#define BGMAC_DMA_TX_PT_2 1
+#define BGMAC_DMA_TX_PT_4 2
+#define BGMAC_DMA_TX_PT_8 3
#define BGMAC_DMA_TX_INDEX 0x04
#define BGMAC_DMA_TX_RINGLO 0x08
#define BGMAC_DMA_TX_RINGHI 0x0C
@@ -284,8 +295,33 @@
#define BGMAC_DMA_RX_DIRECT_FIFO 0x00000100
#define BGMAC_DMA_RX_OVERFLOW_CONT 0x00000400
#define BGMAC_DMA_RX_PARITY_DISABLE 0x00000800
+#define BGMAC_DMA_RX_MR_MASK 0x000000C0 /* Multiple outstanding reads */
+#define BGMAC_DMA_RX_MR_SHIFT 6
+#define BGMAC_DMA_RX_MR_1 0
+#define BGMAC_DMA_RX_MR_2 1
#define BGMAC_DMA_RX_ADDREXT_MASK 0x00030000
#define BGMAC_DMA_RX_ADDREXT_SHIFT 16
+#define BGMAC_DMA_RX_BL_MASK 0x001C0000 /* BurstLen bits */
+#define BGMAC_DMA_RX_BL_SHIFT 18
+#define BGMAC_DMA_RX_BL_16 0
+#define BGMAC_DMA_RX_BL_32 1
+#define BGMAC_DMA_RX_BL_64 2
+#define BGMAC_DMA_RX_BL_128 3
+#define BGMAC_DMA_RX_BL_256 4
+#define BGMAC_DMA_RX_BL_512 5
+#define BGMAC_DMA_RX_BL_1024 6
+#define BGMAC_DMA_RX_PC_MASK 0x00E00000 /* Prefetch control */
+#define BGMAC_DMA_RX_PC_SHIFT 21
+#define BGMAC_DMA_RX_PC_0 0
+#define BGMAC_DMA_RX_PC_4 1
+#define BGMAC_DMA_RX_PC_8 2
+#define BGMAC_DMA_RX_PC_16 3
+#define BGMAC_DMA_RX_PT_MASK 0x03000000 /* Prefetch threshold */
+#define BGMAC_DMA_RX_PT_SHIFT 24
+#define BGMAC_DMA_RX_PT_1 0
+#define BGMAC_DMA_RX_PT_2 1
+#define BGMAC_DMA_RX_PT_4 2
+#define BGMAC_DMA_RX_PT_8 3
#define BGMAC_DMA_RX_INDEX 0x24
#define BGMAC_DMA_RX_RINGLO 0x28
#define BGMAC_DMA_RX_RINGHI 0x2C
@@ -342,10 +378,6 @@
#define BGMAC_CHIPCTL_1_SW_TYPE_RGMII 0x000000C0
#define BGMAC_CHIPCTL_1_RXC_DLL_BYPASS 0x00010000
-#define BGMAC_SPEED_10 0x0001
-#define BGMAC_SPEED_100 0x0002
-#define BGMAC_SPEED_1000 0x0004
-
#define BGMAC_WEIGHT 64
#define ETHER_MAX_LEN 1518
@@ -402,6 +434,7 @@ struct bgmac {
struct net_device *net_dev;
struct napi_struct napi;
struct mii_bus *mii_bus;
+ struct phy_device *phy_dev;
/* DMA */
struct bgmac_dma_ring tx_ring[BGMAC_MAX_TX_RINGS];
@@ -416,10 +449,9 @@ struct bgmac {
u32 int_mask;
u32 int_status;
- /* Speed-related */
- int speed;
- bool autoneg;
- bool full_duplex;
+ /* Current MAC state */
+ int mac_speed;
+ int mac_duplex;
u8 phyaddr;
bool has_robosw;
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index d9980ad00b4b..9d2dedadf2df 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -23,7 +23,6 @@
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
@@ -58,8 +57,8 @@
#include "bnx2_fw.h"
#define DRV_MODULE_NAME "bnx2"
-#define DRV_MODULE_VERSION "2.2.4"
-#define DRV_MODULE_RELDATE "Aug 05, 2013"
+#define DRV_MODULE_VERSION "2.2.5"
+#define DRV_MODULE_RELDATE "December 20, 2013"
#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw"
#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw"
@@ -1197,6 +1196,8 @@ bnx2_copper_linkup(struct bnx2 *bp)
{
u32 bmcr;
+ bp->phy_flags &= ~BNX2_PHY_FLAG_MDIX;
+
bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
if (bmcr & BMCR_ANENABLE) {
u32 local_adv, remote_adv, common;
@@ -1255,6 +1256,14 @@ bnx2_copper_linkup(struct bnx2 *bp)
}
}
+ if (bp->link_up) {
+ u32 ext_status;
+
+ bnx2_read_phy(bp, MII_BNX2_EXT_STATUS, &ext_status);
+ if (ext_status & EXT_STATUS_MDIX)
+ bp->phy_flags |= BNX2_PHY_FLAG_MDIX;
+ }
+
return 0;
}
@@ -2048,29 +2057,27 @@ bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
- u32 bmcr;
+ u32 bmcr, adv_reg, new_adv = 0;
u32 new_bmcr;
bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+ bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
+ adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
+ ADVERTISE_PAUSE_ASYM);
+
+ new_adv = ADVERTISE_CSMA | ethtool_adv_to_mii_adv_t(bp->advertising);
+
if (bp->autoneg & AUTONEG_SPEED) {
- u32 adv_reg, adv1000_reg;
- u32 new_adv = 0;
+ u32 adv1000_reg;
u32 new_adv1000 = 0;
- bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
- adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
- ADVERTISE_PAUSE_ASYM);
+ new_adv |= bnx2_phy_get_pause_adv(bp);
bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
adv1000_reg &= PHY_ALL_1000_SPEED;
- new_adv = ethtool_adv_to_mii_adv_t(bp->advertising);
- new_adv |= ADVERTISE_CSMA;
- new_adv |= bnx2_phy_get_pause_adv(bp);
-
new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
-
if ((adv1000_reg != new_adv1000) ||
(adv_reg != new_adv) ||
((bmcr & BMCR_ANENABLE) == 0)) {
@@ -2090,6 +2097,10 @@ __acquires(&bp->phy_lock)
return 0;
}
+ /* advertise nothing when forcing speed */
+ if (adv_reg != new_adv)
+ bnx2_write_phy(bp, bp->mii_adv, new_adv);
+
new_bmcr = 0;
if (bp->req_line_speed == SPEED_100) {
new_bmcr |= BMCR_SPEED100;
@@ -2341,9 +2352,15 @@ bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
}
/* ethernet@wirespeed */
- bnx2_write_phy(bp, 0x18, 0x7007);
- bnx2_read_phy(bp, 0x18, &val);
- bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
+ bnx2_write_phy(bp, MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL);
+ bnx2_read_phy(bp, MII_BNX2_AUX_CTL, &val);
+ val |= AUX_CTL_MISC_CTL_WR | AUX_CTL_MISC_CTL_WIRESPEED;
+
+ /* auto-mdix */
+ if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
+ val |= AUX_CTL_MISC_CTL_AUTOMDIX;
+
+ bnx2_write_phy(bp, MII_BNX2_AUX_CTL, val);
return 0;
}
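Register 0x18 on these PHYs behaves as a banked register: the value written selects a shadow bank (0x7007 selects misc control), and a committed write needs the WR bit set, which is what the rewritten sequence above spells out with named constants in place of magic numbers. A self-contained sketch against a faked PHY (phy_read()/phy_write() are local stand-ins, not kernel MDIO APIs, and the bank model is deliberately simplified):

	#include <stdint.h>
	#include <stdio.h>

	#define MII_BNX2_AUX_CTL		0x18
	#define AUX_CTL_MISC_CTL		0x7007
	#define AUX_CTL_MISC_CTL_WIRESPEED	(1 << 4)
	#define AUX_CTL_MISC_CTL_WR		(1 << 15)

	static uint16_t misc_ctl_bank;	/* fake MISC_CTL shadow bank */

	static void phy_write(int reg, uint16_t val)
	{
		if (reg == MII_BNX2_AUX_CTL && (val & AUX_CTL_MISC_CTL_WR))
			misc_ctl_bank = val;	/* WR bit commits the write */
	}

	static uint16_t phy_read(int reg)
	{
		return reg == MII_BNX2_AUX_CTL ? misc_ctl_bank : 0;
	}

	int main(void)
	{
		uint16_t val;

		phy_write(MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL);	/* select bank */
		val = phy_read(MII_BNX2_AUX_CTL);
		val |= AUX_CTL_MISC_CTL_WR | AUX_CTL_MISC_CTL_WIRESPEED;
		phy_write(MII_BNX2_AUX_CTL, val);

		printf("wirespeed %s\n",
		       (phy_read(MII_BNX2_AUX_CTL) & AUX_CTL_MISC_CTL_WIRESPEED)
		       ? "on" : "off");
		return 0;
	}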
@@ -3234,7 +3251,8 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
if ((bp->dev->features & NETIF_F_RXHASH) &&
((status & L2_FHDR_STATUS_USE_RXHASH) ==
L2_FHDR_STATUS_USE_RXHASH))
- skb->rxhash = rx_hdr->l2_fhdr_hash;
+ skb_set_hash(skb, rx_hdr->l2_fhdr_hash,
+ PKT_HASH_TYPE_L3);
skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
napi_gro_receive(&bnapi->napi, skb);
@@ -6865,6 +6883,12 @@ bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
if (netif_carrier_ok(dev)) {
ethtool_cmd_speed_set(cmd, bp->line_speed);
cmd->duplex = bp->duplex;
+ if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES)) {
+ if (bp->phy_flags & BNX2_PHY_FLAG_MDIX)
+ cmd->eth_tp_mdix = ETH_TP_MDI_X;
+ else
+ cmd->eth_tp_mdix = ETH_TP_MDI;
+ }
}
else {
ethtool_cmd_speed_set(cmd, -1);
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
index 18cb2d23e56b..f1cf2c44e7ed 100644
--- a/drivers/net/ethernet/broadcom/bnx2.h
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -6471,6 +6471,15 @@ struct l2_fhdr {
#define BCM5708S_TX_ACTL3 0x17
+#define MII_BNX2_EXT_STATUS 0x11
+#define EXT_STATUS_MDIX (1 << 13)
+
+#define MII_BNX2_AUX_CTL 0x18
+#define AUX_CTL_MISC_CTL 0x7007
+#define AUX_CTL_MISC_CTL_WIRESPEED (1 << 4)
+#define AUX_CTL_MISC_CTL_AUTOMDIX (1 << 9)
+#define AUX_CTL_MISC_CTL_WR (1 << 15)
+
#define MII_BNX2_DSP_RW_PORT 0x15
#define MII_BNX2_DSP_ADDRESS 0x17
#define MII_BNX2_DSP_EXPAND_REG 0x0f00
@@ -6844,6 +6853,7 @@ struct bnx2 {
#define BNX2_PHY_FLAG_REMOTE_PHY_CAP 0x00000800
#define BNX2_PHY_FLAG_FORCED_DOWN 0x00001000
#define BNX2_PHY_FLAG_NO_PARALLEL 0x00002000
+#define BNX2_PHY_FLAG_MDIX 0x00004000
u32 mii_bmcr;
u32 mii_bmsr;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index ec6119089b82..391f29ef6d2e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -472,7 +472,7 @@ struct bnx2x_agg_info {
u16 vlan_tag;
u16 len_on_bd;
u32 rxhash;
- bool l4_rxhash;
+ enum pkt_hash_types rxhash_type;
u16 gro_size;
u16 full_page;
};
@@ -1566,6 +1566,7 @@ struct bnx2x {
#define NO_ISCSI_FLAG (1 << 14)
#define NO_FCOE_FLAG (1 << 15)
#define BC_SUPPORTS_PFC_STATS (1 << 17)
+#define TX_SWITCHING (1 << 18)
#define BC_SUPPORTS_FCOE_FEATURES (1 << 19)
#define USING_SINGLE_MSIX_FLAG (1 << 20)
#define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21)
@@ -1573,6 +1574,7 @@ struct bnx2x {
#define INTERRUPTS_ENABLED_FLAG (1 << 23)
#define BC_SUPPORTS_RMMOD_CMD (1 << 24)
#define HAS_PHYS_PORT_ID (1 << 25)
+#define AER_ENABLED (1 << 26)
#define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG)
@@ -2080,7 +2082,6 @@ int bnx2x_del_all_macs(struct bnx2x *bp,
void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p);
void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
u8 vf_valid, int fw_sb_id, int igu_sb_id);
-u32 bnx2x_get_pretend_reg(struct bnx2x *bp);
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode);
@@ -2463,7 +2464,8 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
#define GOOD_ME_REG(me_reg) (((me_reg) & ME_REG_VF_VALID) && \
(!((me_reg) & ME_REG_VF_ERR)))
-int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code);
+int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err);
+
/* Congestion management fairness mode */
#define CMNG_FNS_NONE 0
#define CMNG_FNS_MINMAX 1
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index bf811565ee24..9d7419e0390b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -30,6 +30,43 @@
#include "bnx2x_init.h"
#include "bnx2x_sp.h"
+static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
+static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
+static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
+static int bnx2x_poll(struct napi_struct *napi, int budget);
+
+static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
+{
+ int i;
+
+ /* Add NAPI objects */
+ for_each_rx_queue_cnic(bp, i) {
+ netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
+ bnx2x_poll, NAPI_POLL_WEIGHT);
+ napi_hash_add(&bnx2x_fp(bp, i, napi));
+ }
+}
+
+static void bnx2x_add_all_napi(struct bnx2x *bp)
+{
+ int i;
+
+ /* Add NAPI objects */
+ for_each_eth_queue(bp, i) {
+ netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
+ bnx2x_poll, NAPI_POLL_WEIGHT);
+ napi_hash_add(&bnx2x_fp(bp, i, napi));
+ }
+}
+
+static int bnx2x_calc_num_queues(struct bnx2x *bp)
+{
+ return bnx2x_num_queues ?
+ min_t(int, bnx2x_num_queues, BNX2X_MAX_QUEUES(bp)) :
+ min_t(int, netif_get_num_default_rss_queues(),
+ BNX2X_MAX_QUEUES(bp));
+}
+
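bnx2x_calc_num_queues() prefers the user's num_queues module parameter when set and otherwise the stack's default RSS queue count, clamping either to the hardware maximum. A userspace sketch of the same selection:

#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

/* param: num_queues module parameter (0 = unset)
 * default_rss: stand-in for netif_get_num_default_rss_queues()
 * hw_max: stand-in for BNX2X_MAX_QUEUES(bp)
 */
static int calc_num_queues(int param, int default_rss, int hw_max)
{
	return param ? min_int(param, hw_max)
		     : min_int(default_rss, hw_max);
}

int main(void)
{
	printf("%d\n", calc_num_queues(0, 8, 16));	/* 8: stack default */
	printf("%d\n", calc_num_queues(32, 8, 16));	/* 16: clamped param */
	return 0;
}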
/**
* bnx2x_move_fp - move content of the fastpath structure.
*
@@ -145,7 +182,7 @@ static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
}
}
-int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
+int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
/* free skb in the packet ring at pos idx
* return idx of last bd freed
@@ -359,7 +396,7 @@ static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
*/
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
const struct eth_fast_path_rx_cqe *cqe,
- bool *l4_rxhash)
+ enum pkt_hash_types *rxhash_type)
{
/* Get Toeplitz hash from CQE */
if ((bp->dev->features & NETIF_F_RXHASH) &&
@@ -367,11 +404,13 @@ static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
enum eth_rss_hash_type htype;
htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
- *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
- (htype == TCP_IPV6_HASH_TYPE);
+ *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
+ (htype == TCP_IPV6_HASH_TYPE)) ?
+ PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
+
return le32_to_cpu(cqe->rss_hash_result);
}
- *l4_rxhash = false;
+ *rxhash_type = PKT_HASH_TYPE_NONE;
return 0;
}
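The rework above replaces the boolean l4_rxhash with an explicit pkt_hash_types value that is later handed to skb_set_hash(): TCP hash types become L4 hashes, other RSS hashes become L3, and no hash maps to NONE. A standalone sketch with stand-in enum values:

#include <stdio.h>

enum pkt_hash { HASH_NONE, HASH_L3, HASH_L4 };	/* PKT_HASH_TYPE_* stand-ins */
enum rss_htype { IPV4_HASH, TCP_IPV4_HASH, IPV6_HASH, TCP_IPV6_HASH };

static enum pkt_hash classify(int rxhash_enabled, enum rss_htype htype)
{
	if (!rxhash_enabled)
		return HASH_NONE;	/* no hash computed for this CQE */
	return (htype == TCP_IPV4_HASH || htype == TCP_IPV6_HASH) ?
	       HASH_L4 : HASH_L3;	/* only TCP hashes cover L4 ports */
}

int main(void)
{
	printf("%d %d\n", classify(1, TCP_IPV4_HASH), classify(1, IPV6_HASH));
	return 0;			/* prints "2 1" */
}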
@@ -425,7 +464,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
tpa_info->tpa_state = BNX2X_TPA_START;
tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
tpa_info->placement_offset = cqe->placement_offset;
- tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
+ tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
if (fp->mode == TPA_MODE_GRO) {
u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
@@ -733,8 +772,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
skb_reserve(skb, pad + NET_SKB_PAD);
skb_put(skb, len);
- skb->rxhash = tpa_info->rxhash;
- skb->l4_rxhash = tpa_info->l4_rxhash;
+ skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
skb->protocol = eth_type_trans(skb, bp->dev);
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -817,7 +855,7 @@ void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
-int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
+static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
struct bnx2x *bp = fp->bp;
u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
@@ -851,7 +889,8 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
enum eth_rx_cqe_type cqe_fp_type;
u16 len, pad, queue;
u8 *data;
- bool l4_rxhash;
+ u32 rxhash;
+ enum pkt_hash_types rxhash_type;
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
@@ -992,8 +1031,8 @@ reuse_rx:
skb->protocol = eth_type_trans(skb, bp->dev);
/* Set Toeplitz hash for a none-LRO skb */
- skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
- skb->l4_rxhash = l4_rxhash;
+ rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
+ skb_set_hash(skb, rxhash, rxhash_type);
skb_checksum_none_assert(skb);
@@ -1486,7 +1525,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
}
}
-void bnx2x_free_skbs_cnic(struct bnx2x *bp)
+static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
{
bnx2x_free_tx_skbs_cnic(bp);
bnx2x_free_rx_skbs_cnic(bp);
@@ -2265,7 +2304,7 @@ static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
* virtualized environments a pf from another VM may have already
* initialized the device including loading FW
*/
-int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
+int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
{
/* is another pf loaded on this engine? */
if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
@@ -2284,8 +2323,12 @@ int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
/* abort nic load if version mismatch */
if (my_fw != loaded_fw) {
- BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
- loaded_fw, my_fw);
+ if (print_err)
+ BNX2X_ERR("bnx2x with FW %x was already loaded, which does not match my FW %x. Aborting\n",
+ loaded_fw, my_fw);
+ else
+ BNX2X_DEV_INFO("bnx2x with FW %x was already loaded, which does not match my FW %x, possibly due to MF UNDI\n",
+ loaded_fw, my_fw);
return -EBUSY;
}
}
@@ -2298,16 +2341,16 @@ static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
int path = BP_PATH(bp);
DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
- path, load_count[path][0], load_count[path][1],
- load_count[path][2]);
- load_count[path][0]++;
- load_count[path][1 + port]++;
+ path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
+ bnx2x_load_count[path][2]);
+ bnx2x_load_count[path][0]++;
+ bnx2x_load_count[path][1 + port]++;
DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
- path, load_count[path][0], load_count[path][1],
- load_count[path][2]);
- if (load_count[path][0] == 1)
+ path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
+ bnx2x_load_count[path][2]);
+ if (bnx2x_load_count[path][0] == 1)
return FW_MSG_CODE_DRV_LOAD_COMMON;
- else if (load_count[path][1 + port] == 1)
+ else if (bnx2x_load_count[path][1 + port] == 1)
return FW_MSG_CODE_DRV_LOAD_PORT;
else
return FW_MSG_CODE_DRV_LOAD_FUNCTION;
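The renamed bnx2x_load_count[][] keeps per-path bookkeeping when no MCP is present: slot 0 counts all loads on the path, slots 1 and 2 count per port, and the first load on a path or port elects COMMON or PORT initialization. A userspace sketch:

#include <stdio.h>

static int load_count[2][3];	/* per-path: 0-common, 1-port0, 2-port1 */

static const char *nic_load(int path, int port)
{
	load_count[path][0]++;
	load_count[path][1 + port]++;
	if (load_count[path][0] == 1)
		return "LOAD_COMMON";	/* first function on this path */
	if (load_count[path][1 + port] == 1)
		return "LOAD_PORT";	/* first function on this port */
	return "LOAD_FUNCTION";
}

int main(void)
{
	printf("%s\n", nic_load(0, 0));	/* LOAD_COMMON */
	printf("%s\n", nic_load(0, 1));	/* LOAD_PORT */
	printf("%s\n", nic_load(0, 1));	/* LOAD_FUNCTION */
	return 0;
}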
@@ -2600,7 +2643,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
LOAD_ERROR_EXIT(bp, load_error1);
/* what did mcp say? */
- rc = bnx2x_nic_load_analyze_req(bp, load_code);
+ rc = bnx2x_compare_fw_ver(bp, load_code, true);
if (rc) {
bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
LOAD_ERROR_EXIT(bp, load_error2);
@@ -3065,7 +3108,7 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
/*
* net_device service functions
*/
-int bnx2x_poll(struct napi_struct *napi, int budget)
+static int bnx2x_poll(struct napi_struct *napi, int budget)
{
int work_done = 0;
u8 cos;
@@ -4192,7 +4235,7 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
/* end of fastpath */
}
-void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
+static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
{
int i;
for_each_cnic_queue(bp, i)
@@ -4406,7 +4449,7 @@ alloc_mem_err:
return 0;
}
-int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
+static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
{
if (!NO_FCOE(bp))
/* FCoE */
@@ -4419,7 +4462,7 @@ int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
return 0;
}
-int bnx2x_alloc_fp_mem(struct bnx2x *bp)
+static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
{
int i;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 41f3ca5ad972..17d1689aec6b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -26,10 +26,8 @@
#include "bnx2x_sriov.h"
/* This is used as a replacement for an MCP if it's not present */
-extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */
-
-extern int num_queues;
-extern int int_mode;
+extern int bnx2x_load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */
+extern int bnx2x_num_queues;
/************************ Macros ********************************/
#define BNX2X_PCI_FREE(x, y, size) \
@@ -417,35 +415,8 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set);
* If bp->state is OPEN, should be called with
* netif_addr_lock_bh()
*/
-void bnx2x_set_rx_mode(struct net_device *dev);
void bnx2x_set_rx_mode_inner(struct bnx2x *bp);
-/**
- * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW.
- *
- * @bp: driver handle
- *
- * If bp->state is OPEN, should be called with
- * netif_addr_lock_bh().
- */
-int bnx2x_set_storm_rx_mode(struct bnx2x *bp);
-
-/**
- * bnx2x_set_q_rx_mode - configures rx_mode for a single queue.
- *
- * @bp: driver handle
- * @cl_id: client id
- * @rx_mode_flags: rx mode configuration
- * @rx_accept_flags: rx accept configuration
- * @tx_accept_flags: tx accept configuration (tx switch)
- * @ramrod_flags: ramrod configuration
- */
-int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
- unsigned long rx_mode_flags,
- unsigned long rx_accept_flags,
- unsigned long tx_accept_flags,
- unsigned long ramrod_flags);
-
/* Parity errors related */
void bnx2x_set_pf_load(struct bnx2x *bp);
bool bnx2x_clear_pf_load(struct bnx2x *bp);
@@ -565,9 +536,6 @@ int bnx2x_reload_if_running(struct net_device *dev);
int bnx2x_change_mac_addr(struct net_device *dev, void *p);
-/* NAPI poll Rx part */
-int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);
-
/* NAPI poll Tx part */
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata);
@@ -578,13 +546,9 @@ int bnx2x_resume(struct pci_dev *pdev);
/* Release IRQ vectors */
void bnx2x_free_irq(struct bnx2x *bp);
-void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
void bnx2x_free_fp_mem(struct bnx2x *bp);
-int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
-int bnx2x_alloc_fp_mem(struct bnx2x *bp);
void bnx2x_init_rx_rings(struct bnx2x *bp);
void bnx2x_init_rx_rings_cnic(struct bnx2x *bp);
-void bnx2x_free_skbs_cnic(struct bnx2x *bp);
void bnx2x_free_skbs(struct bnx2x *bp);
void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
void bnx2x_netif_start(struct bnx2x *bp);
@@ -608,15 +572,6 @@ int bnx2x_enable_msix(struct bnx2x *bp);
int bnx2x_enable_msi(struct bnx2x *bp);
/**
- * bnx2x_poll - NAPI callback
- *
- * @napi: napi structure
- * @budget:
- *
- */
-int bnx2x_poll(struct napi_struct *napi, int budget);
-
-/**
* bnx2x_low_latency_recv - LL callback
*
* @napi: napi structure
@@ -862,30 +817,6 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
sge->addr_lo = 0;
}
-static inline void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
-{
- int i;
-
- /* Add NAPI objects */
- for_each_rx_queue_cnic(bp, i) {
- netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
- bnx2x_poll, NAPI_POLL_WEIGHT);
- napi_hash_add(&bnx2x_fp(bp, i, napi));
- }
-}
-
-static inline void bnx2x_add_all_napi(struct bnx2x *bp)
-{
- int i;
-
- /* Add NAPI objects */
- for_each_eth_queue(bp, i) {
- netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
- bnx2x_poll, NAPI_POLL_WEIGHT);
- napi_hash_add(&bnx2x_fp(bp, i, napi));
- }
-}
-
static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp)
{
int i;
@@ -919,14 +850,6 @@ static inline void bnx2x_disable_msi(struct bnx2x *bp)
}
}
-static inline int bnx2x_calc_num_queues(struct bnx2x *bp)
-{
- return num_queues ?
- min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) :
- min_t(int, netif_get_num_default_rss_queues(),
- BNX2X_MAX_QUEUES(bp));
-}
-
static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
int i, j;
@@ -1173,8 +1096,6 @@ static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
return fp->cl_id;
}
-u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp);
-
static inline void bnx2x_init_txdata(struct bnx2x *bp,
struct bnx2x_fp_txdata *txdata, u32 cid,
int txq_index, __le16 *tx_cons_sb,
@@ -1207,47 +1128,6 @@ static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp)
return bp->igu_base_sb;
}
-static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
-{
- struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
- unsigned long q_type = 0;
-
- bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
- bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
- BNX2X_FCOE_ETH_CL_ID_IDX);
- bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
- bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
- bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
- bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
- bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
- fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
- fp);
-
- DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);
-
- /* qZone id equals to FW (per path) client id */
- bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
- /* init shortcut */
- bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
- bnx2x_rx_ustorm_prods_offset(fp);
-
- /* Configure Queue State object */
- __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
- __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
-
- /* No multi-CoS for FCoE L2 client */
- BUG_ON(fp->max_cos != 1);
-
- bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
- &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
- bnx2x_sp_mapping(bp, q_rdata), q_type);
-
- DP(NETIF_MSG_IFUP,
- "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
- fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
- fp->igu_sb_id);
-}
-
static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
struct bnx2x_fp_txdata *txdata)
{
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 32d0f1435fb4..38fc794c1655 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -358,49 +358,47 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cfg_idx = bnx2x_get_link_cfg_idx(bp);
old_multi_phy_config = bp->link_params.multi_phy_config;
- switch (cmd->port) {
- case PORT_TP:
- if (bp->port.supported[cfg_idx] & SUPPORTED_TP)
- break; /* no port change */
-
- if (!(bp->port.supported[0] & SUPPORTED_TP ||
- bp->port.supported[1] & SUPPORTED_TP)) {
- DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
- return -EINVAL;
- }
- bp->link_params.multi_phy_config &=
- ~PORT_HW_CFG_PHY_SELECTION_MASK;
- if (bp->link_params.multi_phy_config &
- PORT_HW_CFG_PHY_SWAPPED_ENABLED)
- bp->link_params.multi_phy_config |=
- PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
- else
- bp->link_params.multi_phy_config |=
- PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
- break;
- case PORT_FIBRE:
- case PORT_DA:
- if (bp->port.supported[cfg_idx] & SUPPORTED_FIBRE)
- break; /* no port change */
-
- if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
- bp->port.supported[1] & SUPPORTED_FIBRE)) {
+ if (cmd->port != bnx2x_get_port_type(bp)) {
+ switch (cmd->port) {
+ case PORT_TP:
+ if (!(bp->port.supported[0] & SUPPORTED_TP ||
+ bp->port.supported[1] & SUPPORTED_TP)) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Unsupported port type\n");
+ return -EINVAL;
+ }
+ bp->link_params.multi_phy_config &=
+ ~PORT_HW_CFG_PHY_SELECTION_MASK;
+ if (bp->link_params.multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED)
+ bp->link_params.multi_phy_config |=
+ PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
+ else
+ bp->link_params.multi_phy_config |=
+ PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
+ break;
+ case PORT_FIBRE:
+ case PORT_DA:
+ if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
+ bp->port.supported[1] & SUPPORTED_FIBRE)) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Unsupported port type\n");
+ return -EINVAL;
+ }
+ bp->link_params.multi_phy_config &=
+ ~PORT_HW_CFG_PHY_SELECTION_MASK;
+ if (bp->link_params.multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED)
+ bp->link_params.multi_phy_config |=
+ PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
+ else
+ bp->link_params.multi_phy_config |=
+ PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
+ break;
+ default:
DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
return -EINVAL;
}
- bp->link_params.multi_phy_config &=
- ~PORT_HW_CFG_PHY_SELECTION_MASK;
- if (bp->link_params.multi_phy_config &
- PORT_HW_CFG_PHY_SWAPPED_ENABLED)
- bp->link_params.multi_phy_config |=
- PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
- else
- bp->link_params.multi_phy_config |=
- PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
- break;
- default:
- DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
- return -EINVAL;
}
/* Save new config in case command completes successfully */
new_multi_phy_config = bp->link_params.multi_phy_config;
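The restructured switch only reprograms PHY selection when the requested port type actually differs from the current one; within it, the TP and fibre cases pick opposite PHYs, and the choice flips again on boards wired with swapped PHYs. A sketch of that selection:

#include <stdio.h>

enum phy_sel { FIRST_PHY, SECOND_PHY };

static enum phy_sel pick_phy(int want_tp, int swapped)
{
	if (want_tp)
		return swapped ? SECOND_PHY : FIRST_PHY;
	/* fibre/DA is the mirror image of the TP case */
	return swapped ? FIRST_PHY : SECOND_PHY;
}

int main(void)
{
	printf("%d\n", pick_phy(1, 0));	/* 0: TP on the first PHY */
	printf("%d\n", pick_phy(0, 0));	/* 1: fibre on the second PHY */
	return 0;
}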
@@ -1639,6 +1637,12 @@ static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
memcpy(&val, data_buf, 4);
+ /* Notice that unlike bnx2x_nvram_read_dword() this does not
+ * byte-swap val with be32_to_cpu(), so data flips if the
+ * eeprom is read and then written back. This is left as-is
+ * because tools that rely on this behaviour would break if
+ * it were corrected.
+ */
rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
/* advance to the next dword */
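A sketch of the read/write asymmetry the comment describes, assuming a little-endian host: the read path byte-swaps with be32_to_cpu() while the write path stores the buffer bytes verbatim, so a naive read-then-write flips byte order, behaviour existing tools have come to depend on:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

static uint32_t be32_to_cpu_sketch(uint32_t be)
{
	return __builtin_bswap32(be);	/* little-endian host assumed */
}

int main(void)
{
	uint8_t buf[4] = { 0x12, 0x34, 0x56, 0x78 };
	uint32_t val;

	memcpy(&val, buf, 4);
	printf("read path (swapped): 0x%08x\n", be32_to_cpu_sketch(val));
	printf("write path (raw):    0x%08x\n", val);
	return 0;
}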
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 11fc79585491..9b6b3d7304b6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -205,6 +205,11 @@ typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy,
(_bank + (_addr & 0xf)), \
_val)
+static int bnx2x_check_half_open_conn(struct link_params *params,
+ struct link_vars *vars, u8 notify);
+static int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
+ struct link_params *params);
+
static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
{
u32 val = REG_RD(bp, reg);
@@ -1399,57 +1404,6 @@ static void bnx2x_update_pfc_xmac(struct link_params *params,
udelay(30);
}
-
-static void bnx2x_emac_get_pfc_stat(struct link_params *params,
- u32 pfc_frames_sent[2],
- u32 pfc_frames_received[2])
-{
- /* Read pfc statistic */
- struct bnx2x *bp = params->bp;
- u32 emac_base = params->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
- u32 val_xon = 0;
- u32 val_xoff = 0;
-
- DP(NETIF_MSG_LINK, "pfc statistic read from EMAC\n");
-
- /* PFC received frames */
- val_xoff = REG_RD(bp, emac_base +
- EMAC_REG_RX_PFC_STATS_XOFF_RCVD);
- val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT;
- val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_RCVD);
- val_xon &= EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT;
-
- pfc_frames_received[0] = val_xon + val_xoff;
-
- /* PFC received sent */
- val_xoff = REG_RD(bp, emac_base +
- EMAC_REG_RX_PFC_STATS_XOFF_SENT);
- val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT;
- val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_SENT);
- val_xon &= EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT;
-
- pfc_frames_sent[0] = val_xon + val_xoff;
-}
-
-/* Read pfc statistic*/
-void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
- u32 pfc_frames_sent[2],
- u32 pfc_frames_received[2])
-{
- /* Read pfc statistic */
- struct bnx2x *bp = params->bp;
-
- DP(NETIF_MSG_LINK, "pfc statistic\n");
-
- if (!vars->link_up)
- return;
-
- if (vars->mac_type == MAC_TYPE_EMAC) {
- DP(NETIF_MSG_LINK, "About to read PFC stats from EMAC\n");
- bnx2x_emac_get_pfc_stat(params, pfc_frames_sent,
- pfc_frames_received);
- }
-}
/******************************************************************/
/* MAC/PBF section */
/******************************************************************/
@@ -8648,8 +8602,8 @@ static void bnx2x_set_limiting_mode(struct link_params *params,
}
}
-int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
- struct link_params *params)
+static int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
+ struct link_params *params)
{
struct bnx2x *bp = params->bp;
u16 edc_mode;
@@ -13413,9 +13367,9 @@ static u8 bnx2x_analyze_link_error(struct link_params *params,
* a fault, for example, due to break in the TX side of fiber.
*
******************************************************************************/
-int bnx2x_check_half_open_conn(struct link_params *params,
- struct link_vars *vars,
- u8 notify)
+static int bnx2x_check_half_open_conn(struct link_params *params,
+ struct link_vars *vars,
+ u8 notify)
{
struct bnx2x *bp = params->bp;
u32 lss_status = 0;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index 4df45234fdc0..389f5f8cb0a3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -533,19 +533,11 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos);
int bnx2x_ets_e3b0_config(const struct link_params *params,
const struct link_vars *vars,
struct bnx2x_ets_params *ets_params);
-/* Read pfc statistic*/
-void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
- u32 pfc_frames_sent[2],
- u32 pfc_frames_received[2]);
+
void bnx2x_init_mod_abs_int(struct bnx2x *bp, struct link_vars *vars,
u32 chip_id, u32 shmem_base, u32 shmem2_base,
u8 port);
-int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
- struct link_params *params);
-
void bnx2x_period_func(struct link_params *params, struct link_vars *vars);
-int bnx2x_check_half_open_conn(struct link_params *params,
- struct link_vars *vars, u8 notify);
#endif /* BNX2X_LINK_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 0067b975873f..c9c445e7b4a5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -27,6 +27,7 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
+#include <linux/aer.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -93,8 +94,8 @@ MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);
-int num_queues;
-module_param(num_queues, int, 0);
+int bnx2x_num_queues;
+module_param_named(num_queues, bnx2x_num_queues, int, 0);
MODULE_PARM_DESC(num_queues,
" Set number of queues (default is the number of CPUs)");
@@ -102,7 +103,7 @@ static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
-int int_mode;
+static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
"(1 INT#x; 2 MSI)");
@@ -278,6 +279,12 @@ MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
#define BNX2X_PREV_WAIT_NEEDED 1
static DEFINE_SEMAPHORE(bnx2x_prev_sem);
static LIST_HEAD(bnx2x_prev_list);
+
+/* Forward declaration */
+static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
+static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp);
+static int bnx2x_set_storm_rx_mode(struct bnx2x *bp);
+
/****************************************************************************
* General service functions
****************************************************************************/
@@ -3000,6 +3007,9 @@ static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
if (zero_stats)
__set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
+ if (bp->flags & TX_SWITCHING)
+ __set_bit(BNX2X_Q_FLG_TX_SWITCH, &flags);
+
__set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);
__set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags);
@@ -3297,6 +3307,10 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
ether_stat->txq_size = bp->tx_ring_size;
ether_stat->rxq_size = bp->rx_ring_size;
+
+#ifdef CONFIG_BNX2X_SRIOV
+ ether_stat->vf_cnt = IS_SRIOV(bp) ? bp->vfdb->sriov.nr_virtfn : 0;
+#endif
}
static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
@@ -5852,11 +5866,11 @@ static void bnx2x_init_eq_ring(struct bnx2x *bp)
}
/* called with netif_addr_lock_bh() */
-int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
- unsigned long rx_mode_flags,
- unsigned long rx_accept_flags,
- unsigned long tx_accept_flags,
- unsigned long ramrod_flags)
+static int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
+ unsigned long rx_mode_flags,
+ unsigned long rx_accept_flags,
+ unsigned long tx_accept_flags,
+ unsigned long ramrod_flags)
{
struct bnx2x_rx_mode_ramrod_params ramrod_param;
int rc;
@@ -5964,7 +5978,7 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
}
/* called with netif_addr_lock_bh() */
-int bnx2x_set_storm_rx_mode(struct bnx2x *bp)
+static int bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
unsigned long rx_mode_flags = 0, ramrod_flags = 0;
unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
@@ -6160,6 +6174,47 @@ static void bnx2x_init_tx_rings(struct bnx2x *bp)
bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
}
+static void bnx2x_init_fcoe_fp(struct bnx2x *bp)
+{
+ struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
+ unsigned long q_type = 0;
+
+ bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
+ bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
+ BNX2X_FCOE_ETH_CL_ID_IDX);
+ bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
+ bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
+ bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
+ bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
+ bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
+ fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
+ fp);
+
+ DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);
+
+ /* qZone id equals to FW (per path) client id */
+ bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
+ /* init shortcut */
+ bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
+ bnx2x_rx_ustorm_prods_offset(fp);
+
+ /* Configure Queue State object */
+ __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
+ __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
+
+ /* No multi-CoS for FCoE L2 client */
+ BUG_ON(fp->max_cos != 1);
+
+ bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
+ &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
+ bnx2x_sp_mapping(bp, q_rdata), q_type);
+
+ DP(NETIF_MSG_IFUP,
+ "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
+ fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
+ fp->igu_sb_id);
+}
+
void bnx2x_nic_init_cnic(struct bnx2x *bp)
{
if (!NO_FCOE(bp))
@@ -8732,16 +8787,16 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
int path = BP_PATH(bp);
DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] %d, %d, %d\n",
- path, load_count[path][0], load_count[path][1],
- load_count[path][2]);
- load_count[path][0]--;
- load_count[path][1 + port]--;
+ path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
+ bnx2x_load_count[path][2]);
+ bnx2x_load_count[path][0]--;
+ bnx2x_load_count[path][1 + port]--;
DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] %d, %d, %d\n",
- path, load_count[path][0], load_count[path][1],
- load_count[path][2]);
- if (load_count[path][0] == 0)
+ path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
+ bnx2x_load_count[path][2]);
+ if (bnx2x_load_count[path][0] == 0)
reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
- else if (load_count[path][1 + port] == 0)
+ else if (bnx2x_load_count[path][1 + port] == 0)
reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
else
reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
@@ -9767,7 +9822,7 @@ period_task_exit:
* Init service functions
*/
-u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
+static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
{
u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
@@ -9854,6 +9909,64 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
#define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff)
#define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
+#define BCM_5710_UNDI_FW_MF_MAJOR (0x07)
+#define BCM_5710_UNDI_FW_MF_MINOR (0x08)
+#define BCM_5710_UNDI_FW_MF_VERS (0x05)
+#define BNX2X_PREV_UNDI_MF_PORT(p) (0x1a150c + ((p) << 4))
+#define BNX2X_PREV_UNDI_MF_FUNC(f) (0x1a184c + ((f) << 4))
+static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp)
+{
+ u8 major, minor, version;
+ u32 fw;
+
+ /* Must check that FW is loaded */
+ if (!(REG_RD(bp, MISC_REG_RESET_REG_1) &
+ MISC_REGISTERS_RESET_REG_1_RST_XSEM)) {
+ BNX2X_DEV_INFO("XSEM is reset - UNDI MF FW is not loaded\n");
+ return false;
+ }
+
+ /* Read Currently loaded FW version */
+ fw = REG_RD(bp, XSEM_REG_PRAM);
+ major = fw & 0xff;
+ minor = (fw >> 0x8) & 0xff;
+ version = (fw >> 0x10) & 0xff;
+ BNX2X_DEV_INFO("Loaded FW: 0x%08x: Major 0x%02x Minor 0x%02x Version 0x%02x\n",
+ fw, major, minor, version);
+
+ if (major > BCM_5710_UNDI_FW_MF_MAJOR)
+ return true;
+
+ if ((major == BCM_5710_UNDI_FW_MF_MAJOR) &&
+ (minor > BCM_5710_UNDI_FW_MF_MINOR))
+ return true;
+
+ if ((major == BCM_5710_UNDI_FW_MF_MAJOR) &&
+ (minor == BCM_5710_UNDI_FW_MF_MINOR) &&
+ (version >= BCM_5710_UNDI_FW_MF_VERS))
+ return true;
+
+ return false;
+}
+
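The three chained comparisons above implement an ordered (major, minor, version) check. Since each field is a single byte, packing them into one integer preserves the ordering and gives an equivalent single compare, as this userspace sketch shows:

#include <stdio.h>

#define MF_MIN_VER ((0x07 << 16) | (0x08 << 8) | 0x05)

static int fw_supports_mf(unsigned int major, unsigned int minor,
			  unsigned int version)
{
	/* fields are u8-sized, so packing preserves lexicographic order */
	unsigned int packed = (major << 16) | (minor << 8) | version;

	return packed >= MF_MIN_VER;	/* same result as the 3 compares */
}

int main(void)
{
	printf("%d\n", fw_supports_mf(0x07, 0x08, 0x05));	/* 1 */
	printf("%d\n", fw_supports_mf(0x07, 0x07, 0xff));	/* 0 */
	return 0;
}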
+static void bnx2x_prev_unload_undi_mf(struct bnx2x *bp)
+{
+ int i;
+
+ /* Due to legacy (FW) code, the first function on each engine has a
+ * different offset macro from the rest of the functions.
+ * Setting this for all 8 functions is harmless regardless of whether
+ * this is actually a multi-function device.
+ */
+ for (i = 0; i < 2; i++)
+ REG_WR(bp, BNX2X_PREV_UNDI_MF_PORT(i), 1);
+
+ for (i = 2; i < 8; i++)
+ REG_WR(bp, BNX2X_PREV_UNDI_MF_FUNC(i - 2), 1);
+
+ BNX2X_DEV_INFO("UNDI FW (MF) set to discard\n");
+}
+
static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port, u8 inc)
{
u16 rcq, bd;
@@ -10054,7 +10167,7 @@ static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
* the one required, then FLR will be sufficient to clean any residue
* left by previous driver
*/
- rc = bnx2x_nic_load_analyze_req(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION);
+ rc = bnx2x_compare_fw_ver(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION, false);
if (!rc) {
/* fw version is good */
@@ -10142,10 +10255,17 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
else
timer_count--;
- /* If UNDI resides in memory, manually increment it */
- if (prev_undi)
+ /* New UNDI FW supports MF and contains better
+ * cleaning methods - might be redundant but harmless.
+ */
+ if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) {
+ bnx2x_prev_unload_undi_mf(bp);
+ } else if (prev_undi) {
+ /* If UNDI resides in memory,
+ * manually increment it
+ */
bnx2x_prev_unload_undi_inc(bp, BP_PORT(bp), 1);
-
+ }
udelay(10);
}
@@ -10265,8 +10385,8 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
} while (--time_counter);
if (!time_counter || rc) {
- BNX2X_ERR("Failed unloading previous driver, aborting\n");
- rc = -EBUSY;
+ BNX2X_DEV_INFO("Unloading previous driver did not occur, possibly due to MF UNDI\n");
+ rc = -EPROBE_DEFER;
}
/* Mark function if its port was used to boot from SAN */
@@ -11636,7 +11756,11 @@ static int bnx2x_init_bp(struct bnx2x *bp)
DRV_MSG_SEQ_NUMBER_MASK;
BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
- bnx2x_prev_unload(bp);
+ rc = bnx2x_prev_unload(bp);
+ if (rc) {
+ bnx2x_free_mem_bp(bp);
+ return rc;
+ }
}
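The new error path turns a failed previous-driver unload into -EPROBE_DEFER so the PCI core retries the probe later, after undoing the allocations already made. A sketch; EPROBE_DEFER is kernel-internal, so it is defined locally here:

#define EPROBE_DEFER 517	/* kernel-internal errno, defined locally */

static int prev_unload_sketch(int unload_ok)
{
	return unload_ok ? 0 : -EPROBE_DEFER;	/* ask for a later retry */
}

static int init_bp_sketch(int unload_ok)
{
	int rc = prev_unload_sketch(unload_ok);

	if (rc) {
		/* bnx2x_free_mem_bp(bp): undo earlier allocations */
		return rc;
	}
	return 0;
}

int main(void)
{
	return init_bp_sketch(1);	/* 0 on success */
}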
if (CHIP_REV_IS_FPGA(bp))
@@ -11931,7 +12055,7 @@ static int bnx2x_set_mc_list(struct bnx2x *bp)
}
/* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
-void bnx2x_set_rx_mode(struct net_device *dev)
+static void bnx2x_set_rx_mode(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -12156,6 +12280,14 @@ static int bnx2x_set_coherency_mask(struct bnx2x *bp)
return 0;
}
+static void bnx2x_disable_pcie_error_reporting(struct bnx2x *bp)
+{
+ if (bp->flags & AER_ENABLED) {
+ pci_disable_pcie_error_reporting(bp->pdev);
+ bp->flags &= ~AER_ENABLED;
+ }
+}
+
static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
struct net_device *dev, unsigned long board_type)
{
@@ -12262,6 +12394,14 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
/* clean indirect addresses */
pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
PCICFG_VENDOR_ID_OFFSET);
+
+ /* AER (Advanced Error reporting) configuration */
+ rc = pci_enable_pcie_error_reporting(pdev);
+ if (!rc)
+ bp->flags |= AER_ENABLED;
+ else
+ BNX2X_DEV_INFO("Failed to configure PCIe AER [%d]\n", rc);
+
/*
* Clean the following indirect addresses for all functions since it
* is not used by the driver.
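The AER changes follow an enable/track/disable pattern: probe records success in the AER_ENABLED flag so teardown only disables what was actually enabled. A simplified sketch; the pci_* calls named in comments are the real kernel APIs, while the struct is a stand-in:

struct dev_state {
	unsigned int flags;
#define FLAG_AER_ENABLED (1 << 0)	/* stand-in for AER_ENABLED */
};

static void aer_setup(struct dev_state *st, int enable_rc)
{
	/* enable_rc models pci_enable_pcie_error_reporting()'s return */
	if (!enable_rc)
		st->flags |= FLAG_AER_ENABLED;
}

static void aer_teardown(struct dev_state *st)
{
	if (st->flags & FLAG_AER_ENABLED) {
		/* pci_disable_pcie_error_reporting(pdev); */
		st->flags &= ~FLAG_AER_ENABLED;
	}
}

int main(void)
{
	struct dev_state st = { 0 };

	aer_setup(&st, 0);	/* enable succeeded */
	aer_teardown(&st);	/* disables exactly once */
	return 0;
}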
@@ -12693,8 +12833,6 @@ static int set_is_vf(int chip_id)
}
}
-struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
-
static int bnx2x_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -12869,6 +13007,8 @@ static int bnx2x_init_one(struct pci_dev *pdev,
return 0;
init_one_exit:
+ bnx2x_disable_pcie_error_reporting(bp);
+
if (bp->regview)
iounmap(bp->regview);
@@ -12942,6 +13082,7 @@ static void __bnx2x_remove(struct pci_dev *pdev,
pci_set_power_state(pdev, PCI_D3hot);
}
+ bnx2x_disable_pcie_error_reporting(bp);
if (remove_netdev) {
if (bp->regview)
iounmap(bp->regview);
@@ -12961,9 +13102,9 @@ static void __bnx2x_remove(struct pci_dev *pdev,
if (atomic_read(&pdev->enable_cnt) == 1)
pci_release_regions(pdev);
- }
- pci_disable_device(pdev);
+ pci_disable_device(pdev);
+ }
}
static void bnx2x_remove_one(struct pci_dev *pdev)
@@ -13120,6 +13261,14 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
rtnl_unlock();
+ /* If AER, perform cleanup of the PCIe registers */
+ if (bp->flags & AER_ENABLED) {
+ if (pci_cleanup_aer_uncorrect_error_status(pdev))
+ BNX2X_ERR("pci_cleanup_aer_uncorrect_error_status failed\n");
+ else
+ DP(NETIF_MSG_HW, "pci_cleanup_aer_uncorrect_error_status succeeded\n");
+ }
+
return PCI_ERS_RESULT_RECOVERED;
}
@@ -13758,7 +13907,7 @@ static int bnx2x_unregister_cnic(struct net_device *dev)
return 0;
}
-struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
+static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
@@ -13808,7 +13957,7 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
return cp;
}
-u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
+static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
{
struct bnx2x *bp = fp->bp;
u32 offset = BAR_USTRORM_INTMEM;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 14ffb6e56e59..2beb5430b876 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -5932,6 +5932,7 @@
#define MISC_REGISTERS_RESET_REG_1_RST_NIG (0x1<<7)
#define MISC_REGISTERS_RESET_REG_1_RST_PXP (0x1<<26)
#define MISC_REGISTERS_RESET_REG_1_RST_PXPV (0x1<<27)
+#define MISC_REGISTERS_RESET_REG_1_RST_XSEM (0x1<<22)
#define MISC_REGISTERS_RESET_REG_1_SET 0x584
#define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598
#define MISC_REGISTERS_RESET_REG_2_MSTAT0 (0x1<<24)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 18438a504d57..0fb6ff2ac8e3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -355,23 +355,6 @@ static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
return vp->get(vp, 1);
}
-
-static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
-{
- struct bnx2x_credit_pool_obj *mp = o->macs_pool;
- struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
-
- if (!mp->get(mp, 1))
- return false;
-
- if (!vp->get(vp, 1)) {
- mp->put(mp, 1);
- return false;
- }
-
- return true;
-}
-
static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
{
struct bnx2x_credit_pool_obj *mp = o->macs_pool;
@@ -400,22 +383,6 @@ static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
return vp->put(vp, 1);
}
-static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
-{
- struct bnx2x_credit_pool_obj *mp = o->macs_pool;
- struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
-
- if (!mp->put(mp, 1))
- return false;
-
- if (!vp->put(vp, 1)) {
- mp->get(mp, 1);
- return false;
- }
-
- return true;
-}
-
/**
* __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock
*
@@ -507,22 +474,6 @@ static void __bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
}
}
-/**
- * bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
- *
- * @bp: device handle
- * @o: vlan_mac object
- *
- * @details Notice if a pending execution exists, it would perform it -
- * possibly releasing and reclaiming the execution queue lock.
- */
-void bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *o)
-{
- spin_lock_bh(&o->exe_queue.lock);
- __bnx2x_vlan_mac_h_write_unlock(bp, o);
- spin_unlock_bh(&o->exe_queue.lock);
-}
/**
* __bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
@@ -663,7 +614,7 @@ static int bnx2x_check_mac_add(struct bnx2x *bp,
/* Check if a requested MAC already exists */
list_for_each_entry(pos, &o->head, link)
- if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
+ if (ether_addr_equal(data->mac.mac, pos->u.mac.mac) &&
(data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
return -EEXIST;
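ether_addr_equal() is semantically the inverted 6-byte memcmp() it replaces here; the kernel version is additionally optimized for aligned loads. A userspace sketch:

#include <string.h>

#define ETH_ALEN 6

static int ether_addr_equal_sketch(const unsigned char *a,
				   const unsigned char *b)
{
	return memcmp(a, b, ETH_ALEN) == 0;
}

int main(void)
{
	unsigned char x[ETH_ALEN] = { 0, 1, 2, 3, 4, 5 };
	unsigned char y[ETH_ALEN] = { 0, 1, 2, 3, 4, 5 };

	return !ether_addr_equal_sketch(x, y);	/* exit 0: addresses match */
}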
@@ -685,26 +636,6 @@ static int bnx2x_check_vlan_add(struct bnx2x *bp,
return 0;
}
-static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *o,
- union bnx2x_classification_ramrod_data *data)
-{
- struct bnx2x_vlan_mac_registry_elem *pos;
-
- DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
- data->vlan_mac.mac, data->vlan_mac.vlan);
-
- list_for_each_entry(pos, &o->head, link)
- if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
- (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
- ETH_ALEN)) &&
- (data->vlan_mac.is_inner_mac ==
- pos->u.vlan_mac.is_inner_mac))
- return -EEXIST;
-
- return 0;
-}
-
/* check_del() callbacks */
static struct bnx2x_vlan_mac_registry_elem *
bnx2x_check_mac_del(struct bnx2x *bp,
@@ -716,7 +647,7 @@ static struct bnx2x_vlan_mac_registry_elem *
DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);
list_for_each_entry(pos, &o->head, link)
- if ((!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) &&
+ if (ether_addr_equal(data->mac.mac, pos->u.mac.mac) &&
(data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
return pos;
@@ -739,27 +670,6 @@ static struct bnx2x_vlan_mac_registry_elem *
return NULL;
}
-static struct bnx2x_vlan_mac_registry_elem *
- bnx2x_check_vlan_mac_del(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *o,
- union bnx2x_classification_ramrod_data *data)
-{
- struct bnx2x_vlan_mac_registry_elem *pos;
-
- DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
- data->vlan_mac.mac, data->vlan_mac.vlan);
-
- list_for_each_entry(pos, &o->head, link)
- if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
- (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
- ETH_ALEN)) &&
- (data->vlan_mac.is_inner_mac ==
- pos->u.vlan_mac.is_inner_mac))
- return pos;
-
- return NULL;
-}
-
/* check_move() callback */
static bool bnx2x_check_move(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *src_o,
@@ -811,8 +721,8 @@ static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
return rx_tx_flag;
}
-void bnx2x_set_mac_in_nig(struct bnx2x *bp,
- bool add, unsigned char *dev_addr, int index)
+static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
+ bool add, unsigned char *dev_addr, int index)
{
u32 wb_data[2];
u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
@@ -1126,97 +1036,6 @@ static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
rule_cnt);
}
-static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *o,
- struct bnx2x_exeq_elem *elem,
- int rule_idx, int cam_offset)
-{
- struct bnx2x_raw_obj *raw = &o->raw;
- struct eth_classify_rules_ramrod_data *data =
- (struct eth_classify_rules_ramrod_data *)(raw->rdata);
- int rule_cnt = rule_idx + 1;
- union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
- enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
- bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
- u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
- u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
-
- /* Reset the ramrod data buffer for the first rule */
- if (rule_idx == 0)
- memset(data, 0, sizeof(*data));
-
- /* Set a rule header */
- bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
- &rule_entry->pair.header);
-
- /* Set VLAN and MAC themselves */
- rule_entry->pair.vlan = cpu_to_le16(vlan);
- bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
- &rule_entry->pair.mac_mid,
- &rule_entry->pair.mac_lsb, mac);
- rule_entry->pair.inner_mac =
- cpu_to_le16(elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac);
- /* MOVE: Add a rule that will add this MAC to the target Queue */
- if (cmd == BNX2X_VLAN_MAC_MOVE) {
- rule_entry++;
- rule_cnt++;
-
- /* Setup ramrod data */
- bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
- elem->cmd_data.vlan_mac.target_obj,
- true, CLASSIFY_RULE_OPCODE_PAIR,
- &rule_entry->pair.header);
-
- /* Set a VLAN itself */
- rule_entry->pair.vlan = cpu_to_le16(vlan);
- bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
- &rule_entry->pair.mac_mid,
- &rule_entry->pair.mac_lsb, mac);
- rule_entry->pair.inner_mac =
- cpu_to_le16(elem->cmd_data.vlan_mac.u.
- vlan_mac.is_inner_mac);
- }
-
- /* Set the ramrod data header */
- /* TODO: take this to the higher level in order to prevent multiple
- writing */
- bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
- rule_cnt);
-}
-
-/**
- * bnx2x_set_one_vlan_mac_e1h -
- *
- * @bp: device handle
- * @o: bnx2x_vlan_mac_obj
- * @elem: bnx2x_exeq_elem
- * @rule_idx: rule_idx
- * @cam_offset: cam_offset
- */
-static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *o,
- struct bnx2x_exeq_elem *elem,
- int rule_idx, int cam_offset)
-{
- struct bnx2x_raw_obj *raw = &o->raw;
- struct mac_configuration_cmd *config =
- (struct mac_configuration_cmd *)(raw->rdata);
- /* 57710 and 57711 do not support MOVE command,
- * so it's either ADD or DEL
- */
- bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
- true : false;
-
- /* Reset the ramrod data buffer */
- memset(config, 0, sizeof(*config));
-
- bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
- cam_offset, add,
- elem->cmd_data.vlan_mac.u.vlan_mac.mac,
- elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
- ETH_VLAN_FILTER_CLASSIFY, config);
-}
-
/**
* bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
*
@@ -1316,24 +1135,6 @@ static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
return NULL;
}
-static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
- struct bnx2x_exe_queue_obj *o,
- struct bnx2x_exeq_elem *elem)
-{
- struct bnx2x_exeq_elem *pos;
- struct bnx2x_vlan_mac_ramrod_data *data =
- &elem->cmd_data.vlan_mac.u.vlan_mac;
-
- /* Check pending for execution commands */
- list_for_each_entry(pos, &o->exe_queue, link)
- if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
- sizeof(*data)) &&
- (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
- return pos;
-
- return NULL;
-}
-
/**
* bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
*
@@ -2241,69 +2042,6 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp,
}
}
-void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *vlan_mac_obj,
- u8 cl_id, u32 cid, u8 func_id, void *rdata,
- dma_addr_t rdata_mapping, int state,
- unsigned long *pstate, bnx2x_obj_type type,
- struct bnx2x_credit_pool_obj *macs_pool,
- struct bnx2x_credit_pool_obj *vlans_pool)
-{
- union bnx2x_qable_obj *qable_obj =
- (union bnx2x_qable_obj *)vlan_mac_obj;
-
- bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
- rdata_mapping, state, pstate, type,
- macs_pool, vlans_pool);
-
- /* CAM pool handling */
- vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
- vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
- /* CAM offset is relevant for 57710 and 57711 chips only which have a
- * single CAM for both MACs and VLAN-MAC pairs. So the offset
- * will be taken from MACs' pool object only.
- */
- vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
- vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
-
- if (CHIP_IS_E1(bp)) {
- BNX2X_ERR("Do not support chips others than E2\n");
- BUG();
- } else if (CHIP_IS_E1H(bp)) {
- vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h;
- vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
- vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
- vlan_mac_obj->check_move = bnx2x_check_move_always_err;
- vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
-
- /* Exe Queue */
- bnx2x_exe_queue_init(bp,
- &vlan_mac_obj->exe_queue, 1, qable_obj,
- bnx2x_validate_vlan_mac,
- bnx2x_remove_vlan_mac,
- bnx2x_optimize_vlan_mac,
- bnx2x_execute_vlan_mac,
- bnx2x_exeq_get_vlan_mac);
- } else {
- vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2;
- vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
- vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
- vlan_mac_obj->check_move = bnx2x_check_move;
- vlan_mac_obj->ramrod_cmd =
- RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
-
- /* Exe Queue */
- bnx2x_exe_queue_init(bp,
- &vlan_mac_obj->exe_queue,
- CLASSIFY_RULES_COUNT,
- qable_obj, bnx2x_validate_vlan_mac,
- bnx2x_remove_vlan_mac,
- bnx2x_optimize_vlan_mac,
- bnx2x_execute_vlan_mac,
- bnx2x_exeq_get_vlan_mac);
- }
-}
-
/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
static inline void __storm_memset_mac_filters(struct bnx2x *bp,
struct tstorm_eth_mac_filter_config *mac_filters,
@@ -4990,6 +4728,13 @@ static void bnx2x_q_fill_update_data(struct bnx2x *bp,
test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
+
+ /* tx switching */
+ data->tx_switching_flg =
+ test_bit(BNX2X_Q_UPDATE_TX_SWITCHING, &params->update_flags);
+ data->tx_switching_change_flg =
+ test_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
+ &params->update_flags);
}
static inline int bnx2x_q_send_update(struct bnx2x *bp,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 6a53c15c85a3..00d7f214a40a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -448,9 +448,6 @@ enum {
BNX2X_LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
};
-void bnx2x_set_mac_in_nig(struct bnx2x *bp,
- bool add, unsigned char *dev_addr, int index);
-
/** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
/* RX_MODE ramrod special flags: set in rx_mode_flags field in
@@ -770,7 +767,9 @@ enum {
BNX2X_Q_UPDATE_DEF_VLAN_EN,
BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
- BNX2X_Q_UPDATE_SILENT_VLAN_REM
+ BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+ BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
+ BNX2X_Q_UPDATE_TX_SWITCHING
};
/* Allowed Queue states */
@@ -1307,22 +1306,12 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp,
unsigned long *pstate, bnx2x_obj_type type,
struct bnx2x_credit_pool_obj *vlans_pool);
-void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *vlan_mac_obj,
- u8 cl_id, u32 cid, u8 func_id, void *rdata,
- dma_addr_t rdata_mapping, int state,
- unsigned long *pstate, bnx2x_obj_type type,
- struct bnx2x_credit_pool_obj *macs_pool,
- struct bnx2x_credit_pool_obj *vlans_pool);
-
int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *o);
void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *o);
int bnx2x_vlan_mac_h_write_lock(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *o);
-void bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *o);
int bnx2x_config_vlan_mac(struct bnx2x *bp,
struct bnx2x_vlan_mac_ramrod_params *p);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index e7845e5be1c7..aec5ef2ed7ce 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -166,6 +166,7 @@ enum bnx2x_vfop_qteardown_state {
BNX2X_VFOP_QTEARDOWN_RXMODE,
BNX2X_VFOP_QTEARDOWN_CLR_VLAN,
BNX2X_VFOP_QTEARDOWN_CLR_MAC,
+ BNX2X_VFOP_QTEARDOWN_CLR_MCAST,
BNX2X_VFOP_QTEARDOWN_QDTOR,
BNX2X_VFOP_QTEARDOWN_DONE
};
@@ -617,7 +618,7 @@ static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
&vlan_mac->user_req.vlan_mac_flags,
&vlan_mac->ramrod_flags);
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE:
/* next state */
@@ -628,7 +629,7 @@ static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
if (vfop->rc == -EEXIST)
vfop->rc = 0;
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
case BNX2X_VFOP_VLAN_MAC_CHK_DONE:
vfop->rc = !!obj->raw.check_pending(&obj->raw);
@@ -645,7 +646,7 @@ static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
case BNX2X_VFOP_VLAN_CONFIG_LIST:
/* next state */
@@ -657,7 +658,7 @@ static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
}
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
default:
bnx2x_vfop_default(state);
@@ -798,10 +799,10 @@ int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
return -ENOMEM;
}
-int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- int qid, u16 vid, bool add)
+static int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ int qid, u16 vid, bool add)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
int rc;
@@ -1023,25 +1024,35 @@ static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
case BNX2X_VFOP_QFLR_CLR_VLAN:
/* vlan-clear-all: driver-only, don't consume credit */
vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;
- if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj)))
- vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid,
- true);
- if (vfop->rc)
- goto op_err;
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+
+ if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj))) {
+ /* the vlan_mac vfop will re-schedule us */
+ vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd,
+ qid, true);
+ if (vfop->rc)
+ goto op_err;
+ return;
+
+ } else {
+ /* need to reschedule ourselves */
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+ }
case BNX2X_VFOP_QFLR_CLR_MAC:
/* mac-clear-all: driver-only, consume credit */
vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
- if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj)))
- vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid,
- true);
- DP(BNX2X_MSG_IOV,
- "VF[%d] vfop->rc after bnx2x_vfop_mac_delall_cmd was %d",
- vf->abs_vfid, vfop->rc);
- if (vfop->rc)
- goto op_err;
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+ if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj))) {
+ /* the vlan_mac vfop will re-schedule us */
+ vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd,
+ qid, true);
+ if (vfop->rc)
+ goto op_err;
+ return;
+
+ } else {
+ /* need to reschedule ourselves */
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+ }
case BNX2X_VFOP_QFLR_TERMINATE:
qstate = &vfop->op_p->qctor.qstate;
@@ -1112,7 +1123,10 @@ static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf)
switch (state) {
case BNX2X_VFOP_MCAST_DEL:
/* clear existing mcasts */
- vfop->state = BNX2X_VFOP_MCAST_ADD;
+ vfop->state = (args->mc_num) ? BNX2X_VFOP_MCAST_ADD
+ : BNX2X_VFOP_MCAST_CHK_DONE;
+ mcast->mcast_list_len = vf->mcast_list_len;
+ vf->mcast_list_len = args->mc_num;
vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL);
bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
@@ -1120,17 +1134,17 @@ static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf)
if (raw->check_pending(raw))
goto op_pending;
- if (args->mc_num) {
- /* update mcast list on the ramrod params */
- INIT_LIST_HEAD(&mcast->mcast_list);
- for (i = 0; i < args->mc_num; i++)
- list_add_tail(&(args->mc[i].link),
- &mcast->mcast_list);
- /* add new mcasts */
- vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
- vfop->rc = bnx2x_config_mcast(bp, mcast,
- BNX2X_MCAST_CMD_ADD);
- }
+ /* update mcast list on the ramrod params */
+ INIT_LIST_HEAD(&mcast->mcast_list);
+ for (i = 0; i < args->mc_num; i++)
+ list_add_tail(&(args->mc[i].link),
+ &mcast->mcast_list);
+ mcast->mcast_list_len = args->mc_num;
+
+ /* add new mcasts */
+ vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
+ vfop->rc = bnx2x_config_mcast(bp, mcast,
+ BNX2X_MCAST_CMD_ADD);
bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
case BNX2X_VFOP_MCAST_CHK_DONE:
@@ -1312,12 +1326,19 @@ static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf)
case BNX2X_VFOP_QTEARDOWN_CLR_MAC:
/* mac-clear-all: consume credit */
- vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
+ vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MCAST;
vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, false);
if (vfop->rc)
goto op_err;
return;
+ case BNX2X_VFOP_QTEARDOWN_CLR_MCAST:
+ vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
+ vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false);
+ if (vfop->rc)
+ goto op_err;
+ return;
+
case BNX2X_VFOP_QTEARDOWN_QDTOR:
/* run the queue destruction flow */
DP(BNX2X_MSG_IOV, "case: BNX2X_VFOP_QTEARDOWN_QDTOR\n");
@@ -2197,6 +2218,7 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
* It needs to be initialized here so that it can be safely
* handled by a subsequent FLR flow.
*/
+ vf->mcast_list_len = 0;
bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
0xFF, 0xFF, 0xFF,
bnx2x_vf_sp(bp, vf, mcast_rdata),
@@ -2372,8 +2394,9 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
goto get_vf;
case EVENT_RING_OPCODE_MALICIOUS_VF:
abs_vfid = elem->message.data.malicious_vf_event.vf_id;
- DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
- abs_vfid, elem->message.data.malicious_vf_event.err_id);
+ BNX2X_ERR("Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
+ abs_vfid,
+ elem->message.data.malicious_vf_event.err_id);
goto get_vf;
default:
return 1;
@@ -2425,15 +2448,9 @@ get_vf:
bnx2x_vf_handle_filters_eqe(bp, vf);
break;
case EVENT_RING_OPCODE_VF_FLR:
- DP(BNX2X_MSG_IOV, "got VF [%d] FLR notification\n",
- vf->abs_vfid);
- /* Do nothing for now */
- break;
case EVENT_RING_OPCODE_MALICIOUS_VF:
- DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d error id %x\n",
- abs_vfid, elem->message.data.malicious_vf_event.err_id);
/* Do nothing for now */
- break;
+ return 0;
}
/* SRIOV: reschedule any 'in_progress' operations */
bnx2x_iov_sp_event(bp, cid, false);
@@ -2857,13 +2874,9 @@ static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
goto op_err;
return;
}
-
- /* remove multicasts */
vfop->state = BNX2X_VFOP_CLOSE_HW;
- vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false);
- if (vfop->rc)
- goto op_err;
- return;
+ vfop->rc = 0;
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
case BNX2X_VFOP_CLOSE_HW:
@@ -2897,6 +2910,9 @@ op_done:
DP(BNX2X_MSG_IOV, "set state to acquired\n");
bnx2x_vfop_end(bp, vf, vfop);
+op_pending:
/* Not supported at the moment; exists for macros only */
+ return;
}
int bnx2x_vfop_close_cmd(struct bnx2x *bp,
@@ -3119,6 +3135,60 @@ void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
vf->abs_vfid, vf->op_current);
}
+static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable)
+{
+ struct bnx2x_queue_state_params q_params;
+ u32 prev_flags;
+ int i, rc;
+
+ /* Verify changes are needed and record current Tx switching state */
+ prev_flags = bp->flags;
+ if (enable)
+ bp->flags |= TX_SWITCHING;
+ else
+ bp->flags &= ~TX_SWITCHING;
+ if (prev_flags == bp->flags)
+ return 0;
+
+ /* Verify state enables the sending of queue ramrods */
+ if ((bp->state != BNX2X_STATE_OPEN) ||
+ (bnx2x_get_q_logical_state(bp,
+ &bnx2x_sp_obj(bp, &bp->fp[0]).q_obj) !=
+ BNX2X_Q_LOGICAL_STATE_ACTIVE))
+ return 0;
+
+ /* send q. update ramrod to configure Tx switching */
+ memset(&q_params, 0, sizeof(q_params));
+ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+ q_params.cmd = BNX2X_Q_CMD_UPDATE;
+ __set_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
+ &q_params.params.update.update_flags);
+ if (enable)
+ __set_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
+ &q_params.params.update.update_flags);
+ else
+ __clear_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
+ &q_params.params.update.update_flags);
+
+ /* send the ramrod on all the queues of the PF */
+ for_each_eth_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+
+ /* Set the appropriate Queue object */
+ q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
+
+ /* Update the Queue state */
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Failed to configure Tx switching\n");
+ return rc;
+ }
+ }
+
+ DP(BNX2X_MSG_IOV, "%s Tx Switching\n", enable ? "Enabled" : "Disabled");
+ return 0;
+}
+
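bnx2x_set_pf_tx_switching() toggles the TX_SWITCHING flag first and returns early when nothing changed, so queue-update ramrods are only sent on real transitions. A userspace sketch of that idempotence guard:

#include <stdio.h>

#define TX_SWITCHING (1 << 18)

static int set_tx_switching(unsigned int *flags, int enable)
{
	unsigned int prev = *flags;

	if (enable)
		*flags |= TX_SWITCHING;
	else
		*flags &= ~TX_SWITCHING;
	if (prev == *flags)
		return 0;	/* no transition: skip queue updates */

	printf("%s Tx switching\n", enable ? "enable" : "disable");
	return 0;
}

int main(void)
{
	unsigned int flags = 0;

	set_tx_switching(&flags, 1);	/* prints */
	set_tx_switching(&flags, 1);	/* silent: already enabled */
	return 0;
}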
int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
{
struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));
@@ -3146,12 +3216,14 @@ int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
bp->requested_nr_virtfn = num_vfs_param;
if (num_vfs_param == 0) {
+ bnx2x_set_pf_tx_switching(bp, false);
pci_disable_sriov(dev);
return 0;
} else {
return bnx2x_enable_sriov(bp);
}
}
+
#define IGU_ENTRY_SIZE 4
int bnx2x_enable_sriov(struct bnx2x *bp)
@@ -3229,6 +3301,11 @@ int bnx2x_enable_sriov(struct bnx2x *bp)
*/
DP(BNX2X_MSG_IOV, "about to call enable sriov\n");
bnx2x_disable_sriov(bp);
+
+ rc = bnx2x_set_pf_tx_switching(bp, true);
+ if (rc)
+ return rc;
+
rc = pci_enable_sriov(bp->pdev, req_vfs);
if (rc) {
BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
@@ -3639,7 +3716,7 @@ enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
/* the mac address in bulletin board is valid and is new */
if (bulletin.valid_bitmap & 1 << MAC_ADDR_VALID &&
- memcmp(bulletin.mac, bp->old_bulletin.mac, ETH_ALEN)) {
+ !ether_addr_equal(bulletin.mac, bp->old_bulletin.mac)) {
/* update new mac to net device */
memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 8c213fa52174..d9fcca1b5a9d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -269,6 +269,7 @@ struct bnx2x_virtf {
int leading_rss;
/* MCAST object */
+ int mcast_list_len;
struct bnx2x_mcast_obj mcast_obj;
/* RSS configuration object */
@@ -664,11 +665,6 @@ int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
struct bnx2x_vfop_filters *macs,
int qid, bool drv_only);
-int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- int qid, u16 vid, bool add);
-
int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd,
@@ -726,13 +722,6 @@ void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid);
/* Handles an FLR (or VF_DISABLE) notification form the MCP */
void bnx2x_vf_handle_flr_event(struct bnx2x *bp);
-void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
- u16 length);
-void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
- u16 type, u16 length);
-void bnx2x_vfpf_finalize(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv);
-void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list);
-
bool bnx2x_tlv_supported(u16 tlvtype);
u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
@@ -749,7 +738,6 @@ int bnx2x_vfpf_init(struct bnx2x *bp);
void bnx2x_vfpf_close_vf(struct bnx2x *bp);
int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
bool is_leading);
-int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx);
int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set);
int bnx2x_vfpf_config_rss(struct bnx2x *bp,
struct bnx2x_config_rss_params *params);
@@ -813,7 +801,6 @@ static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; }
static inline int bnx2x_vfpf_init(struct bnx2x *bp) {return 0; }
static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {}
static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, bool is_leading) {return 0; }
-static inline int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) {return 0; }
static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr,
u8 vf_qid, bool set) {return 0; }
static inline int bnx2x_vfpf_config_rss(struct bnx2x *bp,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 0756d7dabdd5..3fa6c2a2a5a9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -21,9 +21,11 @@
#include "bnx2x_cmn.h"
#include <linux/crc32.h>
+static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx);
+
/* place a given tlv on the tlv buffer at a given offset */
-void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
- u16 length)
+static void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list,
+ u16 offset, u16 type, u16 length)
{
struct channel_tlv *tl =
(struct channel_tlv *)(tlvs_list + offset);
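For reference, the TLV helpers place a small type/length header at a caller-supplied offset; requests are built by chaining such headers at running offsets. A stand-alone sketch under that assumption (MY_REQ_TYPE, MY_LIST_END_TYPE and struct my_req are placeholders):

#include <stdint.h>

struct channel_tlv {
	uint16_t type;
	uint16_t length;
};

static void add_tlv(void *tlvs_list, uint16_t offset, uint16_t type,
		    uint16_t length)
{
	struct channel_tlv *tl =
		(struct channel_tlv *)((char *)tlvs_list + offset);

	tl->type = type;
	tl->length = length;	/* length as passed by the caller */
}

/* usage sketch: first TLV at offset 0, terminator right after it
 *	add_tlv(mbox, 0, MY_REQ_TYPE, sizeof(struct my_req));
 *	add_tlv(mbox, sizeof(struct my_req), MY_LIST_END_TYPE,
 *		sizeof(struct channel_tlv));
 */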
@@ -33,8 +35,8 @@ void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
}
/* Clear the mailbox and init the header of the first tlv */
-void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
- u16 type, u16 length)
+static void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
+ u16 type, u16 length)
{
mutex_lock(&bp->vf2pf_mutex);
@@ -52,7 +54,8 @@ void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
}
/* releases the mailbox */
-void bnx2x_vfpf_finalize(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv)
+static void bnx2x_vfpf_finalize(struct bnx2x *bp,
+ struct vfpf_first_tlv *first_tlv)
{
DP(BNX2X_MSG_IOV, "done sending [%d] tlv over vf pf channel\n",
first_tlv->tl.type);
@@ -85,7 +88,7 @@ static void *bnx2x_search_tlv_list(struct bnx2x *bp, void *tlvs_list,
}
/* list the types and lengths of the tlvs on the buffer */
-void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
+static void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
{
int i = 1;
struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;
@@ -633,7 +636,7 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
return rc;
}
-int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
+static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
{
struct vfpf_q_op_tlv *req = &bp->vf2pf_mbox->req.q_op;
struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
@@ -800,14 +803,18 @@ int bnx2x_vfpf_config_rss(struct bnx2x *bp,
}
if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
- BNX2X_ERR("failed to send rss message to PF over Vf PF channel %d\n",
- resp->hdr.status);
- rc = -EINVAL;
+ /* Since older drivers don't support this feature (and VF has
+ * no way of knowing other than failing this), don't propagate
+ * an error in this case.
+ */
+ DP(BNX2X_MSG_IOV,
+ "Failed to send rss message to PF over VF-PF channel [%d]\n",
+ resp->hdr.status);
}
out:
bnx2x_vfpf_finalize(bp, &req->first_tlv);
- return 0;
+ return rc;
}
int bnx2x_vfpf_set_mcast(struct net_device *dev)
@@ -1416,6 +1423,14 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
setup_q->rxq.cache_line_log;
rxq_params->sb_cq_index = setup_q->rxq.sb_index;
+ /* rx setup - multicast engine */
+ if (bnx2x_vfq_is_leading(q)) {
+ u8 mcast_id = FW_VF_HANDLE(vf->abs_vfid);
+
+ rxq_params->mcast_engine_id = mcast_id;
+ __set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
+ }
+
bnx2x_vfop_qctor_dump_rx(bp, vf, init_p, setup_p,
q->index, q->sb_idx);
}
@@ -1706,7 +1721,7 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
/* ...and only the mac set by the ndo */
if (filters->n_mac_vlan_filters == 1 &&
- memcmp(filters->filters->mac, bulletin->mac, ETH_ALEN)) {
+ !ether_addr_equal(filters->filters->mac, bulletin->mac)) {
BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
vf->abs_vfid);
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index f58a8b80302d..fcf9105a5476 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -5220,6 +5220,7 @@ static void cnic_init_rings(struct cnic_dev *dev)
cnic_ring_ctl(dev, cid, cli, 1);
*cid_ptr = cid >> 4;
*(cid_ptr + 1) = cid * bp->db_size;
+ *(cid_ptr + 2) = UIO_USE_TX_DOORBELL;
}
}
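UIO_USE_TX_DOORBELL is a magic value published in a third ring-control word so the user-space UIO consumer can tell it should ring the Tx doorbell itself; the first two words already carry the CID and the doorbell offset. A hedged sketch of how a consumer might test it (the word layout is read off the hunk above, the helper itself is illustrative):

#include <stdint.h>

#define UIO_USE_TX_DOORBELL 0x017855DB

static int uio_wants_tx_doorbell(const volatile uint32_t *cid_ptr)
{
	uint32_t cid    = cid_ptr[0];	/* cid >> 4, as stored by cnic */
	uint32_t db_off = cid_ptr[1];	/* cid * bp->db_size */

	(void)cid;
	(void)db_off;
	return cid_ptr[2] == UIO_USE_TX_DOORBELL;
}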
diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h
index 0121a5d55192..0d6b13f854d9 100644
--- a/drivers/net/ethernet/broadcom/cnic.h
+++ b/drivers/net/ethernet/broadcom/cnic.h
@@ -186,6 +186,8 @@ struct kcq_info {
u16 (*hw_idx)(u16);
};
+#define UIO_USE_TX_DOORBELL 0x017855DB
+
struct cnic_uio_dev {
struct uio_info cnic_uinfo;
u32 uio_dev;
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index ebbfe25acaa6..8cf6b1926069 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -14,8 +14,8 @@
#include "bnx2x/bnx2x_mfw_req.h"
-#define CNIC_MODULE_VERSION "2.5.18"
-#define CNIC_MODULE_RELDATE "Sept 01, 2013"
+#define CNIC_MODULE_VERSION "2.5.19"
+#define CNIC_MODULE_RELDATE "December 19, 2013"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index c2777712da99..b61c14ed9b8d 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*
* This driver is designed for the Broadcom SiByte SOC built-in
@@ -36,7 +35,6 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
-#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/ethtool.h>
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 15a66e4b1f57..e2ca03e23dc1 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -25,7 +25,6 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
@@ -37,6 +36,7 @@
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
+#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
@@ -94,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define DRV_MODULE_NAME "tg3"
#define TG3_MAJ_NUM 3
-#define TG3_MIN_NUM 134
+#define TG3_MIN_NUM 136
#define DRV_MODULE_VERSION \
__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE "Sep 16, 2013"
+#define DRV_MODULE_RELDATE "Jan 03, 2014"
#define RESET_KIND_SHUTDOWN 0
#define RESET_KIND_INIT 1
@@ -208,6 +208,9 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define TG3_RAW_IP_ALIGN 2
+#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
+#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)
+
#define TG3_FW_UPDATE_TIMEOUT_SEC 5
#define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
@@ -3948,32 +3951,41 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
return 0;
}
+/* tp->lock is held. */
+static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
+{
+ u32 addr_high, addr_low;
+
+ addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
+ addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
+ (mac_addr[4] << 8) | mac_addr[5]);
+
+ if (index < 4) {
+ tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
+ tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
+ } else {
+ index -= 4;
+ tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
+ tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
+ }
+}
/* tp->lock is held. */
static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
{
- u32 addr_high, addr_low;
+ u32 addr_high;
int i;
- addr_high = ((tp->dev->dev_addr[0] << 8) |
- tp->dev->dev_addr[1]);
- addr_low = ((tp->dev->dev_addr[2] << 24) |
- (tp->dev->dev_addr[3] << 16) |
- (tp->dev->dev_addr[4] << 8) |
- (tp->dev->dev_addr[5] << 0));
for (i = 0; i < 4; i++) {
if (i == 1 && skip_mac_1)
continue;
- tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
- tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
+ __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
}
if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
tg3_asic_rev(tp) == ASIC_REV_5704) {
- for (i = 0; i < 12; i++) {
- tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
- tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
- }
+ for (i = 4; i < 16; i++)
+ __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
}
addr_high = (tp->dev->dev_addr[0] +
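The new __tg3_set_one_mac_addr() packs a 6-byte MAC into two 32-bit register values: bytes 0-1 into the high word, bytes 2-5 into the low word. A stand-alone sketch of just the packing:

#include <stdint.h>
#include <stdio.h>

static void pack_mac(const uint8_t *mac, uint32_t *hi, uint32_t *lo)
{
	*hi = (mac[0] << 8) | mac[1];
	*lo = ((uint32_t)mac[2] << 24) | (mac[3] << 16) |
	      (mac[4] << 8) | mac[5];
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };
	uint32_t hi, lo;

	pack_mac(mac, &hi, &lo);
	printf("high=0x%04x low=0x%08x\n", hi, lo);	/* high=0x0010 low=0x18aabbcc */
	return 0;
}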
@@ -4403,9 +4415,12 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
if (tg3_flag(tp, WOL_SPEED_100MB))
adv |= ADVERTISED_100baseT_Half |
ADVERTISED_100baseT_Full;
- if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
- adv |= ADVERTISED_1000baseT_Half |
- ADVERTISED_1000baseT_Full;
+ if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
+ if (!(tp->phy_flags &
+ TG3_PHYFLG_DISABLE_1G_HD_ADV))
+ adv |= ADVERTISED_1000baseT_Half;
+ adv |= ADVERTISED_1000baseT_Full;
+ }
fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
} else {
@@ -8925,6 +8940,49 @@ static void tg3_restore_pci_state(struct tg3 *tp)
}
}
+static void tg3_override_clk(struct tg3 *tp)
+{
+ u32 val;
+
+ switch (tg3_asic_rev(tp)) {
+ case ASIC_REV_5717:
+ val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
+ tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
+ TG3_CPMU_MAC_ORIDE_ENABLE);
+ break;
+
+ case ASIC_REV_5719:
+ case ASIC_REV_5720:
+ tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
+ break;
+
+ default:
+ return;
+ }
+}
+
+static void tg3_restore_clk(struct tg3 *tp)
+{
+ u32 val;
+
+ switch (tg3_asic_rev(tp)) {
+ case ASIC_REV_5717:
+ val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
+ tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
+ val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
+ break;
+
+ case ASIC_REV_5719:
+ case ASIC_REV_5720:
+ val = tr32(TG3_CPMU_CLCK_ORIDE);
+ tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
+ break;
+
+ default:
+ return;
+ }
+}
+
/* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp)
{
@@ -9013,6 +9071,13 @@ static int tg3_chip_reset(struct tg3 *tp)
tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
}
+ /* Set the clock to the highest frequency to avoid timeouts. With link
+ * aware mode, the clock speed could be slow and bootcode does not
+ * complete within the expected time. Override the clock to allow the
+ * bootcode to finish sooner and then restore it.
+ */
+ tg3_override_clk(tp);
+
/* Manage gphy power for all CPMU absent PCIe devices. */
if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
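tg3_override_clk() and tg3_restore_clk() are meant to bracket the reset: force the MAC clock override before bootcode runs, then drop it once the reset path completes (the 5720-only restore removed further below is folded into tg3_restore_clk()). A minimal sketch of the read-modify-write pairing against a fake register (the real code uses tr32/tw32 on TG3_CPMU_CLCK_ORIDE):

#include <stdint.h>

#define CPMU_CLCK_ORIDE_MAC_ORIDE_EN 0x80000000u

static uint32_t cpmu_clck_oride;	/* stand-in for the hardware register */

static void override_clk(void)
{
	cpmu_clck_oride |= CPMU_CLCK_ORIDE_MAC_ORIDE_EN;
}

static void restore_clk(void)
{
	cpmu_clck_oride &= ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN;
}

/* chip_reset() would call override_clk(), wait for bootcode, restore_clk() */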
@@ -9151,10 +9216,7 @@ static int tg3_chip_reset(struct tg3 *tp)
tw32(0x7c00, val | (1 << 25));
}
- if (tg3_asic_rev(tp) == ASIC_REV_5720) {
- val = tr32(TG3_CPMU_CLCK_ORIDE);
- tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
- }
+ tg3_restore_clk(tp);
/* Reprobe ASF enable state. */
tg3_flag_clear(tp, ENABLE_ASF);
@@ -9186,6 +9248,7 @@ static int tg3_chip_reset(struct tg3 *tp)
static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
+static void __tg3_set_rx_mode(struct net_device *);
/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, bool silent)
@@ -9246,6 +9309,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
}
spin_lock_bh(&tp->lock);
__tg3_set_mac_addr(tp, skip_mac_1);
+ __tg3_set_rx_mode(dev);
spin_unlock_bh(&tp->lock);
return err;
@@ -9634,6 +9698,20 @@ static void __tg3_set_rx_mode(struct net_device *dev)
tw32(MAC_HASH_REG_3, mc_filter[3]);
}
+ if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
+ rx_mode |= RX_MODE_PROMISC;
+ } else if (!(dev->flags & IFF_PROMISC)) {
+		/* Add all entries to the MAC address filter list */
+ int i = 0;
+ struct netdev_hw_addr *ha;
+
+ netdev_for_each_uc_addr(ha, dev) {
+ __tg3_set_one_mac_addr(tp, ha->addr,
+ i + TG3_UCAST_ADDR_IDX(tp));
+ i++;
+ }
+ }
+
if (rx_mode != tp->rx_mode) {
tp->rx_mode = rx_mode;
tw32_f(MAC_RX_MODE, rx_mode);
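Together with IFF_UNICAST_FLT (set in the last tg3 hunk of this patch), the stack now hands tg3 its unicast list; each address gets a spare MAC filter slot, and the device falls back to promiscuous mode once the list outgrows the hardware, 2 or 3 slots depending on ASF. A stand-alone sketch of that policy:

#include <stdbool.h>
#include <stddef.h>

static bool program_uc_filters(size_t n_addrs, bool asf_enabled)
{
	size_t max_slots = asf_enabled ? 2 : 3;	/* TG3_MAX_UCAST_ADDR */
	size_t first     = asf_enabled ? 2 : 1;	/* TG3_UCAST_ADDR_IDX */

	if (n_addrs > max_slots)
		return false;	/* caller sets RX_MODE_PROMISC instead */

	/* addresses would land in slots first .. first + n_addrs - 1 */
	(void)first;
	return true;
}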
@@ -9966,6 +10044,7 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
if (tg3_asic_rev(tp) == ASIC_REV_5719)
val |= BUFMGR_MODE_NO_TX_UNDERRUN;
if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762 ||
tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
@@ -10751,6 +10830,7 @@ static void tg3_periodic_fetch_stats(struct tg3 *tp)
TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
+ tg3_asic_rev(tp) != ASIC_REV_5762 &&
tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
@@ -10879,6 +10959,13 @@ static void tg3_timer(unsigned long __opaque)
} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
tg3_flag(tp, 5780_CLASS)) {
tg3_serdes_parallel_detect(tp);
+ } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
+ u32 cpmu = tr32(TG3_CPMU_STATUS);
+ bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
+ TG3_CPMU_STATUS_LINK_MASK);
+
+ if (link_up != tp->link_up)
+ tg3_setup_phy(tp, false);
}
tp->timer_counter = tp->timer_multiplier;
@@ -11746,8 +11833,6 @@ static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
get_stat64(&hw_stats->rx_frame_too_long_errors) +
get_stat64(&hw_stats->rx_undersize_packets);
- stats->rx_over_errors = old_stats->rx_over_errors +
- get_stat64(&hw_stats->rxbds_empty);
stats->rx_frame_errors = old_stats->rx_frame_errors +
get_stat64(&hw_stats->rx_align_errors);
stats->tx_aborted_errors = old_stats->tx_aborted_errors +
@@ -13594,14 +13679,13 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
}
-static int tg3_hwtstamp_ioctl(struct net_device *dev,
- struct ifreq *ifr, int cmd)
+static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
struct tg3 *tp = netdev_priv(dev);
struct hwtstamp_config stmpconf;
if (!tg3_flag(tp, PTP_CAPABLE))
- return -EINVAL;
+ return -EOPNOTSUPP;
if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
return -EFAULT;
@@ -13682,6 +13766,67 @@ static int tg3_hwtstamp_ioctl(struct net_device *dev,
-EFAULT : 0;
}
+static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ struct hwtstamp_config stmpconf;
+
+ if (!tg3_flag(tp, PTP_CAPABLE))
+ return -EOPNOTSUPP;
+
+ stmpconf.flags = 0;
+ stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
+ HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
+
+ switch (tp->rxptpctl) {
+ case 0:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -ERANGE;
+ }
+
+ return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
+ -EFAULT : 0;
+}
+
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mii_ioctl_data *data = if_mii(ifr);
@@ -13735,7 +13880,10 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return err;
case SIOCSHWTSTAMP:
- return tg3_hwtstamp_ioctl(dev, ifr, cmd);
+ return tg3_hwtstamp_set(dev, ifr);
+
+ case SIOCGHWTSTAMP:
+ return tg3_hwtstamp_get(dev, ifr);
default:
/* do nothing */
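With SIOCGHWTSTAMP wired up, user space can read the current hardware timestamping configuration back without modifying it; SIOCSHWTSTAMP remains the setter. A minimal user-space sketch (the interface name is an example):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&cfg, 0, sizeof(cfg));
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCGHWTSTAMP, &ifr) == 0)
		printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);
	close(fd);
	return 0;
}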
@@ -14856,7 +15004,8 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
if (val == NIC_SRAM_DATA_SIG_MAGIC) {
u32 nic_cfg, led_cfg;
- u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
+ u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
+ u32 nic_phy_id, ver, eeprom_phy_id;
int eeprom_phy_serdes = 0;
tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
@@ -14873,6 +15022,11 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
if (tg3_asic_rev(tp) == ASIC_REV_5785)
tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720)
+ tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
+
if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
eeprom_phy_serdes = 1;
@@ -15025,6 +15179,9 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
+
+ if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
+ tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
}
done:
if (tg3_flag(tp, WOL_CAP))
@@ -15120,9 +15277,11 @@ static void tg3_phy_init_link_config(struct tg3 *tp)
{
u32 adv = ADVERTISED_Autoneg;
- if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
- adv |= ADVERTISED_1000baseT_Half |
- ADVERTISED_1000baseT_Full;
+ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
+ if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
+ adv |= ADVERTISED_1000baseT_Half;
+ adv |= ADVERTISED_1000baseT_Full;
+ }
if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
adv |= ADVERTISED_100baseT_Half |
@@ -16470,6 +16629,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
/* Set these bits to enable statistics workaround. */
if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762 ||
tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
tp->coalesce_mode |= HOSTCC_MODE_ATTN;
@@ -16612,6 +16772,9 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
else
tg3_flag_clear(tp, POLL_SERDES);
+ if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
+ tg3_flag_set(tp, POLL_CPMU_LINK);
+
tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
@@ -17533,6 +17696,7 @@ static int tg3_init_one(struct pci_dev *pdev,
features |= NETIF_F_LOOPBACK;
dev->hw_features |= features;
+ dev->priv_flags |= IFF_UNICAST_FLT;
if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
!tg3_flag(tp, TSO_CAPABLE) &&
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 5c3835aa1e1b..ef472385bce4 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -1146,10 +1146,14 @@
#define TG3_CPMU_CLCK_ORIDE 0x00003624
#define CPMU_CLCK_ORIDE_MAC_ORIDE_EN 0x80000000
+#define TG3_CPMU_CLCK_ORIDE_ENABLE 0x00003628
+#define TG3_CPMU_MAC_ORIDE_ENABLE (1 << 13)
+
#define TG3_CPMU_STATUS 0x0000362c
#define TG3_CPMU_STATUS_FMSK_5717 0x20000000
#define TG3_CPMU_STATUS_FMSK_5719 0xc0000000
#define TG3_CPMU_STATUS_FSHFT_5719 30
+#define TG3_CPMU_STATUS_LINK_MASK 0x180000
#define TG3_CPMU_CLCK_STAT 0x00003630
#define CPMU_CLCK_STAT_MAC_CLCK_MASK 0x001f0000
@@ -2204,7 +2208,7 @@
#define NIC_SRAM_DATA_CFG_2 0x00000d38
-#define NIC_SRAM_DATA_CFG_2_APD_EN 0x00000400
+#define NIC_SRAM_DATA_CFG_2_APD_EN 0x00004000
#define SHASTA_EXT_LED_MODE_MASK 0x00018000
#define SHASTA_EXT_LED_LEGACY 0x00000000
#define SHASTA_EXT_LED_SHARED 0x00008000
@@ -2226,6 +2230,9 @@
#define NIC_SRAM_CPMUSTAT_SIG 0x0000362c
#define NIC_SRAM_CPMUSTAT_SIG_MSK 0x0000ffff
+#define NIC_SRAM_DATA_CFG_5 0x00000e0c
+#define NIC_SRAM_DISABLE_1G_HALF_ADV 0x00000002
+
#define NIC_SRAM_RX_MINI_BUFFER_DESC 0x00001000
#define NIC_SRAM_DMA_DESC_POOL_BASE 0x00002000
@@ -3014,6 +3021,7 @@ enum TG3_FLAGS {
TG3_FLAG_ENABLE_ASF,
TG3_FLAG_ASPM_WORKAROUND,
TG3_FLAG_POLL_SERDES,
+ TG3_FLAG_POLL_CPMU_LINK,
TG3_FLAG_MBOX_WRITE_REORDER,
TG3_FLAG_PCIX_TARGET_HWBUG,
TG3_FLAG_WOL_SPEED_100MB,
@@ -3325,6 +3333,7 @@ struct tg3 {
#define TG3_PHYFLG_1G_ON_VAUX_OK 0x00080000
#define TG3_PHYFLG_KEEP_LINK_ON_PWRDN 0x00100000
#define TG3_PHYFLG_MDIX_STATE 0x00200000
+#define TG3_PHYFLG_DISABLE_1G_HD_ADV 0x00400000
u32 led_ctrl;
u32 phy_otp;
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 6f3cac060f29..1803c3959044 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -22,6 +22,14 @@
/* IOC local definitions */
+#define bfa_ioc_state_disabled(__sm) \
+ (((__sm) == BFI_IOC_UNINIT) || \
+ ((__sm) == BFI_IOC_INITING) || \
+ ((__sm) == BFI_IOC_HWINIT) || \
+ ((__sm) == BFI_IOC_DISABLED) || \
+ ((__sm) == BFI_IOC_FAIL) || \
+ ((__sm) == BFI_IOC_CFG_DISABLED))
+
/* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. */
#define bfa_ioc_firmware_lock(__ioc) \
@@ -42,6 +50,14 @@
((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc) \
((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
+#define bfa_ioc_set_cur_ioc_fwstate(__ioc, __fwstate) \
+ ((__ioc)->ioc_hwif->ioc_set_fwstate(__ioc, __fwstate))
+#define bfa_ioc_get_cur_ioc_fwstate(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc))
+#define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate) \
+ ((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate))
+#define bfa_ioc_get_alt_ioc_fwstate(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_get_alt_fwstate(__ioc))
#define bfa_ioc_mbox_cmd_pending(__ioc) \
(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
@@ -76,8 +92,8 @@ static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
-static void bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
- u32 boot_param);
+static enum bfa_status bfa_ioc_boot(struct bfa_ioc *ioc,
+ enum bfi_fwboot_type boot_type, u32 boot_param);
static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
char *serial_num);
@@ -860,7 +876,7 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
*/
case IOCPF_E_TIMEOUT:
- writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+ bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
break;
@@ -949,7 +965,7 @@ bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
case IOCPF_E_SEMLOCKED:
bfa_ioc_notify_fail(ioc);
bfa_ioc_sync_leave(ioc);
- writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+ bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
bfa_nw_ioc_hw_sem_release(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
break;
@@ -1031,7 +1047,7 @@ bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
bfa_ioc_notify_fail(ioc);
if (!iocpf->auto_recover) {
bfa_ioc_sync_leave(ioc);
- writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+ bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
bfa_nw_ioc_hw_sem_release(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
} else {
@@ -1162,7 +1178,7 @@ bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
}
- fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+ fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
if (fwstate == BFI_IOC_UNINIT) {
writel(1, ioc->ioc_regs.ioc_init_sem_reg);
return;
@@ -1176,8 +1192,8 @@ bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
}
bfa_ioc_fwver_clear(ioc);
- writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
- writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
+ bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_UNINIT);
+ bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_UNINIT);
/*
* Try to lock and then unlock the semaphore.
@@ -1309,22 +1325,504 @@ bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
}
}
-/* Returns TRUE if same. */
+static bool
+bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr *fwhdr_1,
+ struct bfi_ioc_image_hdr *fwhdr_2)
+{
+ int i;
+
+ for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
+ if (fwhdr_1->md5sum[i] != fwhdr_2->md5sum[i])
+ return false;
+ }
+
+ return true;
+}
+
+/* Returns TRUE if the major, minor and maintenance versions are the same.
+ * If the patch versions are also the same, check that the MD5 checksums match.
+ */
+static bool
+bfa_ioc_fw_ver_compatible(struct bfi_ioc_image_hdr *drv_fwhdr,
+ struct bfi_ioc_image_hdr *fwhdr_to_cmp)
+{
+ if (drv_fwhdr->signature != fwhdr_to_cmp->signature)
+ return false;
+ if (drv_fwhdr->fwver.major != fwhdr_to_cmp->fwver.major)
+ return false;
+ if (drv_fwhdr->fwver.minor != fwhdr_to_cmp->fwver.minor)
+ return false;
+ if (drv_fwhdr->fwver.maint != fwhdr_to_cmp->fwver.maint)
+ return false;
+ if (drv_fwhdr->fwver.patch == fwhdr_to_cmp->fwver.patch &&
+ drv_fwhdr->fwver.phase == fwhdr_to_cmp->fwver.phase &&
+ drv_fwhdr->fwver.build == fwhdr_to_cmp->fwver.build)
+ return bfa_ioc_fwver_md5_check(drv_fwhdr, fwhdr_to_cmp);
+
+ return true;
+}
+
+static bool
+bfa_ioc_flash_fwver_valid(struct bfi_ioc_image_hdr *flash_fwhdr)
+{
+ if (flash_fwhdr->fwver.major == 0 || flash_fwhdr->fwver.major == 0xFF)
+ return false;
+
+ return true;
+}
+
+static bool
+fwhdr_is_ga(struct bfi_ioc_image_hdr *fwhdr)
+{
+ if (fwhdr->fwver.phase == 0 &&
+ fwhdr->fwver.build == 0)
+ return false;
+
+ return true;
+}
+
+/* Returns how fwhdr_to_cmp compares with base_fwhdr: incompatible, old, same or better. */
+static enum bfi_ioc_img_ver_cmp
+bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr *base_fwhdr,
+ struct bfi_ioc_image_hdr *fwhdr_to_cmp)
+{
+ if (bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp) == false)
+ return BFI_IOC_IMG_VER_INCOMP;
+
+ if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch)
+ return BFI_IOC_IMG_VER_BETTER;
+ else if (fwhdr_to_cmp->fwver.patch < base_fwhdr->fwver.patch)
+ return BFI_IOC_IMG_VER_OLD;
+
+ /* GA takes priority over internal builds of the same patch stream.
+	 * At this point the major, minor, maint and patch numbers are the same.
+ */
+ if (fwhdr_is_ga(base_fwhdr) == true)
+ if (fwhdr_is_ga(fwhdr_to_cmp))
+ return BFI_IOC_IMG_VER_SAME;
+ else
+ return BFI_IOC_IMG_VER_OLD;
+ else
+ if (fwhdr_is_ga(fwhdr_to_cmp))
+ return BFI_IOC_IMG_VER_BETTER;
+
+ if (fwhdr_to_cmp->fwver.phase > base_fwhdr->fwver.phase)
+ return BFI_IOC_IMG_VER_BETTER;
+ else if (fwhdr_to_cmp->fwver.phase < base_fwhdr->fwver.phase)
+ return BFI_IOC_IMG_VER_OLD;
+
+ if (fwhdr_to_cmp->fwver.build > base_fwhdr->fwver.build)
+ return BFI_IOC_IMG_VER_BETTER;
+ else if (fwhdr_to_cmp->fwver.build < base_fwhdr->fwver.build)
+ return BFI_IOC_IMG_VER_OLD;
+
+	/* All version numbers are equal.
+	 * The MD5 check is done as part of the compatibility check.
+ */
+ return BFI_IOC_IMG_VER_SAME;
+}
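bfa_ioc_fw_ver_patch_cmp() orders two already-compatible images by patch number, then by GA status, then phase, then build. A compressed stand-alone restatement (struct trimmed to the compared fields; the signature/major/minor/maint pre-check is elided):

#include <stdbool.h>
#include <stdint.h>

enum img_ver_cmp { VER_INCOMP, VER_OLD, VER_SAME, VER_BETTER };

struct fwver { uint8_t patch, phase, build; };

/* mirrors fwhdr_is_ga() above: phase == 0 && build == 0 means not GA */
static bool is_ga(const struct fwver *v)
{
	return v->phase != 0 || v->build != 0;
}

static enum img_ver_cmp patch_cmp(const struct fwver *base,
				  const struct fwver *cmp)
{
	if (cmp->patch != base->patch)
		return cmp->patch > base->patch ? VER_BETTER : VER_OLD;
	if (is_ga(base))
		return is_ga(cmp) ? VER_SAME : VER_OLD;
	if (is_ga(cmp))
		return VER_BETTER;
	if (cmp->phase != base->phase)
		return cmp->phase > base->phase ? VER_BETTER : VER_OLD;
	if (cmp->build != base->build)
		return cmp->build > base->build ? VER_BETTER : VER_OLD;
	return VER_SAME;	/* MD5 equality is part of the compatibility check */
}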
+
+/* register definitions */
+#define FLI_CMD_REG 0x0001d000
+#define FLI_WRDATA_REG 0x0001d00c
+#define FLI_RDDATA_REG 0x0001d010
+#define FLI_ADDR_REG 0x0001d004
+#define FLI_DEV_STATUS_REG 0x0001d014
+
+#define BFA_FLASH_FIFO_SIZE 128 /* fifo size */
+#define BFA_FLASH_CHECK_MAX 10000 /* max # of status checks */
+#define BFA_FLASH_BLOCKING_OP_MAX 1000000 /* max # of blocking op checks */
+#define BFA_FLASH_WIP_MASK 0x01 /* write in progress bit mask */
+
+#define NFC_STATE_RUNNING 0x20000001
+#define NFC_STATE_PAUSED 0x00004560
+#define NFC_VER_VALID 0x147
+
+enum bfa_flash_cmd {
+ BFA_FLASH_FAST_READ = 0x0b, /* fast read */
+ BFA_FLASH_WRITE_ENABLE = 0x06, /* write enable */
+ BFA_FLASH_SECTOR_ERASE = 0xd8, /* sector erase */
+ BFA_FLASH_WRITE = 0x02, /* write */
+ BFA_FLASH_READ_STATUS = 0x05, /* read status */
+};
+
+/* hardware error definition */
+enum bfa_flash_err {
+ BFA_FLASH_NOT_PRESENT = -1, /*!< flash not present */
+ BFA_FLASH_UNINIT = -2, /*!< flash not initialized */
+ BFA_FLASH_BAD = -3, /*!< flash bad */
+ BFA_FLASH_BUSY = -4, /*!< flash busy */
+ BFA_FLASH_ERR_CMD_ACT = -5, /*!< command active never cleared */
+ BFA_FLASH_ERR_FIFO_CNT = -6, /*!< fifo count never cleared */
+ BFA_FLASH_ERR_WIP = -7, /*!< write-in-progress never cleared */
+ BFA_FLASH_ERR_TIMEOUT = -8, /*!< fli timeout */
+ BFA_FLASH_ERR_LEN = -9, /*!< invalid length */
+};
+
+/* flash command register data structure */
+union bfa_flash_cmd_reg {
+ struct {
+#ifdef __BIG_ENDIAN
+ u32 act:1;
+ u32 rsv:1;
+ u32 write_cnt:9;
+ u32 read_cnt:9;
+ u32 addr_cnt:4;
+ u32 cmd:8;
+#else
+ u32 cmd:8;
+ u32 addr_cnt:4;
+ u32 read_cnt:9;
+ u32 write_cnt:9;
+ u32 rsv:1;
+ u32 act:1;
+#endif
+ } r;
+ u32 i;
+};
+
+/* flash device status register data structure */
+union bfa_flash_dev_status_reg {
+ struct {
+#ifdef __BIG_ENDIAN
+ u32 rsv:21;
+ u32 fifo_cnt:6;
+ u32 busy:1;
+ u32 init_status:1;
+ u32 present:1;
+ u32 bad:1;
+ u32 good:1;
+#else
+ u32 good:1;
+ u32 bad:1;
+ u32 present:1;
+ u32 init_status:1;
+ u32 busy:1;
+ u32 fifo_cnt:6;
+ u32 rsv:21;
+#endif
+ } r;
+ u32 i;
+};
+
+/* flash address register data structure */
+union bfa_flash_addr_reg {
+ struct {
+#ifdef __BIG_ENDIAN
+ u32 addr:24;
+ u32 dummy:8;
+#else
+ u32 dummy:8;
+ u32 addr:24;
+#endif
+ } r;
+ u32 i;
+};
+
+/* Flash raw private functions */
+static void
+bfa_flash_set_cmd(void __iomem *pci_bar, u8 wr_cnt,
+ u8 rd_cnt, u8 ad_cnt, u8 op)
+{
+ union bfa_flash_cmd_reg cmd;
+
+ cmd.i = 0;
+ cmd.r.act = 1;
+ cmd.r.write_cnt = wr_cnt;
+ cmd.r.read_cnt = rd_cnt;
+ cmd.r.addr_cnt = ad_cnt;
+ cmd.r.cmd = op;
+ writel(cmd.i, (pci_bar + FLI_CMD_REG));
+}
+
+static void
+bfa_flash_set_addr(void __iomem *pci_bar, u32 address)
+{
+ union bfa_flash_addr_reg addr;
+
+ addr.r.addr = address & 0x00ffffff;
+ addr.r.dummy = 0;
+ writel(addr.i, (pci_bar + FLI_ADDR_REG));
+}
+
+static int
+bfa_flash_cmd_act_check(void __iomem *pci_bar)
+{
+ union bfa_flash_cmd_reg cmd;
+
+ cmd.i = readl(pci_bar + FLI_CMD_REG);
+
+ if (cmd.r.act)
+ return BFA_FLASH_ERR_CMD_ACT;
+
+ return 0;
+}
+
+/* Flush FLI data fifo. */
+static u32
+bfa_flash_fifo_flush(void __iomem *pci_bar)
+{
+ u32 i;
+ u32 t;
+ union bfa_flash_dev_status_reg dev_status;
+
+ dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
+
+ if (!dev_status.r.fifo_cnt)
+ return 0;
+
+ /* fifo counter in terms of words */
+ for (i = 0; i < dev_status.r.fifo_cnt; i++)
+ t = readl(pci_bar + FLI_RDDATA_REG);
+
+ /* Check the device status. It may take some time. */
+ for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
+ dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
+ if (!dev_status.r.fifo_cnt)
+ break;
+ }
+
+ if (dev_status.r.fifo_cnt)
+ return BFA_FLASH_ERR_FIFO_CNT;
+
+ return 0;
+}
+
+/* Read flash status. */
+static u32
+bfa_flash_status_read(void __iomem *pci_bar)
+{
+ union bfa_flash_dev_status_reg dev_status;
+ u32 status;
+ u32 ret_status;
+ int i;
+
+ status = bfa_flash_fifo_flush(pci_bar);
+ if (status < 0)
+ return status;
+
+ bfa_flash_set_cmd(pci_bar, 0, 4, 0, BFA_FLASH_READ_STATUS);
+
+ for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
+ status = bfa_flash_cmd_act_check(pci_bar);
+ if (!status)
+ break;
+ }
+
+ if (status)
+ return status;
+
+ dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
+ if (!dev_status.r.fifo_cnt)
+ return BFA_FLASH_BUSY;
+
+ ret_status = readl(pci_bar + FLI_RDDATA_REG);
+ ret_status >>= 24;
+
+ status = bfa_flash_fifo_flush(pci_bar);
+ if (status < 0)
+ return status;
+
+ return ret_status;
+}
+
+/* Start flash read operation. */
+static u32
+bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
+ char *buf)
+{
+ u32 status;
+
+	/* len must be a multiple of 4 and must not exceed the fifo size */
+ if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0)
+ return BFA_FLASH_ERR_LEN;
+
+ /* check status */
+ status = bfa_flash_status_read(pci_bar);
+ if (status == BFA_FLASH_BUSY)
+ status = bfa_flash_status_read(pci_bar);
+
+ if (status < 0)
+ return status;
+
+ /* check if write-in-progress bit is cleared */
+ if (status & BFA_FLASH_WIP_MASK)
+ return BFA_FLASH_ERR_WIP;
+
+ bfa_flash_set_addr(pci_bar, offset);
+
+ bfa_flash_set_cmd(pci_bar, 0, (u8)len, 4, BFA_FLASH_FAST_READ);
+
+ return 0;
+}
+
+/* Check flash read operation. */
+static u32
+bfa_flash_read_check(void __iomem *pci_bar)
+{
+ if (bfa_flash_cmd_act_check(pci_bar))
+ return 1;
+
+ return 0;
+}
+
+/* End flash read operation. */
+static void
+bfa_flash_read_end(void __iomem *pci_bar, u32 len, char *buf)
+{
+ u32 i;
+
+ /* read data fifo up to 32 words */
+ for (i = 0; i < len; i += 4) {
+ u32 w = readl(pci_bar + FLI_RDDATA_REG);
+ *((u32 *)(buf + i)) = swab32(w);
+ }
+
+ bfa_flash_fifo_flush(pci_bar);
+}
+
+/* Perform flash raw read. */
+
+#define FLASH_BLOCKING_OP_MAX 500
+#define FLASH_SEM_LOCK_REG 0x18820
+
+static int
+bfa_raw_sem_get(void __iomem *bar)
+{
+ int locked;
+
+ locked = readl((bar + FLASH_SEM_LOCK_REG));
+
+ return !locked;
+}
+
+static enum bfa_status
+bfa_flash_sem_get(void __iomem *bar)
+{
+ u32 n = FLASH_BLOCKING_OP_MAX;
+
+ while (!bfa_raw_sem_get(bar)) {
+ if (--n <= 0)
+ return BFA_STATUS_BADFLASH;
+ udelay(10000);
+ }
+ return BFA_STATUS_OK;
+}
+
+static void
+bfa_flash_sem_put(void __iomem *bar)
+{
+ writel(0, (bar + FLASH_SEM_LOCK_REG));
+}
+
+static enum bfa_status
+bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
+ u32 len)
+{
+ u32 n, status;
+ u32 off, l, s, residue, fifo_sz;
+
+ residue = len;
+ off = 0;
+ fifo_sz = BFA_FLASH_FIFO_SIZE;
+ status = bfa_flash_sem_get(pci_bar);
+ if (status != BFA_STATUS_OK)
+ return status;
+
+ while (residue) {
+ s = offset + off;
+ n = s / fifo_sz;
+ l = (n + 1) * fifo_sz - s;
+ if (l > residue)
+ l = residue;
+
+ status = bfa_flash_read_start(pci_bar, offset + off, l,
+ &buf[off]);
+ if (status < 0) {
+ bfa_flash_sem_put(pci_bar);
+ return BFA_STATUS_FAILED;
+ }
+
+ n = BFA_FLASH_BLOCKING_OP_MAX;
+ while (bfa_flash_read_check(pci_bar)) {
+ if (--n <= 0) {
+ bfa_flash_sem_put(pci_bar);
+ return BFA_STATUS_FAILED;
+ }
+ }
+
+ bfa_flash_read_end(pci_bar, l, &buf[off]);
+
+ residue -= l;
+ off += l;
+ }
+ bfa_flash_sem_put(pci_bar);
+
+ return BFA_STATUS_OK;
+}
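The read loop above never crosses a FIFO boundary in a single transfer: from a start address s it reads l = (s / fifo_sz + 1) * fifo_sz - s bytes, i.e. up to the next 128-byte boundary, capped by the remaining residue. A tiny worked example of that arithmetic:

#include <stdio.h>

#define FIFO_SZ 128u

int main(void)
{
	unsigned int s = 100, residue = 300;

	while (residue) {
		unsigned int l = (s / FIFO_SZ + 1) * FIFO_SZ - s;

		if (l > residue)
			l = residue;
		printf("read %3u bytes at offset %u\n", l, s);
		s += l;
		residue -= l;
	}
	return 0;	/* reads of 28, 128, 128 and 16 bytes */
}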
+
+#define BFA_FLASH_PART_FWIMG_ADDR 0x100000 /* fw image address */
+
+static enum bfa_status
+bfa_nw_ioc_flash_img_get_chnk(struct bfa_ioc *ioc, u32 off,
+ u32 *fwimg)
+{
+ return bfa_flash_raw_read(ioc->pcidev.pci_bar_kva,
+ BFA_FLASH_PART_FWIMG_ADDR + (off * sizeof(u32)),
+ (char *)fwimg, BFI_FLASH_CHUNK_SZ);
+}
+
+static enum bfi_ioc_img_ver_cmp
+bfa_ioc_flash_fwver_cmp(struct bfa_ioc *ioc,
+ struct bfi_ioc_image_hdr *base_fwhdr)
+{
+ struct bfi_ioc_image_hdr *flash_fwhdr;
+ enum bfa_status status;
+ u32 fwimg[BFI_FLASH_CHUNK_SZ_WORDS];
+
+ status = bfa_nw_ioc_flash_img_get_chnk(ioc, 0, fwimg);
+ if (status != BFA_STATUS_OK)
+ return BFI_IOC_IMG_VER_INCOMP;
+
+ flash_fwhdr = (struct bfi_ioc_image_hdr *)fwimg;
+ if (bfa_ioc_flash_fwver_valid(flash_fwhdr))
+ return bfa_ioc_fw_ver_patch_cmp(base_fwhdr, flash_fwhdr);
+ else
+ return BFI_IOC_IMG_VER_INCOMP;
+}
+
+/**
+ * Returns TRUE if the driver is willing to work with the current smem f/w version.
+ */
bool
bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
struct bfi_ioc_image_hdr *drv_fwhdr;
- int i;
+ enum bfi_ioc_img_ver_cmp smem_flash_cmp, drv_smem_cmp;
drv_fwhdr = (struct bfi_ioc_image_hdr *)
bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
- for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
- if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i])
- return false;
+ /* If smem is incompatible or old, driver should not work with it. */
+ drv_smem_cmp = bfa_ioc_fw_ver_patch_cmp(drv_fwhdr, fwhdr);
+ if (drv_smem_cmp == BFI_IOC_IMG_VER_INCOMP ||
+ drv_smem_cmp == BFI_IOC_IMG_VER_OLD) {
+ return false;
}
- return true;
+	/* If flash has a better f/w than smem, do not work with smem.
+	 * If smem f/w == flash f/w, work with it, since smem f/w is neither old nor incompatible.
+	 * If flash is old or incompatible, work with smem iff smem f/w == drv f/w.
+ */
+ smem_flash_cmp = bfa_ioc_flash_fwver_cmp(ioc, fwhdr);
+
+ if (smem_flash_cmp == BFI_IOC_IMG_VER_BETTER)
+ return false;
+ else if (smem_flash_cmp == BFI_IOC_IMG_VER_SAME)
+ return true;
+ else
+ return (drv_smem_cmp == BFI_IOC_IMG_VER_SAME) ?
+ true : false;
}
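Condensed, the rewritten bfa_nw_ioc_fwver_cmp() answers one question: should the driver keep the firmware already running in smem? A hedged restatement as a pure function over the two comparison results:

#include <stdbool.h>

enum cmp { INCOMP, OLD, SAME, BETTER };

static bool keep_smem_fw(enum cmp smem_rel_drv, enum cmp flash_rel_smem)
{
	if (smem_rel_drv == INCOMP || smem_rel_drv == OLD)
		return false;		/* running f/w unusable for this driver */
	if (flash_rel_smem == BETTER)
		return false;		/* flash holds a better image */
	if (flash_rel_smem == SAME)
		return true;
	return smem_rel_drv == SAME;	/* flash old/incomp: keep smem iff == drv */
}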
/* Return true if current running version is valid. Firmware signature and
@@ -1333,15 +1831,9 @@ bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
static bool
bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
{
- struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;
+ struct bfi_ioc_image_hdr fwhdr;
bfa_nw_ioc_fwver_get(ioc, &fwhdr);
- drv_fwhdr = (struct bfi_ioc_image_hdr *)
- bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
-
- if (fwhdr.signature != drv_fwhdr->signature)
- return false;
-
if (swab32(fwhdr.bootenv) != boot_env)
return false;
@@ -1366,7 +1858,7 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
bool fwvalid;
u32 boot_env;
- ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+ ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
if (force)
ioc_fwstate = BFI_IOC_UNINIT;
@@ -1380,8 +1872,10 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
false : bfa_ioc_fwver_valid(ioc, boot_env);
if (!fwvalid) {
- bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env);
- bfa_ioc_poll_fwinit(ioc);
+ if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env) ==
+ BFA_STATUS_OK)
+ bfa_ioc_poll_fwinit(ioc);
+
return;
}
@@ -1411,8 +1905,9 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
/**
* Initialize the h/w for any other states.
*/
- bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env);
- bfa_ioc_poll_fwinit(ioc);
+ if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env) ==
+ BFA_STATUS_OK)
+ bfa_ioc_poll_fwinit(ioc);
}
void
@@ -1517,7 +2012,7 @@ bfa_ioc_hb_stop(struct bfa_ioc *ioc)
}
/* Initiate a full firmware download. */
-static void
+static enum bfa_status
bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
u32 boot_env)
{
@@ -1527,18 +2022,47 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
u32 chunkno = 0;
u32 i;
u32 asicmode;
+ u32 fwimg_size;
+ u32 fwimg_buf[BFI_FLASH_CHUNK_SZ_WORDS];
+ enum bfa_status status;
- fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
+ if (boot_env == BFI_FWBOOT_ENV_OS &&
+ boot_type == BFI_FWBOOT_TYPE_FLASH) {
+		fwimg_size = BFI_FLASH_IMAGE_SZ / sizeof(u32);
+
+ status = bfa_nw_ioc_flash_img_get_chnk(ioc,
+ BFA_IOC_FLASH_CHUNK_ADDR(chunkno), fwimg_buf);
+ if (status != BFA_STATUS_OK)
+ return status;
+
+ fwimg = fwimg_buf;
+ } else {
+ fwimg_size = bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc));
+ fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
+ BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
+ }
pgnum = bfa_ioc_smem_pgnum(ioc, loff);
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
- for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {
+ for (i = 0; i < fwimg_size; i++) {
if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
- fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
+ if (boot_env == BFI_FWBOOT_ENV_OS &&
+ boot_type == BFI_FWBOOT_TYPE_FLASH) {
+ status = bfa_nw_ioc_flash_img_get_chnk(ioc,
+ BFA_IOC_FLASH_CHUNK_ADDR(chunkno),
+ fwimg_buf);
+ if (status != BFA_STATUS_OK)
+ return status;
+
+ fwimg = fwimg_buf;
+ } else {
+ fwimg = bfa_cb_image_get_chunk(
+ bfa_ioc_asic_gen(ioc),
BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
+ }
}
/**
@@ -1566,6 +2090,10 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
/*
* Set boot type, env and device mode at the end.
*/
+ if (boot_env == BFI_FWBOOT_ENV_OS &&
+ boot_type == BFI_FWBOOT_TYPE_FLASH) {
+ boot_type = BFI_FWBOOT_TYPE_NORMAL;
+ }
asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
ioc->port0_mode, ioc->port1_mode);
writel(asicmode, ((ioc->ioc_regs.smem_page_start)
@@ -1574,6 +2102,7 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
+ (BFI_FWBOOT_TYPE_OFF)));
writel(boot_env, ((ioc->ioc_regs.smem_page_start)
+ (BFI_FWBOOT_ENV_OFF)));
+ return BFA_STATUS_OK;
}
static void
@@ -1846,29 +2375,47 @@ bfa_ioc_pll_init(struct bfa_ioc *ioc)
/* Interface used by diag module to do firmware boot with memory test
* as the entry vector.
*/
-static void
+static enum bfa_status
bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
u32 boot_env)
{
+ struct bfi_ioc_image_hdr *drv_fwhdr;
+ enum bfa_status status;
bfa_ioc_stats(ioc, ioc_boots);
if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
- return;
+ return BFA_STATUS_FAILED;
+ if (boot_env == BFI_FWBOOT_ENV_OS &&
+ boot_type == BFI_FWBOOT_TYPE_NORMAL) {
+ drv_fwhdr = (struct bfi_ioc_image_hdr *)
+ bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
+		/* Work with flash iff the flash f/w is better than the driver's f/w.
+		 * Otherwise push the driver's firmware.
+ */
+ if (bfa_ioc_flash_fwver_cmp(ioc, drv_fwhdr) ==
+ BFI_IOC_IMG_VER_BETTER)
+ boot_type = BFI_FWBOOT_TYPE_FLASH;
+ }
/**
* Initialize IOC state of all functions on a chip reset.
*/
if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
- writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
- writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
+ bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
+ bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
} else {
- writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
- writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
+ bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_INITING);
+ bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_INITING);
}
bfa_ioc_msgflush(ioc);
- bfa_ioc_download_fw(ioc, boot_type, boot_env);
- bfa_ioc_lpu_start(ioc);
+ status = bfa_ioc_download_fw(ioc, boot_type, boot_env);
+ if (status == BFA_STATUS_OK)
+ bfa_ioc_lpu_start(ioc);
+ else
+ bfa_nw_iocpf_timeout(ioc);
+
+ return status;
}
/* Enable/disable IOC failure auto recovery. */
@@ -2473,7 +3020,7 @@ bfa_nw_iocpf_sem_timeout(void *ioc_arg)
static void
bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
{
- u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+ u32 fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
if (fwstate == BFI_IOC_DISABLED) {
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
index f04e0aab25b4..20cff7df4b55 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
@@ -215,6 +215,13 @@ struct bfa_ioc_hwif {
void (*ioc_sync_ack) (struct bfa_ioc *ioc);
bool (*ioc_sync_complete) (struct bfa_ioc *ioc);
bool (*ioc_lpu_read_stat) (struct bfa_ioc *ioc);
+ void (*ioc_set_fwstate) (struct bfa_ioc *ioc,
+ enum bfi_ioc_state fwstate);
+ enum bfi_ioc_state (*ioc_get_fwstate) (struct bfa_ioc *ioc);
+ void (*ioc_set_alt_fwstate) (struct bfa_ioc *ioc,
+ enum bfi_ioc_state fwstate);
+ enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc *ioc);
+
};
#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
@@ -291,6 +298,7 @@ void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc);
bool bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc);
bool bfa_nw_ioc_is_operational(struct bfa_ioc *ioc);
void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr);
+enum bfa_status bfa_nw_ioc_fwsig_invalidate(struct bfa_ioc *ioc);
void bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
struct bfa_ioc_notify *notify);
bool bfa_nw_ioc_sem_get(void __iomem *sem_reg);
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
index 5df0b0c68c5a..d639558455cb 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
@@ -48,6 +48,12 @@ static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_set_cur_ioc_fwstate(
+ struct bfa_ioc *ioc, enum bfi_ioc_state fwstate);
+static enum bfi_ioc_state bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_set_alt_ioc_fwstate(
+ struct bfa_ioc *ioc, enum bfi_ioc_state fwstate);
+static enum bfi_ioc_state bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc);
static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb,
enum bfi_asic_mode asic_mode);
static enum bfa_status bfa_ioc_ct2_pll_init(void __iomem *rb,
@@ -68,6 +74,10 @@ static const struct bfa_ioc_hwif nw_hwif_ct = {
.ioc_sync_leave = bfa_ioc_ct_sync_leave,
.ioc_sync_ack = bfa_ioc_ct_sync_ack,
.ioc_sync_complete = bfa_ioc_ct_sync_complete,
+ .ioc_set_fwstate = bfa_ioc_ct_set_cur_ioc_fwstate,
+ .ioc_get_fwstate = bfa_ioc_ct_get_cur_ioc_fwstate,
+ .ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate,
+ .ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate,
};
static const struct bfa_ioc_hwif nw_hwif_ct2 = {
@@ -85,6 +95,10 @@ static const struct bfa_ioc_hwif nw_hwif_ct2 = {
.ioc_sync_leave = bfa_ioc_ct_sync_leave,
.ioc_sync_ack = bfa_ioc_ct_sync_ack,
.ioc_sync_complete = bfa_ioc_ct_sync_complete,
+ .ioc_set_fwstate = bfa_ioc_ct_set_cur_ioc_fwstate,
+ .ioc_get_fwstate = bfa_ioc_ct_get_cur_ioc_fwstate,
+ .ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate,
+ .ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate,
};
/* Called from bfa_ioc_attach() to map asic specific calls. */
@@ -565,6 +579,32 @@ bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc)
return false;
}
+static void
+bfa_ioc_ct_set_cur_ioc_fwstate(struct bfa_ioc *ioc,
+ enum bfi_ioc_state fwstate)
+{
+ writel(fwstate, ioc->ioc_regs.ioc_fwstate);
+}
+
+static enum bfi_ioc_state
+bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc)
+{
+ return (enum bfi_ioc_state)readl(ioc->ioc_regs.ioc_fwstate);
+}
+
+static void
+bfa_ioc_ct_set_alt_ioc_fwstate(struct bfa_ioc *ioc,
+ enum bfi_ioc_state fwstate)
+{
+ writel(fwstate, ioc->ioc_regs.alt_ioc_fwstate);
+}
+
+static enum bfi_ioc_state
+bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc)
+{
+ return (enum bfi_ioc_state)readl(ioc->ioc_regs.alt_ioc_fwstate);
+}
+
static enum bfa_status
bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
{
diff --git a/drivers/net/ethernet/brocade/bna/bfi.h b/drivers/net/ethernet/brocade/bna/bfi.h
index 1f24c23dc786..8c563a77cdf6 100644
--- a/drivers/net/ethernet/brocade/bna/bfi.h
+++ b/drivers/net/ethernet/brocade/bna/bfi.h
@@ -25,6 +25,7 @@
/* BFI FW image type */
#define BFI_FLASH_CHUNK_SZ 256 /*!< Flash chunk size */
#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32))
+#define BFI_FLASH_IMAGE_SZ 0x100000
/* Msg header common to all msgs */
struct bfi_mhdr {
@@ -233,7 +234,29 @@ struct bfi_ioc_getattr_reply {
#define BFI_IOC_TRC_HDR_SZ 32
#define BFI_IOC_FW_SIGNATURE (0xbfadbfad)
+#define BFI_IOC_FW_INV_SIGN (0xdeaddead)
#define BFI_IOC_MD5SUM_SZ 4
+
+struct bfi_ioc_fwver {
+#ifdef __BIG_ENDIAN
+ u8 patch;
+ u8 maint;
+ u8 minor;
+ u8 major;
+ u8 rsvd[2];
+ u8 build;
+ u8 phase;
+#else
+ u8 major;
+ u8 minor;
+ u8 maint;
+ u8 patch;
+ u8 phase;
+ u8 build;
+ u8 rsvd[2];
+#endif
+};
+
struct bfi_ioc_image_hdr {
u32 signature; /*!< constant signature */
u8 asic_gen; /*!< asic generation */
@@ -242,10 +265,18 @@ struct bfi_ioc_image_hdr {
u8 port1_mode; /*!< device mode for port 1 */
u32 exec; /*!< exec vector */
u32 bootenv; /*!< firmware boot env */
- u32 rsvd_b[4];
+ u32 rsvd_b[2];
+ struct bfi_ioc_fwver fwver;
u32 md5sum[BFI_IOC_MD5SUM_SZ];
};
+enum bfi_ioc_img_ver_cmp {
+ BFI_IOC_IMG_VER_INCOMP,
+ BFI_IOC_IMG_VER_OLD,
+ BFI_IOC_IMG_VER_SAME,
+ BFI_IOC_IMG_VER_BETTER
+};
+
#define BFI_FWBOOT_DEVMODE_OFF 4
#define BFI_FWBOOT_TYPE_OFF 8
#define BFI_FWBOOT_ENV_OFF 12
diff --git a/drivers/net/ethernet/brocade/bna/bfi_enet.h b/drivers/net/ethernet/brocade/bna/bfi_enet.h
index 7d10e335c27d..ae072dc5d238 100644
--- a/drivers/net/ethernet/brocade/bna/bfi_enet.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_enet.h
@@ -472,7 +472,8 @@ enum bfi_enet_hds_type {
struct bfi_enet_rx_cfg {
u8 rxq_type;
- u8 rsvd[3];
+ u8 rsvd[1];
+ u16 frame_size;
struct {
u8 max_header_size;
diff --git a/drivers/net/ethernet/brocade/bna/bna.h b/drivers/net/ethernet/brocade/bna/bna.h
index f1eafc409bbd..1f512190d696 100644
--- a/drivers/net/ethernet/brocade/bna/bna.h
+++ b/drivers/net/ethernet/brocade/bna/bna.h
@@ -354,6 +354,14 @@ do { \
} \
} while (0)
+#define bna_mcam_mod_free_q(_bna) (&(_bna)->mcam_mod.free_q)
+
+#define bna_mcam_mod_del_q(_bna) (&(_bna)->mcam_mod.del_q)
+
+#define bna_ucam_mod_free_q(_bna) (&(_bna)->ucam_mod.free_q)
+
+#define bna_ucam_mod_del_q(_bna) (&(_bna)->ucam_mod.del_q)
+
/* Inline functions */
static inline struct bna_mac *bna_mac_find(struct list_head *q, u8 *addr)
@@ -391,12 +399,8 @@ int bna_num_rxp_set(struct bna *bna, int num_rxp);
void bna_hw_stats_get(struct bna *bna);
/* APIs for RxF */
-struct bna_mac *bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod);
-void bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod,
- struct bna_mac *mac);
-struct bna_mac *bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod);
-void bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod,
- struct bna_mac *mac);
+struct bna_mac *bna_cam_mod_mac_get(struct list_head *head);
+void bna_cam_mod_mac_put(struct list_head *tail, struct bna_mac *mac);
struct bna_mcam_handle *bna_mcam_mod_handle_get(struct bna_mcam_mod *mod);
void bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
struct bna_mcam_handle *handle);
@@ -493,11 +497,17 @@ enum bna_cb_status
bna_rx_ucast_del(struct bna_rx *rx, u8 *ucmac,
void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
+bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist,
+ void (*cbfn)(struct bnad *, struct bna_rx *));
+enum bna_cb_status
bna_rx_mcast_add(struct bna_rx *rx, u8 *mcmac,
void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac,
void (*cbfn)(struct bnad *, struct bna_rx *));
+void
+bna_rx_mcast_delall(struct bna_rx *rx,
+ void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
enum bna_rxmode bitmask,
@@ -505,6 +515,8 @@ bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
void bna_rx_vlanfilter_enable(struct bna_rx *rx);
+void bna_rx_vlan_strip_enable(struct bna_rx *rx);
+void bna_rx_vlan_strip_disable(struct bna_rx *rx);
/* ENET */
/* API for RX */
diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
index 3ca77fad4851..13f9636cdba7 100644
--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
@@ -1811,6 +1811,13 @@ bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
}
+ /* A separate queue to allow synchronous setting of a list of MACs */
+ INIT_LIST_HEAD(&ucam_mod->del_q);
+	for (; i < (bna->ioceth.attr.num_ucmac * 2); i++) {
+ bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
+ list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->del_q);
+ }
+
ucam_mod->bna = bna;
}
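This ucam hunk, and the matching mcam hunk below, carve a doubled MAC array into two lists: the first num entries seed free_q and the second num seed the new del_q, so deletions queued during a synchronous list-set never compete with allocations. A toy user-space sketch of the split, with the kernel list API reimplemented minimally:

#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h)
{
	h->next = h->prev = h;
}

static void list_add_tail(struct list_head *e, struct list_head *h)
{
	e->prev = h->prev;
	e->next = h;
	h->prev->next = e;
	h->prev = e;
}

struct bna_mac { struct list_head qe; };

static void seed_queues(struct bna_mac *macs, size_t num,
			struct list_head *free_q, struct list_head *del_q)
{
	size_t i;

	list_init(free_q);
	list_init(del_q);
	for (i = 0; i < num; i++)
		list_add_tail(&macs[i].qe, free_q);	/* first half */
	for (; i < 2 * num; i++)
		list_add_tail(&macs[i].qe, del_q);	/* second half */
}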
@@ -1818,11 +1825,16 @@ static void
bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
{
struct list_head *qe;
- int i = 0;
+ int i;
+ i = 0;
list_for_each(qe, &ucam_mod->free_q)
i++;
+ i = 0;
+ list_for_each(qe, &ucam_mod->del_q)
+ i++;
+
ucam_mod->bna = NULL;
}
@@ -1851,6 +1863,13 @@ bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
&mcam_mod->free_handle_q);
}
+ /* A separate queue to allow synchronous setting of a list of MACs */
+ INIT_LIST_HEAD(&mcam_mod->del_q);
+	for (; i < (bna->ioceth.attr.num_mcmac * 2); i++) {
+ bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
+ list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->del_q);
+ }
+
mcam_mod->bna = bna;
}
@@ -1864,6 +1883,9 @@ bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
list_for_each(qe, &mcam_mod->free_q) i++;
i = 0;
+ list_for_each(qe, &mcam_mod->del_q) i++;
+
+ i = 0;
list_for_each(qe, &mcam_mod->free_handle_q) i++;
mcam_mod->bna = NULL;
@@ -1976,7 +1998,7 @@ bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info)
BNA_MEM_T_KVA;
res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
- attr->num_ucmac * sizeof(struct bna_mac);
+ (attr->num_ucmac * 2) * sizeof(struct bna_mac);
/* Virtual memory for Multicast MAC address - stored by mcam module */
res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
@@ -1984,7 +2006,7 @@ bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info)
BNA_MEM_T_KVA;
res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
- attr->num_mcmac * sizeof(struct bna_mac);
+ (attr->num_mcmac * 2) * sizeof(struct bna_mac);
/* Virtual memory for Multicast handle - stored by mcam module */
res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_type = BNA_RES_T_MEM;
@@ -2080,41 +2102,21 @@ bna_num_rxp_set(struct bna *bna, int num_rxp)
}
struct bna_mac *
-bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod)
-{
- struct list_head *qe;
-
- if (list_empty(&ucam_mod->free_q))
- return NULL;
-
- bfa_q_deq(&ucam_mod->free_q, &qe);
-
- return (struct bna_mac *)qe;
-}
-
-void
-bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod, struct bna_mac *mac)
-{
- list_add_tail(&mac->qe, &ucam_mod->free_q);
-}
-
-struct bna_mac *
-bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod)
+bna_cam_mod_mac_get(struct list_head *head)
{
struct list_head *qe;
- if (list_empty(&mcam_mod->free_q))
+ if (list_empty(head))
return NULL;
- bfa_q_deq(&mcam_mod->free_q, &qe);
-
+ bfa_q_deq(head, &qe);
return (struct bna_mac *)qe;
}
void
-bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod, struct bna_mac *mac)
+bna_cam_mod_mac_put(struct list_head *tail, struct bna_mac *mac)
{
- list_add_tail(&mac->qe, &mcam_mod->free_q);
+ list_add_tail(&mac->qe, tail);
}
struct bna_mcam_handle *
diff --git a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
index af3f7bb0b3b8..2702d02e98d9 100644
--- a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
@@ -322,6 +322,10 @@ do { \
#define BNA_CQ_EF_REMOTE (1 << 19)
#define BNA_CQ_EF_LOCAL (1 << 20)
+/* CAT2 ASIC does not use bit 21 as per the spec.
+ * Bit 31 is set in every end-of-frame completion.
+ */
+#define BNA_CQ_EF_EOP (1 << 31)
/* Data structures */
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index 3c07064b2bc4..85e63546abe3 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -529,13 +529,13 @@ bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
struct list_head *qe;
int ret;
- /* Delete multicast entries previousely added */
+ /* First delete multicast entries to maintain the count */
while (!list_empty(&rxf->mcast_pending_del_q)) {
bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP);
- bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
if (ret)
return ret;
}
@@ -586,7 +586,7 @@ bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
ret = bna_rxf_mcast_del(rxf, mac, cleanup);
- bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
if (ret)
return ret;
}
@@ -796,20 +796,20 @@ bna_rxf_uninit(struct bna_rxf *rxf)
while (!list_empty(&rxf->ucast_pending_add_q)) {
bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
bfa_q_qe_init(&mac->qe);
- bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+ bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna), mac);
}
if (rxf->ucast_pending_mac) {
bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
- bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod,
- rxf->ucast_pending_mac);
+ bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna),
+ rxf->ucast_pending_mac);
rxf->ucast_pending_mac = NULL;
}
while (!list_empty(&rxf->mcast_pending_add_q)) {
bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
bfa_q_qe_init(&mac->qe);
- bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
}
rxf->rxmode_pending = 0;
@@ -869,7 +869,7 @@ bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
if (rxf->ucast_pending_mac == NULL) {
rxf->ucast_pending_mac =
- bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
+ bna_cam_mod_mac_get(bna_ucam_mod_free_q(rxf->rx->bna));
if (rxf->ucast_pending_mac == NULL)
return BNA_CB_UCAST_CAM_FULL;
bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
@@ -900,7 +900,7 @@ bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
return BNA_CB_SUCCESS;
}
- mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
+ mac = bna_cam_mod_mac_get(bna_mcam_mod_free_q(rxf->rx->bna));
if (mac == NULL)
return BNA_CB_MCAST_LIST_FULL;
bfa_q_qe_init(&mac->qe);
@@ -916,35 +916,92 @@ bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
}
enum bna_cb_status
-bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
+bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist,
void (*cbfn)(struct bnad *, struct bna_rx *))
{
+ struct bna_ucam_mod *ucam_mod = &rx->bna->ucam_mod;
struct bna_rxf *rxf = &rx->rxf;
struct list_head list_head;
struct list_head *qe;
u8 *mcaddr;
- struct bna_mac *mac;
+ struct bna_mac *mac, *del_mac;
int i;
+ /* Purge the pending_add_q */
+ while (!list_empty(&rxf->ucast_pending_add_q)) {
+ bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
+ }
+
+ /* Schedule active_q entries for deletion */
+ while (!list_empty(&rxf->ucast_active_q)) {
+ bfa_q_deq(&rxf->ucast_active_q, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+
+ del_mac = bna_cam_mod_mac_get(&ucam_mod->del_q);
+ memcpy(del_mac, mac, sizeof(*del_mac));
+ list_add_tail(&del_mac->qe, &rxf->ucast_pending_del_q);
+ bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
+ }
+
/* Allocate nodes */
INIT_LIST_HEAD(&list_head);
- for (i = 0, mcaddr = mclist; i < count; i++) {
- mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
+ for (i = 0, mcaddr = uclist; i < count; i++) {
+ mac = bna_cam_mod_mac_get(&ucam_mod->free_q);
if (mac == NULL)
goto err_return;
bfa_q_qe_init(&mac->qe);
memcpy(mac->addr, mcaddr, ETH_ALEN);
list_add_tail(&mac->qe, &list_head);
-
mcaddr += ETH_ALEN;
}
+ /* Add the new entries */
+ while (!list_empty(&list_head)) {
+ bfa_q_deq(&list_head, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+ list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
+ }
+
+ rxf->cam_fltr_cbfn = cbfn;
+ rxf->cam_fltr_cbarg = rx->bna->bnad;
+ bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+
+ return BNA_CB_SUCCESS;
+
+err_return:
+ while (!list_empty(&list_head)) {
+ bfa_q_deq(&list_head, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+ bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
+ }
+
+ return BNA_CB_UCAST_CAM_FULL;
+}
+
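
bna_rx_ucast_listset() above follows a replace-with-rollback shape: purge whatever is still pending, stage every active entry for deletion through a copy taken from del_q, then allocate the entire new list before committing any of it. A hedged, self-contained sketch of that shape (illustrative names; singly linked stacks stand in for the kernel lists):

    #include <stdbool.h>
    #include <stddef.h>
    #include <string.h>

    struct node { unsigned char addr[6]; struct node *next; };
    struct stack { struct node *top; };

    static void push(struct stack *s, struct node *n) { n->next = s->top; s->top = n; }
    static struct node *pop(struct stack *s)
    {
        struct node *n = s->top;
        if (n)
            s->top = n->next;
        return n;
    }

    /* Replace the active list with 'count' new addresses, or leave the
     * free pool intact on failure (rollback). */
    static bool list_replace(struct stack *free_q, struct stack *del_q,
                             struct stack *active, struct stack *pending_add,
                             struct stack *pending_del,
                             const unsigned char *list, int count)
    {
        struct stack staged = { NULL };
        struct node *n, *copy;
        int i;

        while ((n = pop(pending_add)))        /* purge pending adds */
            push(free_q, n);

        while ((n = pop(active))) {           /* stage active entries for delete */
            copy = pop(del_q);                /* del_q is sized so this succeeds */
            memcpy(copy, n, sizeof(*copy));
            push(pending_del, copy);
            push(free_q, n);
        }

        for (i = 0; i < count; i++) {         /* allocate the new list up front */
            n = pop(free_q);
            if (!n) {
                while ((n = pop(&staged)))    /* rollback */
                    push(free_q, n);
                return false;
            }
            memcpy(n->addr, list + i * 6, 6);
            push(&staged, n);
        }
        while ((n = pop(&staged)))            /* commit */
            push(pending_add, n);
        return true;
    }
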
+enum bna_cb_status
+bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
+ void (*cbfn)(struct bnad *, struct bna_rx *))
+{
+ struct bna_mcam_mod *mcam_mod = &rx->bna->mcam_mod;
+ struct bna_rxf *rxf = &rx->rxf;
+ struct list_head list_head;
+ struct list_head *qe;
+ u8 *mcaddr;
+ struct bna_mac *mac, *del_mac;
+ int i;
+
/* Purge the pending_add_q */
while (!list_empty(&rxf->mcast_pending_add_q)) {
bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
- bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
}
/* Schedule active_q entries for deletion */
@@ -952,7 +1009,26 @@ bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
bfa_q_deq(&rxf->mcast_active_q, &qe);
mac = (struct bna_mac *)qe;
bfa_q_qe_init(&mac->qe);
- list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
+
+ del_mac = bna_cam_mod_mac_get(&mcam_mod->del_q);
+
+ memcpy(del_mac, mac, sizeof(*del_mac));
+ list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
+ mac->handle = NULL;
+ bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
+ }
+
+ /* Allocate nodes */
+ INIT_LIST_HEAD(&list_head);
+ for (i = 0, mcaddr = mclist; i < count; i++) {
+ mac = bna_cam_mod_mac_get(&mcam_mod->free_q);
+ if (mac == NULL)
+ goto err_return;
+ bfa_q_qe_init(&mac->qe);
+ memcpy(mac->addr, mcaddr, ETH_ALEN);
+ list_add_tail(&mac->qe, &list_head);
+
+ mcaddr += ETH_ALEN;
}
/* Add the new entries */
@@ -974,13 +1050,56 @@ err_return:
bfa_q_deq(&list_head, &qe);
mac = (struct bna_mac *)qe;
bfa_q_qe_init(&mac->qe);
- bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
}
return BNA_CB_MCAST_LIST_FULL;
}
void
+bna_rx_mcast_delall(struct bna_rx *rx,
+ void (*cbfn)(struct bnad *, struct bna_rx *))
+{
+ struct bna_rxf *rxf = &rx->rxf;
+ struct list_head *qe;
+ struct bna_mac *mac, *del_mac;
+ int need_hw_config = 0;
+
+ /* Purge all entries from pending_add_q */
+ while (!list_empty(&rxf->mcast_pending_add_q)) {
+ bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+ bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
+ }
+
+ /* Schedule all entries in active_q for deletion */
+ while (!list_empty(&rxf->mcast_active_q)) {
+ bfa_q_deq(&rxf->mcast_active_q, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+
+ del_mac = bna_cam_mod_mac_get(bna_mcam_mod_del_q(rxf->rx->bna));
+
+ memcpy(del_mac, mac, sizeof(*del_mac));
+ list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
+ mac->handle = NULL;
+ bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
+ need_hw_config = 1;
+ }
+
+ if (need_hw_config) {
+ rxf->cam_fltr_cbfn = cbfn;
+ rxf->cam_fltr_cbarg = rx->bna->bnad;
+ bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+ return;
+ }
+
+ if (cbfn)
+ (*cbfn)(rx->bna->bnad, rx);
+}
+
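
bna_rx_mcast_delall() above only kicks the RXF state machine when something was actually staged; otherwise it completes synchronously by invoking the callback directly. The same shape in a compilable sketch (names are illustrative):

    #include <stdio.h>

    typedef void (*done_cb)(void *arg);

    /* If work was staged, let the state machine invoke the callback from
     * its completion path; otherwise complete synchronously. */
    static void del_all(int staged, done_cb cb, void *arg)
    {
        if (staged) {
            /* ... kick the FSM; cb fires later from the completion path ... */
            return;
        }
        if (cb)
            cb(arg);               /* nothing to do: complete now */
    }

    static void done(void *arg) { printf("done %s\n", (const char *)arg); }

    int main(void)
    {
        del_all(0, done, "sync");
        return 0;
    }
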
+void
bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
{
struct bna_rxf *rxf = &rx->rxf;
@@ -1022,7 +1141,7 @@ bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
- bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+ bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna), mac);
return 1;
}
@@ -1062,11 +1181,13 @@ bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
if (cleanup == BNA_SOFT_CLEANUP)
- bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+ bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
+ mac);
else {
bna_bfi_ucast_req(rxf, mac,
BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
- bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+ bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
+ mac);
return 1;
}
}
@@ -1690,6 +1811,7 @@ bna_bfi_rx_enet_start(struct bna_rx *rx)
cfg_req->mh.num_entries = htons(
bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req)));
+ cfg_req->rx_cfg.frame_size = bna_enet_mtu_get(&rx->bna->enet);
cfg_req->num_queue_sets = rx->num_paths;
for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
i < rx->num_paths;
@@ -1711,8 +1833,17 @@ bna_bfi_rx_enet_start(struct bna_rx *rx)
/* Large/Single RxQ */
bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q,
&q0->qpt);
- q0->buffer_size =
- bna_enet_mtu_get(&rx->bna->enet);
+ if (q0->multi_buffer)
+ /* multi-buffer is enabled by allocating
+ * a new rx with a new set of resources.
+ * q0->buffer_size should already be
+ * initialized to the fragment size.
+ */
+ cfg_req->rx_cfg.multi_buffer =
+ BNA_STATUS_T_ENABLED;
+ else
+ q0->buffer_size =
+ bna_enet_mtu_get(&rx->bna->enet);
cfg_req->q_cfg[i].ql.rx_buffer_size =
htons((u16)q0->buffer_size);
break;
@@ -2262,8 +2393,8 @@ bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
u32 hq_depth;
u32 dq_depth;
- dq_depth = q_cfg->q_depth;
- hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q_depth);
+ dq_depth = q_cfg->q0_depth;
+ hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q1_depth);
cq_depth = dq_depth + hq_depth;
BNA_TO_POWER_OF_2_HIGH(cq_depth);
@@ -2380,10 +2511,10 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
struct bna_rxq *q0;
struct bna_rxq *q1;
struct bna_intr_info *intr_info;
- u32 page_count;
+ struct bna_mem_descr *hqunmap_mem;
+ struct bna_mem_descr *dqunmap_mem;
struct bna_mem_descr *ccb_mem;
struct bna_mem_descr *rcb_mem;
- struct bna_mem_descr *unmapq_mem;
struct bna_mem_descr *cqpt_mem;
struct bna_mem_descr *cswqpt_mem;
struct bna_mem_descr *cpage_mem;
@@ -2393,8 +2524,10 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
struct bna_mem_descr *dsqpt_mem;
struct bna_mem_descr *hpage_mem;
struct bna_mem_descr *dpage_mem;
- int i;
- int dpage_count, hpage_count, rcb_idx;
+ u32 dpage_count, hpage_count;
+ u32 hq_idx, dq_idx, rcb_idx;
+ u32 cq_depth, i;
+ u32 page_count;
if (!bna_rx_res_check(rx_mod, rx_cfg))
return NULL;
@@ -2402,7 +2535,8 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
- unmapq_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[0];
+ dqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPDQ].res_u.mem_info.mdl[0];
+ hqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPHQ].res_u.mem_info.mdl[0];
cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
@@ -2454,7 +2588,8 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
}
rx->num_paths = rx_cfg->num_paths;
- for (i = 0, rcb_idx = 0; i < rx->num_paths; i++) {
+ for (i = 0, hq_idx = 0, dq_idx = 0, rcb_idx = 0;
+ i < rx->num_paths; i++) {
rxp = bna_rxp_get(rx_mod);
list_add_tail(&rxp->qe, &rx->rxp_q);
rxp->type = rx_cfg->rxp_type;
@@ -2497,9 +2632,13 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
q0->rxp = rxp;
q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
- q0->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
- rcb_idx++;
- q0->rcb->q_depth = rx_cfg->q_depth;
+ q0->rcb->unmap_q = (void *)dqunmap_mem[dq_idx].kva;
+ rcb_idx++; dq_idx++;
+ q0->rcb->q_depth = rx_cfg->q0_depth;
+ q0->q_depth = rx_cfg->q0_depth;
+ q0->multi_buffer = rx_cfg->q0_multi_buf;
+ q0->buffer_size = rx_cfg->q0_buf_size;
+ q0->num_vecs = rx_cfg->q0_num_vecs;
q0->rcb->rxq = q0;
q0->rcb->bnad = bna->bnad;
q0->rcb->id = 0;
@@ -2519,15 +2658,18 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
q1->rxp = rxp;
q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
- q1->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
- rcb_idx++;
- q1->rcb->q_depth = rx_cfg->q_depth;
+ q1->rcb->unmap_q = (void *)hqunmap_mem[hq_idx].kva;
+ rcb_idx++; hq_idx++;
+ q1->rcb->q_depth = rx_cfg->q1_depth;
+ q1->q_depth = rx_cfg->q1_depth;
+ q1->multi_buffer = BNA_STATUS_T_DISABLED;
+ q1->num_vecs = 1;
q1->rcb->rxq = q1;
q1->rcb->bnad = bna->bnad;
q1->rcb->id = 1;
q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ?
rx_cfg->hds_config.forced_offset
- : rx_cfg->small_buff_size;
+ : rx_cfg->q1_buf_size;
q1->rx_packets = q1->rx_bytes = 0;
q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;
@@ -2542,9 +2684,14 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
/* Setup CQ */
rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
- rxp->cq.ccb->q_depth = rx_cfg->q_depth +
- ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
- 0 : rx_cfg->q_depth);
+ cq_depth = rx_cfg->q0_depth +
+ ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
+ 0 : rx_cfg->q1_depth);
+ /* if multi-buffer is enabled, the sum of q0_depth
+ * and q1_depth need not be a power of 2, so round up
+ */
+ BNA_TO_POWER_OF_2_HIGH(cq_depth);
+ rxp->cq.ccb->q_depth = cq_depth;
rxp->cq.ccb->cq = &rxp->cq;
rxp->cq.ccb->rcb[0] = q0->rcb;
q0->rcb->ccb = rxp->cq.ccb;
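
BNA_TO_POWER_OF_2_HIGH rounds the combined CQ depth up to the next power of two, which the ring index arithmetic depends on. Assuming the macro is a standard round-up, an equivalent helper is easy to check in isolation:

    #include <stdio.h>

    /* Round v up to the next power of two (v >= 1); a sketch of what
     * BNA_TO_POWER_OF_2_HIGH is assumed to do. */
    static unsigned int roundup_pow_of_two(unsigned int v)
    {
        v--;
        v |= v >> 1;
        v |= v >> 2;
        v |= v >> 4;
        v |= v >> 8;
        v |= v >> 16;
        return v + 1;
    }

    int main(void)
    {
        /* e.g. q0_depth = 4 * 512 plus a 512-entry header queue */
        unsigned int cq_depth = 2048 + 512;
        printf("cq_depth %u -> %u\n", cq_depth, roundup_pow_of_two(cq_depth));
        /* prints: cq_depth 2560 -> 4096 */
        return 0;
    }
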
@@ -2670,6 +2817,30 @@ bna_rx_cleanup_complete(struct bna_rx *rx)
bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE);
}
+void
+bna_rx_vlan_strip_enable(struct bna_rx *rx)
+{
+ struct bna_rxf *rxf = &rx->rxf;
+
+ if (rxf->vlan_strip_status == BNA_STATUS_T_DISABLED) {
+ rxf->vlan_strip_status = BNA_STATUS_T_ENABLED;
+ rxf->vlan_strip_pending = true;
+ bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+ }
+}
+
+void
+bna_rx_vlan_strip_disable(struct bna_rx *rx)
+{
+ struct bna_rxf *rxf = &rx->rxf;
+
+ if (rxf->vlan_strip_status != BNA_STATUS_T_DISABLED) {
+ rxf->vlan_strip_status = BNA_STATUS_T_DISABLED;
+ rxf->vlan_strip_pending = true;
+ bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+ }
+}
+
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
enum bna_rxmode bitmask,
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
index dc50f7836b6d..621547cd3504 100644
--- a/drivers/net/ethernet/brocade/bna/bna_types.h
+++ b/drivers/net/ethernet/brocade/bna/bna_types.h
@@ -109,20 +109,21 @@ enum bna_tx_res_req_type {
enum bna_rx_mem_type {
BNA_RX_RES_MEM_T_CCB = 0, /* CQ context */
BNA_RX_RES_MEM_T_RCB = 1, /* CQ context */
- BNA_RX_RES_MEM_T_UNMAPQ = 2, /* UnmapQ for RxQs */
- BNA_RX_RES_MEM_T_CQPT = 3, /* CQ QPT */
- BNA_RX_RES_MEM_T_CSWQPT = 4, /* S/W QPT */
- BNA_RX_RES_MEM_T_CQPT_PAGE = 5, /* CQPT page */
- BNA_RX_RES_MEM_T_HQPT = 6, /* RX QPT */
- BNA_RX_RES_MEM_T_DQPT = 7, /* RX QPT */
- BNA_RX_RES_MEM_T_HSWQPT = 8, /* RX s/w QPT */
- BNA_RX_RES_MEM_T_DSWQPT = 9, /* RX s/w QPT */
- BNA_RX_RES_MEM_T_DPAGE = 10, /* RX s/w QPT */
- BNA_RX_RES_MEM_T_HPAGE = 11, /* RX s/w QPT */
- BNA_RX_RES_MEM_T_IBIDX = 12,
- BNA_RX_RES_MEM_T_RIT = 13,
- BNA_RX_RES_T_INTR = 14, /* Rx interrupts */
- BNA_RX_RES_T_MAX = 15
+ BNA_RX_RES_MEM_T_UNMAPHQ = 2,
+ BNA_RX_RES_MEM_T_UNMAPDQ = 3,
+ BNA_RX_RES_MEM_T_CQPT = 4,
+ BNA_RX_RES_MEM_T_CSWQPT = 5,
+ BNA_RX_RES_MEM_T_CQPT_PAGE = 6,
+ BNA_RX_RES_MEM_T_HQPT = 7,
+ BNA_RX_RES_MEM_T_DQPT = 8,
+ BNA_RX_RES_MEM_T_HSWQPT = 9,
+ BNA_RX_RES_MEM_T_DSWQPT = 10,
+ BNA_RX_RES_MEM_T_DPAGE = 11,
+ BNA_RX_RES_MEM_T_HPAGE = 12,
+ BNA_RX_RES_MEM_T_IBIDX = 13,
+ BNA_RX_RES_MEM_T_RIT = 14,
+ BNA_RX_RES_T_INTR = 15,
+ BNA_RX_RES_T_MAX = 16
};
enum bna_tx_type {
@@ -583,6 +584,8 @@ struct bna_rxq {
int buffer_size;
int q_depth;
+ u32 num_vecs;
+ enum bna_status multi_buffer;
struct bna_qpt qpt;
struct bna_rcb *rcb;
@@ -632,6 +635,8 @@ struct bna_ccb {
struct bna_rcb *rcb[2];
void *ctrl; /* For bnad */
struct bna_pkt_rate pkt_rate;
+ u32 pkts_una;
+ u32 bytes_per_intr;
/* Control path */
struct bna_cq *cq;
@@ -671,14 +676,22 @@ struct bna_rx_config {
int num_paths;
enum bna_rxp_type rxp_type;
int paused;
- int q_depth;
int coalescing_timeo;
/*
* Small/Large (or Header/Data) buffer size to be configured
- * for SLR and HDS queue type. Large buffer size comes from
- * enet->mtu.
+ * for SLR and HDS queue type.
*/
- int small_buff_size;
+ u32 frame_size;
+
+ /* header or small queue */
+ u32 q1_depth;
+ u32 q1_buf_size;
+
+ /* data or large queue */
+ u32 q0_depth;
+ u32 q0_buf_size;
+ u32 q0_num_vecs;
+ enum bna_status q0_multi_buf;
enum bna_status rss_status;
struct bna_rss_config rss_config;
@@ -866,8 +879,9 @@ struct bna_rx_mod {
/* CAM */
struct bna_ucam_mod {
- struct bna_mac *ucmac; /* BFI_MAX_UCMAC entries */
+ struct bna_mac *ucmac; /* num_ucmac * 2 entries */
struct list_head free_q;
+ struct list_head del_q;
struct bna *bna;
};
@@ -880,9 +894,10 @@ struct bna_mcam_handle {
};
struct bna_mcam_mod {
- struct bna_mac *mcmac; /* BFI_MAX_MCMAC entries */
- struct bna_mcam_handle *mchandle; /* BFI_MAX_MCMAC entries */
+ struct bna_mac *mcmac; /* num_mcmac * 2 entries */
+ struct bna_mcam_handle *mchandle; /* num_mcmac entries */
struct list_head free_q;
+ struct list_head del_q;
struct list_head free_handle_q;
struct bna *bna;
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 248bc37cb41b..cf64f3d0b60d 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -52,7 +52,7 @@ MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
/*
* Global variables
*/
-u32 bnad_rxqs_per_cq = 2;
+static u32 bnad_rxqs_per_cq = 2;
static u32 bna_id;
static struct mutex bnad_list_mutex;
static LIST_HEAD(bnad_list);
@@ -142,7 +142,8 @@ bnad_tx_buff_unmap(struct bnad *bnad,
dma_unmap_page(&bnad->pcidev->dev,
dma_unmap_addr(&unmap->vectors[vector], dma_addr),
- skb_shinfo(skb)->frags[nvecs].size, DMA_TO_DEVICE);
+ dma_unmap_len(&unmap->vectors[vector], dma_len),
+ DMA_TO_DEVICE);
dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0);
nvecs--;
}
@@ -282,27 +283,32 @@ static int
bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
{
struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
- int mtu, order;
+ int order;
bnad_rxq_alloc_uninit(bnad, rcb);
- mtu = bna_enet_mtu_get(&bnad->bna.enet);
- order = get_order(mtu);
+ order = get_order(rcb->rxq->buffer_size);
+
+ unmap_q->type = BNAD_RXBUF_PAGE;
if (bna_is_small_rxq(rcb->id)) {
unmap_q->alloc_order = 0;
unmap_q->map_size = rcb->rxq->buffer_size;
} else {
- unmap_q->alloc_order = order;
- unmap_q->map_size =
- (rcb->rxq->buffer_size > 2048) ?
- PAGE_SIZE << order : 2048;
+ if (rcb->rxq->multi_buffer) {
+ unmap_q->alloc_order = 0;
+ unmap_q->map_size = rcb->rxq->buffer_size;
+ unmap_q->type = BNAD_RXBUF_MULTI_BUFF;
+ } else {
+ unmap_q->alloc_order = order;
+ unmap_q->map_size =
+ (rcb->rxq->buffer_size > 2048) ?
+ PAGE_SIZE << order : 2048;
+ }
}
BUG_ON(((PAGE_SIZE << order) % unmap_q->map_size));
- unmap_q->type = BNAD_RXBUF_PAGE;
-
return 0;
}
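
The hunk above picks the page-allocation order and per-buffer map size by queue flavor: small and multi-buffer queues map exactly buffer_size each, while the large single-buffer queue either carves pages into 2048-byte chunks (buffers up to 2048 bytes) or maps one whole higher-order allocation per buffer. A userspace sketch of that decision, with get_order() approximated:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1u << PAGE_SHIFT)

    /* smallest order such that (PAGE_SIZE << order) >= size */
    static int get_order(unsigned int size)
    {
        int order = 0;
        while ((PAGE_SIZE << order) < size)
            order++;
        return order;
    }

    int main(void)
    {
        unsigned int buffer_size = 9022;   /* e.g. a jumbo frame size */
        int multi_buffer = 0, small_q = 0;
        unsigned int alloc_order, map_size;

        if (small_q || multi_buffer) {
            alloc_order = 0;
            map_size = buffer_size;        /* one buffer per mapping */
        } else {
            alloc_order = get_order(buffer_size);
            map_size = buffer_size > 2048 ? PAGE_SIZE << alloc_order : 2048;
        }
        printf("order=%u map_size=%u\n", alloc_order, map_size);
        return 0;
    }
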
@@ -345,10 +351,10 @@ bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
for (i = 0; i < rcb->q_depth; i++) {
struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];
- if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
- bnad_rxq_cleanup_page(bnad, unmap);
- else
+ if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
bnad_rxq_cleanup_skb(bnad, unmap);
+ else
+ bnad_rxq_cleanup_page(bnad, unmap);
}
bnad_rxq_alloc_uninit(bnad, rcb);
}
@@ -480,10 +486,10 @@ bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))
return;
- if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
- bnad_rxq_refill_page(bnad, rcb, to_alloc);
- else
+ if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
bnad_rxq_refill_skb(bnad, rcb, to_alloc);
+ else
+ bnad_rxq_refill_page(bnad, rcb, to_alloc);
}
#define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
@@ -500,72 +506,114 @@ bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
#define flags_udp6 (BNA_CQ_EF_IPV6 | \
BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
-static inline struct sk_buff *
-bnad_cq_prepare_skb(struct bnad_rx_ctrl *rx_ctrl,
- struct bnad_rx_unmap_q *unmap_q,
- struct bnad_rx_unmap *unmap,
- u32 length, u32 flags)
+static void
+bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
+ u32 sop_ci, u32 nvecs)
{
- struct bnad *bnad = rx_ctrl->bnad;
- struct sk_buff *skb;
+ struct bnad_rx_unmap_q *unmap_q;
+ struct bnad_rx_unmap *unmap;
+ u32 ci, vec;
- if (BNAD_RXBUF_IS_PAGE(unmap_q->type)) {
- skb = napi_get_frags(&rx_ctrl->napi);
- if (unlikely(!skb))
- return NULL;
+ unmap_q = rcb->unmap_q;
+ for (vec = 0, ci = sop_ci; vec < nvecs; vec++) {
+ unmap = &unmap_q->unmap[ci];
+ BNA_QE_INDX_INC(ci, rcb->q_depth);
+
+ if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
+ bnad_rxq_cleanup_skb(bnad, unmap);
+ else
+ bnad_rxq_cleanup_page(bnad, unmap);
+ }
+}
+
+static void
+bnad_cq_setup_skb_frags(struct bna_rcb *rcb, struct sk_buff *skb,
+ u32 sop_ci, u32 nvecs, u32 last_fraglen)
+{
+ struct bnad *bnad;
+ u32 ci, vec, len, totlen = 0;
+ struct bnad_rx_unmap_q *unmap_q;
+ struct bnad_rx_unmap *unmap;
+
+ unmap_q = rcb->unmap_q;
+ bnad = rcb->bnad;
+
+ /* prefetch header */
+ prefetch(page_address(unmap_q->unmap[sop_ci].page) +
+ unmap_q->unmap[sop_ci].page_offset);
+
+ for (vec = 1, ci = sop_ci; vec <= nvecs; vec++) {
+ unmap = &unmap_q->unmap[ci];
+ BNA_QE_INDX_INC(ci, rcb->q_depth);
dma_unmap_page(&bnad->pcidev->dev,
dma_unmap_addr(&unmap->vector, dma_addr),
unmap->vector.len, DMA_FROM_DEVICE);
+
+ len = (vec == nvecs) ?
+ last_fraglen : unmap->vector.len;
+ totlen += len;
+
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
- unmap->page, unmap->page_offset, length);
- skb->len += length;
- skb->data_len += length;
- skb->truesize += length;
+ unmap->page, unmap->page_offset, len);
unmap->page = NULL;
unmap->vector.len = 0;
-
- return skb;
}
- skb = unmap->skb;
- BUG_ON(!skb);
+ skb->len += totlen;
+ skb->data_len += totlen;
+ skb->truesize += totlen;
+}
+
+static inline void
+bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb,
+ struct bnad_rx_unmap *unmap, u32 len)
+{
+ prefetch(skb->data);
dma_unmap_single(&bnad->pcidev->dev,
dma_unmap_addr(&unmap->vector, dma_addr),
unmap->vector.len, DMA_FROM_DEVICE);
- skb_put(skb, length);
-
+ skb_put(skb, len);
skb->protocol = eth_type_trans(skb, bnad->netdev);
unmap->skb = NULL;
unmap->vector.len = 0;
- return skb;
}
static u32
bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
{
- struct bna_cq_entry *cq, *cmpl;
+ struct bna_cq_entry *cq, *cmpl, *next_cmpl;
struct bna_rcb *rcb = NULL;
struct bnad_rx_unmap_q *unmap_q;
- struct bnad_rx_unmap *unmap;
- struct sk_buff *skb;
+ struct bnad_rx_unmap *unmap = NULL;
+ struct sk_buff *skb = NULL;
struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl;
- u32 packets = 0, length = 0, flags, masked_flags;
+ u32 packets = 0, len = 0, totlen = 0;
+ u32 pi, vec, sop_ci = 0, nvecs = 0;
+ u32 flags, masked_flags;
prefetch(bnad->netdev);
cq = ccb->sw_q;
cmpl = &cq[ccb->producer_index];
- while (cmpl->valid && (packets < budget)) {
- packets++;
- flags = ntohl(cmpl->flags);
- length = ntohs(cmpl->length);
+ while (packets < budget) {
+ if (!cmpl->valid)
+ break;
+ /* The 'valid' field is set by the adapter, only after writing
+ * the other fields of completion entry. Hence, do not load
+ * other fields of completion entry *before* the 'valid' is
+ * loaded. Adding the rmb() here prevents the compiler and/or
+ * CPU from reordering the reads which would potentially result
+ * in reading stale values in completion entry.
+ */
+ rmb();
+
BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
if (bna_is_small_rxq(cmpl->rxq_id))
@@ -574,25 +622,78 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
rcb = ccb->rcb[0];
unmap_q = rcb->unmap_q;
- unmap = &unmap_q->unmap[rcb->consumer_index];
- if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
- BNA_CQ_EF_FCS_ERROR |
- BNA_CQ_EF_TOO_LONG))) {
- if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
- bnad_rxq_cleanup_page(bnad, unmap);
- else
- bnad_rxq_cleanup_skb(bnad, unmap);
+ /* start of packet ci */
+ sop_ci = rcb->consumer_index;
+ if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) {
+ unmap = &unmap_q->unmap[sop_ci];
+ skb = unmap->skb;
+ } else {
+ skb = napi_get_frags(&rx_ctrl->napi);
+ if (unlikely(!skb))
+ break;
+ }
+ prefetch(skb);
+
+ flags = ntohl(cmpl->flags);
+ len = ntohs(cmpl->length);
+ totlen = len;
+ nvecs = 1;
+
+ /* Check all the completions for this frame.
+ * If one is not yet valid, busy-waiting doesn't help much; break out.
+ */
+ if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) &&
+ (flags & BNA_CQ_EF_EOP) == 0) {
+ pi = ccb->producer_index;
+ do {
+ BNA_QE_INDX_INC(pi, ccb->q_depth);
+ next_cmpl = &cq[pi];
+
+ if (!next_cmpl->valid)
+ break;
+ /* The 'valid' field is set by the adapter, only
+ * after writing the other fields of completion
+ * entry. Hence, do not load other fields of
+ * completion entry *before* the 'valid' is
+ * loaded. Adding the rmb() here prevents the
+ * compiler and/or CPU from reordering the reads
+ * which would potentially result in reading
+ * stale values in completion entry.
+ */
+ rmb();
+
+ len = ntohs(next_cmpl->length);
+ flags = ntohl(next_cmpl->flags);
+
+ nvecs++;
+ totlen += len;
+ } while ((flags & BNA_CQ_EF_EOP) == 0);
+
+ if (!next_cmpl->valid)
+ break;
+ }
+
+ /* TODO: BNA_CQ_EF_LOCAL ? */
+ if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
+ BNA_CQ_EF_FCS_ERROR |
+ BNA_CQ_EF_TOO_LONG))) {
+ bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs);
rcb->rxq->rx_packets_with_error++;
+
goto next;
}
- skb = bnad_cq_prepare_skb(ccb->ctrl, unmap_q, unmap,
- length, flags);
+ if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
+ bnad_cq_setup_skb(bnad, skb, unmap, len);
+ else
+ bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len);
- if (unlikely(!skb))
- break;
+ packets++;
+ rcb->rxq->rx_packets++;
+ rcb->rxq->rx_bytes += totlen;
+ ccb->bytes_per_intr += totlen;
masked_flags = flags & flags_cksum_prot_mask;
@@ -606,21 +707,21 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
else
skb_checksum_none_assert(skb);
- rcb->rxq->rx_packets++;
- rcb->rxq->rx_bytes += length;
-
if (flags & BNA_CQ_EF_VLAN)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));
- if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
- napi_gro_frags(&rx_ctrl->napi);
- else
+ if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
netif_receive_skb(skb);
+ else
+ napi_gro_frags(&rx_ctrl->napi);
next:
- cmpl->valid = 0;
- BNA_QE_INDX_INC(rcb->consumer_index, rcb->q_depth);
- BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
+ BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth);
+ for (vec = 0; vec < nvecs; vec++) {
+ cmpl = &cq[ccb->producer_index];
+ cmpl->valid = 0;
+ BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
+ }
cmpl = &cq[ccb->producer_index];
}
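
The multi-buffer receive path above consumes completion entries until it sees BNA_CQ_EF_EOP, summing the per-vector lengths, and bails out if a completion is not yet valid rather than busy-waiting. A compilable model of that walk (the real code also issues rmb() after each valid check before reading the other fields):

    #include <stdio.h>

    #define Q_DEPTH 8                      /* power of two */
    #define EF_EOP  (1u << 31)

    struct cmpl { unsigned int valid, flags, length; };

    /* Gather one frame's completions starting at ci; returns the number
     * of vectors consumed, or 0 if the frame is not fully posted yet. */
    static int gather_frame(const struct cmpl *cq, unsigned int ci,
                            unsigned int *totlen)
    {
        unsigned int nvecs = 0, len = 0, flags;

        do {
            const struct cmpl *c = &cq[ci];
            if (!c->valid)
                return 0;                  /* busy-waiting would not help */
            flags = c->flags;
            len += c->length;
            nvecs++;
            ci = (ci + 1) & (Q_DEPTH - 1);
        } while (!(flags & EF_EOP));

        *totlen = len;
        return nvecs;
    }

    int main(void)
    {
        struct cmpl cq[Q_DEPTH] = {
            { 1, 0, 2048 }, { 1, 0, 2048 }, { 1, EF_EOP, 1000 },
        };
        unsigned int totlen;
        int nvecs = gather_frame(cq, 0, &totlen);

        printf("nvecs=%d totlen=%u\n", nvecs, totlen);  /* nvecs=3 totlen=5096 */
        return 0;
    }
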
@@ -1899,8 +2000,10 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id)
tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
tx_info);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
- if (!tx)
+ if (!tx) {
+ err = -ENOMEM;
goto err_return;
+ }
tx_info->tx = tx;
INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
@@ -1911,7 +2014,7 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id)
err = bnad_tx_msix_register(bnad, tx_info,
tx_id, bnad->num_txq_per_tx);
if (err)
- goto err_return;
+ goto cleanup_tx;
}
spin_lock_irqsave(&bnad->bna_lock, flags);
@@ -1920,6 +2023,12 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id)
return 0;
+cleanup_tx:
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_tx_destroy(tx_info->tx);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ tx_info->tx = NULL;
+ tx_info->tx_id = 0;
err_return:
bnad_tx_res_free(bnad, res_info);
return err;
@@ -1930,6 +2039,7 @@ err_return:
static void
bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
{
+ memset(rx_config, 0, sizeof(*rx_config));
rx_config->rx_type = BNA_RX_T_REGULAR;
rx_config->num_paths = bnad->num_rxp_per_rx;
rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
@@ -1950,10 +2060,39 @@ bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
memset(&rx_config->rss_config, 0,
sizeof(rx_config->rss_config));
}
+
+ rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu);
+ rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED;
+
+ /* BNA_RXP_SINGLE - one data-buffer queue
+ * BNA_RXP_SLR - one small-buffer and one large-buffer queue
+ * BNA_RXP_HDS - one header-buffer and one data-buffer queue
+ */
+ /* TODO: configurable param for queue type */
rx_config->rxp_type = BNA_RXP_SLR;
- rx_config->q_depth = bnad->rxq_depth;
- rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;
+ if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
+ rx_config->frame_size > 4096) {
+ /* though size_routing_enable is set in SLR,
+ * small packets may get routed to the same rxq.
+ * Set buf_size to 2048 instead of PAGE_SIZE.
+ */
+ rx_config->q0_buf_size = 2048;
+ /* this must be a multiple of 2 */
+ rx_config->q0_num_vecs = 4;
+ rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs;
+ rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED;
+ } else {
+ rx_config->q0_buf_size = rx_config->frame_size;
+ rx_config->q0_num_vecs = 1;
+ rx_config->q0_depth = bnad->rxq_depth;
+ }
+
+ /* initialize q1 for BNA_RXP_SLR/BNA_RXP_HDS */
+ if (rx_config->rxp_type == BNA_RXP_SLR) {
+ rx_config->q1_depth = bnad->rxq_depth;
+ rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE;
+ }
rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
}
@@ -1969,6 +2108,49 @@ bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
}
/* Called with mutex_lock(&bnad->conf_mutex) held */
+static u32
+bnad_reinit_rx(struct bnad *bnad)
+{
+ struct net_device *netdev = bnad->netdev;
+ u32 err = 0, current_err = 0;
+ u32 rx_id = 0, count = 0;
+ unsigned long flags;
+
+ /* destroy and create new rx objects */
+ for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
+ if (!bnad->rx_info[rx_id].rx)
+ continue;
+ bnad_destroy_rx(bnad, rx_id);
+ }
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_enet_mtu_set(&bnad->bna.enet,
+ BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
+ count++;
+ current_err = bnad_setup_rx(bnad, rx_id);
+ if (current_err && !err) {
+ err = current_err;
+ pr_err("RXQ:%u setup failed\n", rx_id);
+ }
+ }
+
+ /* restore rx configuration */
+ if (bnad->rx_info[0].rx && !err) {
+ bnad_restore_vlans(bnad, 0);
+ bnad_enable_default_bcast(bnad);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ bnad_set_rx_mode(netdev);
+ }
+
+ return count;
+}
+
+/* Called with bnad_conf_lock() held */
void
bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
{
@@ -2047,13 +2229,19 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id)
spin_unlock_irqrestore(&bnad->bna_lock, flags);
/* Fill Unmap Q memory requirements */
- BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPQ],
- rx_config->num_paths +
- ((rx_config->rxp_type == BNA_RXP_SINGLE) ?
- 0 : rx_config->num_paths),
- ((bnad->rxq_depth * sizeof(struct bnad_rx_unmap)) +
- sizeof(struct bnad_rx_unmap_q)));
-
+ BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ],
+ rx_config->num_paths,
+ (rx_config->q0_depth *
+ sizeof(struct bnad_rx_unmap)) +
+ sizeof(struct bnad_rx_unmap_q));
+
+ if (rx_config->rxp_type != BNA_RXP_SINGLE) {
+ BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ],
+ rx_config->num_paths,
+ (rx_config->q1_depth *
+ sizeof(struct bnad_rx_unmap) +
+ sizeof(struct bnad_rx_unmap_q)));
+ }
/* Allocate resource */
err = bnad_rx_res_alloc(bnad, res_info, rx_id);
if (err)
@@ -2548,7 +2736,6 @@ bnad_open(struct net_device *netdev)
int err;
struct bnad *bnad = netdev_priv(netdev);
struct bna_pause_config pause_config;
- int mtu;
unsigned long flags;
mutex_lock(&bnad->conf_mutex);
@@ -2567,10 +2754,9 @@ bnad_open(struct net_device *netdev)
pause_config.tx_pause = 0;
pause_config.rx_pause = 0;
- mtu = ETH_HLEN + VLAN_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
-
spin_lock_irqsave(&bnad->bna_lock, flags);
- bna_enet_mtu_set(&bnad->bna.enet, mtu, NULL);
+ bna_enet_mtu_set(&bnad->bna.enet,
+ BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
bna_enet_enable(&bnad->bna.enet);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -2624,9 +2810,6 @@ bnad_stop(struct net_device *netdev)
bnad_destroy_tx(bnad, 0);
bnad_destroy_rx(bnad, 0);
- /* These config flags are cleared in the hardware */
- bnad->cfg_flags &= ~(BNAD_CF_ALLMULTI | BNAD_CF_PROMISC);
-
/* Synchronize mailbox IRQ */
bnad_mbox_irq_sync(bnad);
@@ -2784,21 +2967,21 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
}
tcb = bnad->tx_info[0].tcb[txq_id];
- q_depth = tcb->q_depth;
- prod = tcb->producer_index;
-
- unmap_q = tcb->unmap_q;
/*
* Takes care of the Tx that is scheduled between clearing the flag
* and the netif_tx_stop_all_queues() call.
*/
- if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
+ if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
dev_kfree_skb(skb);
BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
return NETDEV_TX_OK;
}
+ q_depth = tcb->q_depth;
+ prod = tcb->producer_index;
+ unmap_q = tcb->unmap_q;
+
vectors = 1 + skb_shinfo(skb)->nr_frags;
wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
@@ -2863,7 +3046,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
for (i = 0, vect_id = 0; i < vectors - 1; i++) {
const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
- u16 size = skb_frag_size(frag);
+ u32 size = skb_frag_size(frag);
if (unlikely(size == 0)) {
/* Undo the changes starting at tcb->producer_index */
@@ -2888,10 +3071,11 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
0, size, DMA_TO_DEVICE);
+ dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size);
BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
txqent->vector[vect_id].length = htons(size);
dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr,
- dma_addr);
+ dma_addr);
head_unmap->nvecs++;
}
@@ -2911,6 +3095,8 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
return NETDEV_TX_OK;
+ skb_tx_timestamp(skb);
+
bna_txq_prod_indx_doorbell(tcb);
smp_mb();
@@ -2937,73 +3123,133 @@ bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
return stats;
}
+static void
+bnad_set_rx_ucast_fltr(struct bnad *bnad)
+{
+ struct net_device *netdev = bnad->netdev;
+ int uc_count = netdev_uc_count(netdev);
+ enum bna_cb_status ret;
+ u8 *mac_list;
+ struct netdev_hw_addr *ha;
+ int entry;
+
+ if (netdev_uc_empty(bnad->netdev)) {
+ bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
+ return;
+ }
+
+ if (uc_count > bna_attr(&bnad->bna)->num_ucmac)
+ goto mode_default;
+
+ mac_list = kzalloc(uc_count * ETH_ALEN, GFP_ATOMIC);
+ if (mac_list == NULL)
+ goto mode_default;
+
+ entry = 0;
+ netdev_for_each_uc_addr(ha, netdev) {
+ memcpy(&mac_list[entry * ETH_ALEN],
+ &ha->addr[0], ETH_ALEN);
+ entry++;
+ }
+
+ ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry,
+ mac_list, NULL);
+ kfree(mac_list);
+
+ if (ret != BNA_CB_SUCCESS)
+ goto mode_default;
+
+ return;
+
+ /* ucast packets not in the UCAM are routed to the default function */
+mode_default:
+ bnad->cfg_flags |= BNAD_CF_DEFAULT;
+ bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
+}
+
+static void
+bnad_set_rx_mcast_fltr(struct bnad *bnad)
+{
+ struct net_device *netdev = bnad->netdev;
+ int mc_count = netdev_mc_count(netdev);
+ enum bna_cb_status ret;
+ u8 *mac_list;
+
+ if (netdev->flags & IFF_ALLMULTI)
+ goto mode_allmulti;
+
+ if (netdev_mc_empty(netdev))
+ return;
+
+ if (mc_count > bna_attr(&bnad->bna)->num_mcmac)
+ goto mode_allmulti;
+
+ mac_list = kzalloc((mc_count + 1) * ETH_ALEN, GFP_ATOMIC);
+
+ if (mac_list == NULL)
+ goto mode_allmulti;
+
+ memcpy(&mac_list[0], &bnad_bcast_addr[0], ETH_ALEN);
+
+ /* copy the rest of the MCAST addresses */
+ bnad_netdev_mc_list_get(netdev, mac_list);
+ ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
+ mac_list, NULL);
+ kfree(mac_list);
+
+ if (ret != BNA_CB_SUCCESS)
+ goto mode_allmulti;
+
+ return;
+
+mode_allmulti:
+ bnad->cfg_flags |= BNAD_CF_ALLMULTI;
+ bna_rx_mcast_delall(bnad->rx_info[0].rx, NULL);
+}
+
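
Both filter helpers above share a degrade-gracefully shape: program the exact list when it fits in the CAM and the write succeeds, otherwise fall back to a coarser mode (default-function routing for unicast, allmulti for multicast). The shape, reduced to a sketch with hypothetical names:

    #include <stdbool.h>
    #include <stdio.h>

    enum fltr_mode { FLTR_EXACT, FLTR_FALLBACK };

    /* Program an exact filter list if the hardware has room and the
     * write succeeds; otherwise fall back to the coarse mode. */
    static enum fltr_mode program_filter(int count, int hw_slots,
                                         bool (*hw_write)(int count))
    {
        if (count > hw_slots || !hw_write(count))
            return FLTR_FALLBACK;
        return FLTR_EXACT;
    }

    static bool fake_hw_write(int count) { return count <= 4; }

    int main(void)
    {
        printf("%d\n", program_filter(3, 8, fake_hw_write));   /* 0: exact */
        printf("%d\n", program_filter(16, 8, fake_hw_write));  /* 1: fallback */
        return 0;
    }
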
void
bnad_set_rx_mode(struct net_device *netdev)
{
struct bnad *bnad = netdev_priv(netdev);
- u32 new_mask, valid_mask;
+ enum bna_rxmode new_mode, mode_mask;
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
- new_mask = valid_mask = 0;
-
- if (netdev->flags & IFF_PROMISC) {
- if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
- new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
- valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
- bnad->cfg_flags |= BNAD_CF_PROMISC;
- }
- } else {
- if (bnad->cfg_flags & BNAD_CF_PROMISC) {
- new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
- valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
- bnad->cfg_flags &= ~BNAD_CF_PROMISC;
- }
- }
-
- if (netdev->flags & IFF_ALLMULTI) {
- if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
- new_mask |= BNA_RXMODE_ALLMULTI;
- valid_mask |= BNA_RXMODE_ALLMULTI;
- bnad->cfg_flags |= BNAD_CF_ALLMULTI;
- }
- } else {
- if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
- new_mask &= ~BNA_RXMODE_ALLMULTI;
- valid_mask |= BNA_RXMODE_ALLMULTI;
- bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
- }
+ if (bnad->rx_info[0].rx == NULL) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ return;
}
- if (bnad->rx_info[0].rx == NULL)
- goto unlock;
+ /* clear bnad flags to update them with the new settings */
+ bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT |
+ BNAD_CF_ALLMULTI);
- bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
+ new_mode = 0;
+ if (netdev->flags & IFF_PROMISC) {
+ new_mode |= BNAD_RXMODE_PROMISC_DEFAULT;
+ bnad->cfg_flags |= BNAD_CF_PROMISC;
+ } else {
+ bnad_set_rx_mcast_fltr(bnad);
- if (!netdev_mc_empty(netdev)) {
- u8 *mcaddr_list;
- int mc_count = netdev_mc_count(netdev);
+ if (bnad->cfg_flags & BNAD_CF_ALLMULTI)
+ new_mode |= BNA_RXMODE_ALLMULTI;
- /* Index 0 holds the broadcast address */
- mcaddr_list =
- kzalloc((mc_count + 1) * ETH_ALEN,
- GFP_ATOMIC);
- if (!mcaddr_list)
- goto unlock;
+ bnad_set_rx_ucast_fltr(bnad);
- memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);
+ if (bnad->cfg_flags & BNAD_CF_DEFAULT)
+ new_mode |= BNA_RXMODE_DEFAULT;
+ }
- /* Copy rest of the MC addresses */
- bnad_netdev_mc_list_get(netdev, mcaddr_list);
+ mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT |
+ BNA_RXMODE_ALLMULTI;
+ bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask, NULL);
- bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
- mcaddr_list, NULL);
+ if (bnad->cfg_flags & BNAD_CF_PROMISC)
+ bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);
+ else
+ bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
- /* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
- kfree(mcaddr_list);
- }
-unlock:
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
@@ -3033,14 +3279,14 @@ bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
}
static int
-bnad_mtu_set(struct bnad *bnad, int mtu)
+bnad_mtu_set(struct bnad *bnad, int frame_size)
{
unsigned long flags;
init_completion(&bnad->bnad_completions.mtu_comp);
spin_lock_irqsave(&bnad->bna_lock, flags);
- bna_enet_mtu_set(&bnad->bna.enet, mtu, bnad_cb_enet_mtu_set);
+ bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
wait_for_completion(&bnad->bnad_completions.mtu_comp);
@@ -3051,18 +3297,34 @@ bnad_mtu_set(struct bnad *bnad, int mtu)
static int
bnad_change_mtu(struct net_device *netdev, int new_mtu)
{
- int err, mtu = netdev->mtu;
+ int err, mtu;
struct bnad *bnad = netdev_priv(netdev);
+ u32 rx_count = 0, frame, new_frame;
if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
return -EINVAL;
mutex_lock(&bnad->conf_mutex);
+ mtu = netdev->mtu;
netdev->mtu = new_mtu;
- mtu = ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN;
- err = bnad_mtu_set(bnad, mtu);
+ frame = BNAD_FRAME_SIZE(mtu);
+ new_frame = BNAD_FRAME_SIZE(new_mtu);
+
+ /* check if multi-buffer needs to be enabled */
+ if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
+ netif_running(bnad->netdev)) {
+ /* only when the transition crosses the 4K boundary */
+ if ((frame <= 4096 && new_frame > 4096) ||
+ (frame > 4096 && new_frame <= 4096))
+ rx_count = bnad_reinit_rx(bnad);
+ }
+
+ /* rx_count > 0 - new rx objects were created;
+ * Linux expects err = 0 to be returned in that case
+ */
+ err = bnad_mtu_set(bnad, new_frame);
if (err)
err = -EBUSY;
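
The reinit decision above hinges on BNAD_FRAME_SIZE(mtu) crossing the 4096-byte boundary in either direction, since that is where the CAT2 multi-buffer layout changes. The arithmetic, reproduced as a standalone check:

    #include <stdbool.h>
    #include <stdio.h>

    #define ETH_HLEN    14
    #define VLAN_HLEN   4
    #define ETH_FCS_LEN 4
    #define FRAME_SIZE(mtu) (ETH_HLEN + VLAN_HLEN + (mtu) + ETH_FCS_LEN)

    /* Rx must be rebuilt only when the wire frame size crosses 4096
     * bytes, as in the bnad_change_mtu() hunk above. */
    static bool needs_rx_reinit(int old_mtu, int new_mtu)
    {
        int f = FRAME_SIZE(old_mtu), nf = FRAME_SIZE(new_mtu);

        return (f <= 4096 && nf > 4096) || (f > 4096 && nf <= 4096);
    }

    int main(void)
    {
        printf("%d\n", needs_rx_reinit(1500, 9000));  /* 1: crosses 4K */
        printf("%d\n", needs_rx_reinit(1500, 1400));  /* 0: stays below */
        return 0;
    }
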
@@ -3262,7 +3524,6 @@ bnad_uninit(struct bnad *bnad)
if (bnad->bar0)
iounmap(bnad->bar0);
- pci_set_drvdata(bnad->pcidev, NULL);
}
/*
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index f7e033f8a00e..2842c188e0da 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -71,7 +71,7 @@ struct bnad_rx_ctrl {
#define BNAD_NAME "bna"
#define BNAD_NAME_LEN 64
-#define BNAD_VERSION "3.2.21.1"
+#define BNAD_VERSION "3.2.23.0"
#define BNAD_MAILBOX_MSIX_INDEX 0
#define BNAD_MAILBOX_MSIX_VECTORS 1
@@ -84,7 +84,7 @@ struct bnad_rx_ctrl {
#define BNAD_IOCETH_TIMEOUT 10000
#define BNAD_MIN_Q_DEPTH 512
-#define BNAD_MAX_RXQ_DEPTH 2048
+#define BNAD_MAX_RXQ_DEPTH 16384
#define BNAD_MAX_TXQ_DEPTH 2048
#define BNAD_JUMBO_MTU 9000
@@ -105,6 +105,9 @@ struct bnad_rx_ctrl {
#define BNAD_NUM_TXQ (bnad->num_tx * bnad->num_txq_per_tx)
#define BNAD_NUM_RXP (bnad->num_rx * bnad->num_rxp_per_rx)
+#define BNAD_FRAME_SIZE(_mtu) \
+ (ETH_HLEN + VLAN_HLEN + (_mtu) + ETH_FCS_LEN)
+
/*
* DATA STRUCTURES
*/
@@ -219,6 +222,7 @@ struct bnad_rx_info {
struct bnad_tx_vector {
DEFINE_DMA_UNMAP_ADDR(dma_addr);
+ DEFINE_DMA_UNMAP_LEN(dma_len);
};
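
The new dma_len field lets bnad_tx_buff_unmap() pass the exact mapped length to dma_unmap_page() instead of indexing the skb fragments (which the old code did with the wrong index). A userspace model of carrying the (addr, len) pair through map and unmap (names are illustrative):

    #include <stdio.h>

    /* Model of DEFINE_DMA_UNMAP_ADDR/LEN: record what was mapped so the
     * unmap side never has to re-derive the length from the skb. */
    struct tx_vector {
        unsigned long long dma_addr;
        unsigned int dma_len;
    };

    static void map_frag(struct tx_vector *v, unsigned long long addr,
                         unsigned int len)
    {
        v->dma_addr = addr;
        v->dma_len = len;          /* the new piece of state */
    }

    static void unmap_frag(struct tx_vector *v)
    {
        printf("unmap addr=%#llx len=%u\n", v->dma_addr, v->dma_len);
        v->dma_addr = 0;
        v->dma_len = 0;
    }

    int main(void)
    {
        struct tx_vector v;

        map_frag(&v, 0x1000, 1514);
        unmap_frag(&v);
        return 0;
    }
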
struct bnad_tx_unmap {
@@ -234,33 +238,38 @@ struct bnad_rx_vector {
struct bnad_rx_unmap {
struct page *page;
- u32 page_offset;
struct sk_buff *skb;
struct bnad_rx_vector vector;
+ u32 page_offset;
};
enum bnad_rxbuf_type {
BNAD_RXBUF_NONE = 0,
- BNAD_RXBUF_SKB = 1,
+ BNAD_RXBUF_SK_BUFF = 1,
BNAD_RXBUF_PAGE = 2,
- BNAD_RXBUF_MULTI = 3
+ BNAD_RXBUF_MULTI_BUFF = 3
};
-#define BNAD_RXBUF_IS_PAGE(_type) ((_type) == BNAD_RXBUF_PAGE)
+#define BNAD_RXBUF_IS_SK_BUFF(_type) ((_type) == BNAD_RXBUF_SK_BUFF)
+#define BNAD_RXBUF_IS_MULTI_BUFF(_type) ((_type) == BNAD_RXBUF_MULTI_BUFF)
struct bnad_rx_unmap_q {
int reuse_pi;
int alloc_order;
u32 map_size;
enum bnad_rxbuf_type type;
- struct bnad_rx_unmap unmap[0];
+ struct bnad_rx_unmap unmap[0] ____cacheline_aligned;
};
+#define BNAD_PCI_DEV_IS_CAT2(_bnad) \
+ ((_bnad)->pcidev->device == BFA_PCI_DEVICE_ID_CT2)
+
/* Bit mask values for bnad->cfg_flags */
#define BNAD_CF_DIM_ENABLED 0x01 /* DIM */
#define BNAD_CF_PROMISC 0x02
#define BNAD_CF_ALLMULTI 0x04
-#define BNAD_CF_MSIX 0x08 /* If in MSIx mode */
+#define BNAD_CF_DEFAULT 0x08
+#define BNAD_CF_MSIX 0x10 /* If in MSIx mode */
/* Defines for run_flags bit-mask */
/* Set, tested & cleared using xxx_bit() functions */
@@ -367,7 +376,6 @@ struct bnad_drvinfo {
* EXTERN VARIABLES
*/
extern const struct firmware *bfi_fw;
-extern u32 bnad_rxqs_per_cq;
/*
* EXTERN PROTOTYPES
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index 455b5a2e59d4..f9e150825bb5 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -1131,6 +1131,7 @@ static const struct ethtool_ops bnad_ethtool_ops = {
.get_eeprom = bnad_get_eeprom,
.set_eeprom = bnad_set_eeprom,
.flash_device = bnad_flash_device,
+ .get_ts_info = ethtool_op_get_ts_info,
};
void
diff --git a/drivers/net/ethernet/brocade/bna/cna.h b/drivers/net/ethernet/brocade/bna/cna.h
index 43405f654b4a..b3ff6d507951 100644
--- a/drivers/net/ethernet/brocade/bna/cna.h
+++ b/drivers/net/ethernet/brocade/bna/cna.h
@@ -37,8 +37,8 @@
extern char bfa_version[];
-#define CNA_FW_FILE_CT "ctfw-3.2.1.1.bin"
-#define CNA_FW_FILE_CT2 "ct2fw-3.2.1.1.bin"
+#define CNA_FW_FILE_CT "ctfw-3.2.3.0.bin"
+#define CNA_FW_FILE_CT2 "ct2fw-3.2.3.0.bin"
#define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */
#pragma pack(1)
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 92578690f6de..3190d38e16fb 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -17,6 +17,7 @@
#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
@@ -203,6 +204,47 @@ static int macb_mdio_reset(struct mii_bus *bus)
return 0;
}
+/**
+ * macb_set_tx_clk() - Set a clock to a new frequency
+ * @clk: Pointer to the clock to change
+ * @speed: New link speed (SPEED_10, SPEED_100 or SPEED_1000)
+ * @dev: Pointer to the struct net_device
+ */
+static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
+{
+ long ferr, rate, rate_rounded;
+
+ switch (speed) {
+ case SPEED_10:
+ rate = 2500000;
+ break;
+ case SPEED_100:
+ rate = 25000000;
+ break;
+ case SPEED_1000:
+ rate = 125000000;
+ break;
+ default:
+ return;
+ }
+
+ rate_rounded = clk_round_rate(clk, rate);
+ if (rate_rounded < 0)
+ return;
+
+ /* RGMII allows 50 ppm frequency error. Test and warn if this limit
+ * is not satisfied.
+ */
+ ferr = abs(rate_rounded - rate);
+ ferr = DIV_ROUND_UP(ferr, rate / 100000);
+ if (ferr > 5)
+ netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
+ rate);
+
+ if (clk_set_rate(clk, rate_rounded))
+ netdev_err(dev, "adjusting tx_clk failed.\n");
+}
+
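
The 50 ppm test in macb_set_tx_clk() scales the absolute error into units of 10 ppm before comparing against 5. Assuming clk_round_rate() returns the nearest supported rate, the arithmetic checks out in isolation:

    #include <stdio.h>
    #include <stdlib.h>

    /* Mirror of the ppm arithmetic above: ferr / (rate / 100000) yields
     * the error in units of 10 ppm, so a result above 5 means the 50 ppm
     * RGMII budget is exceeded. */
    static int exceeds_50ppm(long rate, long rate_rounded)
    {
        long ferr = labs(rate_rounded - rate);

        ferr = (ferr + rate / 100000 - 1) / (rate / 100000);  /* DIV_ROUND_UP */
        return ferr > 5;
    }

    int main(void)
    {
        printf("%d\n", exceeds_50ppm(125000000, 125005000));  /* 40 ppm: 0 */
        printf("%d\n", exceeds_50ppm(125000000, 125010000));  /* 80 ppm: 1 */
        return 0;
    }
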
static void macb_handle_link_change(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
@@ -250,6 +292,9 @@ static void macb_handle_link_change(struct net_device *dev)
spin_unlock_irqrestore(&bp->lock, flags);
+ if (!IS_ERR(bp->tx_clk))
+ macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);
+
if (status_change) {
if (phydev->link) {
netif_carrier_on(dev);
@@ -1790,21 +1835,44 @@ static int __init macb_probe(struct platform_device *pdev)
spin_lock_init(&bp->lock);
INIT_WORK(&bp->tx_error_task, macb_tx_error_task);
- bp->pclk = clk_get(&pdev->dev, "pclk");
+ bp->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(bp->pclk)) {
- dev_err(&pdev->dev, "failed to get macb_clk\n");
+ err = PTR_ERR(bp->pclk);
+ dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
goto err_out_free_dev;
}
- clk_prepare_enable(bp->pclk);
- bp->hclk = clk_get(&pdev->dev, "hclk");
+ bp->hclk = devm_clk_get(&pdev->dev, "hclk");
if (IS_ERR(bp->hclk)) {
- dev_err(&pdev->dev, "failed to get hclk\n");
- goto err_out_put_pclk;
+ err = PTR_ERR(bp->hclk);
+ dev_err(&pdev->dev, "failed to get hclk (%u)\n", err);
+ goto err_out_free_dev;
}
- clk_prepare_enable(bp->hclk);
- bp->regs = ioremap(regs->start, resource_size(regs));
+ bp->tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
+
+ err = clk_prepare_enable(bp->pclk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
+ goto err_out_free_dev;
+ }
+
+ err = clk_prepare_enable(bp->hclk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable hclk (%u)\n", err);
+ goto err_out_disable_pclk;
+ }
+
+ if (!IS_ERR(bp->tx_clk)) {
+ err = clk_prepare_enable(bp->tx_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n",
+ err);
+ goto err_out_disable_hclk;
+ }
+ }
+
+ bp->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
if (!bp->regs) {
dev_err(&pdev->dev, "failed to map registers, aborting.\n");
err = -ENOMEM;
@@ -1812,11 +1880,12 @@ static int __init macb_probe(struct platform_device *pdev)
}
dev->irq = platform_get_irq(pdev, 0);
- err = request_irq(dev->irq, macb_interrupt, 0, dev->name, dev);
+ err = devm_request_irq(&pdev->dev, dev->irq, macb_interrupt, 0,
+ dev->name, dev);
if (err) {
dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n",
dev->irq, err);
- goto err_out_iounmap;
+ goto err_out_disable_clocks;
}
dev->netdev_ops = &macb_netdev_ops;
@@ -1879,7 +1948,7 @@ static int __init macb_probe(struct platform_device *pdev)
err = register_netdev(dev);
if (err) {
dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
- goto err_out_free_irq;
+ goto err_out_disable_clocks;
}
err = macb_mii_init(bp);
@@ -1902,16 +1971,13 @@ static int __init macb_probe(struct platform_device *pdev)
err_out_unregister_netdev:
unregister_netdev(dev);
-err_out_free_irq:
- free_irq(dev->irq, dev);
-err_out_iounmap:
- iounmap(bp->regs);
err_out_disable_clocks:
+ if (!IS_ERR(bp->tx_clk))
+ clk_disable_unprepare(bp->tx_clk);
+err_out_disable_hclk:
clk_disable_unprepare(bp->hclk);
- clk_put(bp->hclk);
+err_out_disable_pclk:
clk_disable_unprepare(bp->pclk);
-err_out_put_pclk:
- clk_put(bp->pclk);
err_out_free_dev:
free_netdev(dev);
err_out:
@@ -1933,12 +1999,10 @@ static int __exit macb_remove(struct platform_device *pdev)
kfree(bp->mii_bus->irq);
mdiobus_free(bp->mii_bus);
unregister_netdev(dev);
- free_irq(dev->irq, dev);
- iounmap(bp->regs);
+ if (!IS_ERR(bp->tx_clk))
+ clk_disable_unprepare(bp->tx_clk);
clk_disable_unprepare(bp->hclk);
- clk_put(bp->hclk);
clk_disable_unprepare(bp->pclk);
- clk_put(bp->pclk);
free_netdev(dev);
}
@@ -1946,45 +2010,49 @@ static int __exit macb_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM
-static int macb_suspend(struct platform_device *pdev, pm_message_t state)
+static int macb_suspend(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct net_device *netdev = platform_get_drvdata(pdev);
struct macb *bp = netdev_priv(netdev);
netif_carrier_off(netdev);
netif_device_detach(netdev);
+ if (!IS_ERR(bp->tx_clk))
+ clk_disable_unprepare(bp->tx_clk);
clk_disable_unprepare(bp->hclk);
clk_disable_unprepare(bp->pclk);
return 0;
}
-static int macb_resume(struct platform_device *pdev)
+static int macb_resume(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct net_device *netdev = platform_get_drvdata(pdev);
struct macb *bp = netdev_priv(netdev);
clk_prepare_enable(bp->pclk);
clk_prepare_enable(bp->hclk);
+ if (!IS_ERR(bp->tx_clk))
+ clk_prepare_enable(bp->tx_clk);
netif_device_attach(netdev);
return 0;
}
-#else
-#define macb_suspend NULL
-#define macb_resume NULL
#endif
+static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume);
+
static struct platform_driver macb_driver = {
.remove = __exit_p(macb_remove),
- .suspend = macb_suspend,
- .resume = macb_resume,
.driver = {
.name = "macb",
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(macb_dt_ids),
+ .pm = &macb_pm_ops,
},
};
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index f4076155bed7..51c02442160a 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -572,6 +572,7 @@ struct macb {
struct platform_device *pdev;
struct clk *pclk;
struct clk *hclk;
+ struct clk *tx_clk;
struct net_device *dev;
struct napi_struct napi;
struct work_struct tx_error_task;
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 4fc5c8ef5121..d2a183c3a6ce 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -14,7 +14,6 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/circ_buf.h>
#include <linux/interrupt.h>
diff --git a/drivers/net/ethernet/chelsio/cxgb/common.h b/drivers/net/ethernet/chelsio/cxgb/common.h
index 8abb46b39032..53b1f9478383 100644
--- a/drivers/net/ethernet/chelsio/cxgb/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb/common.h
@@ -11,8 +11,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
@@ -50,7 +49,6 @@
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/crc32.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <linux/pci_ids.h>
diff --git a/drivers/net/ethernet/chelsio/cxgb/cphy.h b/drivers/net/ethernet/chelsio/cxgb/cphy.h
index 1f095a9fc739..a4d2a4c08d3f 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cphy.h
+++ b/drivers/net/ethernet/chelsio/cxgb/cphy.h
@@ -11,8 +11,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h b/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h
index e36d45b78cc7..5249686afe71 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h
+++ b/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h
@@ -11,8 +11,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 1d021059f097..0fe7ff750d77 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -11,8 +11,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
@@ -38,7 +37,6 @@
#include "common.h"
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/chelsio/cxgb/elmer0.h b/drivers/net/ethernet/chelsio/cxgb/elmer0.h
index eef655c827d9..81526ad36339 100644
--- a/drivers/net/ethernet/chelsio/cxgb/elmer0.h
+++ b/drivers/net/ethernet/chelsio/cxgb/elmer0.h
@@ -11,8 +11,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/espi.c b/drivers/net/ethernet/chelsio/cxgb/espi.c
index 639ff1955739..3e182eee799e 100644
--- a/drivers/net/ethernet/chelsio/cxgb/espi.c
+++ b/drivers/net/ethernet/chelsio/cxgb/espi.c
@@ -12,8 +12,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/espi.h b/drivers/net/ethernet/chelsio/cxgb/espi.h
index 5694aad4fbc0..162de5259df9 100644
--- a/drivers/net/ethernet/chelsio/cxgb/espi.h
+++ b/drivers/net/ethernet/chelsio/cxgb/espi.h
@@ -11,8 +11,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/gmac.h b/drivers/net/ethernet/chelsio/cxgb/gmac.h
index d42337457cf7..dfa77491a910 100644
--- a/drivers/net/ethernet/chelsio/cxgb/gmac.h
+++ b/drivers/net/ethernet/chelsio/cxgb/gmac.h
@@ -12,8 +12,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c b/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c
index f7136b2fd1e5..d0cf611551a1 100644
--- a/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c
+++ b/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c
@@ -12,8 +12,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/pm3393.c b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
index eb33a31b08a0..ec5e05052d99 100644
--- a/drivers/net/ethernet/chelsio/cxgb/pm3393.c
+++ b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
@@ -12,8 +12,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/regs.h b/drivers/net/ethernet/chelsio/cxgb/regs.h
index c80bf4d6d0a6..964ce59ee169 100644
--- a/drivers/net/ethernet/chelsio/cxgb/regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb/regs.h
@@ -11,8 +11,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index 8061fb0ef7ed..4c5879389003 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -12,8 +12,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
@@ -47,7 +46,6 @@
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
-#include <linux/init.h>
#include <linux/mm.h>
#include <linux/tcp.h>
#include <linux/ip.h>
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.h b/drivers/net/ethernet/chelsio/cxgb/sge.h
index b9bf16b385f7..a1ba591b3431 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.h
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.h
@@ -11,8 +11,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/subr.c b/drivers/net/ethernet/chelsio/cxgb/subr.c
index e0a03a31e7c4..816719314cc8 100644
--- a/drivers/net/ethernet/chelsio/cxgb/subr.c
+++ b/drivers/net/ethernet/chelsio/cxgb/subr.c
@@ -12,8 +12,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h b/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h
index d0f87d82566a..7f79cc7ceb75 100644
--- a/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h
@@ -12,8 +12,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb3/common.h b/drivers/net/ethernet/chelsio/cxgb3/common.h
index 8c82248ce416..442480982d3f 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/common.h
@@ -36,7 +36,6 @@
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index 76ae09999b5b..c0a9dd55f4e5 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -182,7 +182,7 @@ static struct net_device *get_iff_from_mac(struct adapter *adapter,
for_each_port(adapter, i) {
struct net_device *dev = adapter->port[i];
- if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {
+ if (ether_addr_equal(dev->dev_addr, mac)) {
rcu_read_lock();
if (vlan && vlan != VLAN_VID_MASK) {
dev = __vlan_find_dev_deep(dev, htons(ETH_P_8021Q), vlan);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
index 8d53438638b2..5f226eda8cd6 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
@@ -429,7 +429,7 @@ found:
} else {
e->state = neigh->nud_state & NUD_CONNECTED ?
L2T_STATE_VALID : L2T_STATE_STALE;
- if (memcmp(e->dmac, neigh->ha, 6))
+ if (!ether_addr_equal(e->dmac, neigh->ha))
setup_l2e_send_pending(dev, NULL, e);
}
}
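
The memcmp()-to-ether_addr_equal() conversions in the two hunks above are mechanical but easy to get backwards, since memcmp() returns 0 on a match while ether_addr_equal() returns true. A minimal sketch of both polarities (macs_match is a hypothetical helper, not a chelsio symbol):

#include <linux/etherdevice.h>

static bool macs_match(const u8 *a, const u8 *b)
{
	/* Old style: "!memcmp(a, b, ETH_ALEN)" means "equal".
	 * New style: ether_addr_equal() returns true on a match, so
	 * "!memcmp(...)" becomes "ether_addr_equal(...)" and a bare
	 * "memcmp(...)" test becomes "!ether_addr_equal(...)".
	 */
	return ether_addr_equal(a, b);
}
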
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 56e0415f8cdf..1f4b9b30b9ed 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -50,13 +50,13 @@
#include "cxgb4_uld.h"
#define T4FW_VERSION_MAJOR 0x01
-#define T4FW_VERSION_MINOR 0x06
-#define T4FW_VERSION_MICRO 0x18
+#define T4FW_VERSION_MINOR 0x09
+#define T4FW_VERSION_MICRO 0x17
#define T4FW_VERSION_BUILD 0x00
#define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x08
-#define T5FW_VERSION_MICRO 0x1C
+#define T5FW_VERSION_MINOR 0x09
+#define T5FW_VERSION_MICRO 0x17
#define T5FW_VERSION_BUILD 0x00
#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
@@ -387,8 +387,9 @@ struct work_struct;
enum { /* adapter flags */
FULL_INIT_DONE = (1 << 0),
- USING_MSI = (1 << 1),
- USING_MSIX = (1 << 2),
+ DEV_ENABLED = (1 << 1),
+ USING_MSI = (1 << 2),
+ USING_MSIX = (1 << 3),
FW_OK = (1 << 4),
RSS_TNLALLLOOKUP = (1 << 5),
USING_SOFT_PARAMS = (1 << 6),
@@ -938,7 +939,6 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable);
int get_vpd_params(struct adapter *adapter, struct vpd_params *p);
int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
unsigned int t4_flash_cfg_addr(struct adapter *adapter);
-int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size);
int t4_get_fw_version(struct adapter *adapter, u32 *vers);
int t4_get_tp_version(struct adapter *adapter, u32 *vers);
int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
@@ -979,13 +979,6 @@ int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
int t4_fw_bye(struct adapter *adap, unsigned int mbox);
int t4_early_init(struct adapter *adap, unsigned int mbox);
int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
-int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force);
-int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset);
-int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
- const u8 *fw_data, unsigned int size, int force);
-int t4_fw_config_file(struct adapter *adap, unsigned int mbox,
- unsigned int mtype, unsigned int maddr,
- u32 *finiver, u32 *finicsum, u32 *cfcsum);
int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
unsigned int cache_line_size);
int t4_fw_initialize(struct adapter *adap, unsigned int mbox);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index fff02ed1295e..43ab35fea48d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -4288,7 +4288,15 @@ static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
struct port_info *p = netdev_priv(dev);
struct adapter *adapter = p->adapter;
+ /* Block retrieving statistics during EEH error
+ * recovery. Otherwise, the recovery might fail
+ * and the PCI device will be removed permanently
+ */
spin_lock(&adapter->stats_lock);
+ if (!netif_device_present(dev)) {
+ spin_unlock(&adapter->stats_lock);
+ return ns;
+ }
t4_get_port_stats(adapter, p->tx_chan, &stats);
spin_unlock(&adapter->stats_lock);
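
The guard added here is a common EEH pattern: once the error handler has detached the net devices under stats_lock (see the eeh_err_detected() hunk below), any later holder of the same lock can use netif_device_present() to tell whether hardware access is still safe. A stripped-down sketch of the reader side; my_adapter, my_stats_read and hw_read_counters are invented stand-ins, not cxgb4 symbols:

#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct my_adapter {			/* hypothetical, for illustration */
	spinlock_t stats_lock;
};

static void hw_read_counters(struct my_adapter *adap) { /* MMIO reads */ }

static void my_stats_read(struct net_device *dev, struct my_adapter *adap)
{
	spin_lock(&adap->stats_lock);
	if (!netif_device_present(dev)) {
		/* Device was detached (e.g. by EEH error handling);
		 * touching its registers now could break recovery.
		 */
		spin_unlock(&adap->stats_lock);
		return;
	}
	hw_read_counters(adap);		/* safe: device still attached */
	spin_unlock(&adap->stats_lock);
}
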
@@ -5496,16 +5504,21 @@ static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
rtnl_lock();
adap->flags &= ~FW_OK;
notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
+ spin_lock(&adap->stats_lock);
for_each_port(adap, i) {
struct net_device *dev = adap->port[i];
netif_device_detach(dev);
netif_carrier_off(dev);
}
+ spin_unlock(&adap->stats_lock);
if (adap->flags & FULL_INIT_DONE)
cxgb_down(adap);
rtnl_unlock();
- pci_disable_device(pdev);
+ if ((adap->flags & DEV_ENABLED)) {
+ pci_disable_device(pdev);
+ adap->flags &= ~DEV_ENABLED;
+ }
out: return state == pci_channel_io_perm_failure ?
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
@@ -5522,9 +5535,13 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_RECOVERED;
}
- if (pci_enable_device(pdev)) {
- dev_err(&pdev->dev, "cannot reenable PCI device after reset\n");
- return PCI_ERS_RESULT_DISCONNECT;
+ if (!(adap->flags & DEV_ENABLED)) {
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev, "Cannot reenable PCI "
+ "device after reset\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ adap->flags |= DEV_ENABLED;
}
pci_set_master(pdev);
@@ -5910,6 +5927,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_disable_device;
}
+ /* PCI device has been enabled */
+ adapter->flags |= DEV_ENABLED;
+
adapter->regs = pci_ioremap_bar(pdev, 0);
if (!adapter->regs) {
dev_err(&pdev->dev, "cannot map device registers\n");
@@ -6143,10 +6163,13 @@ static void remove_one(struct pci_dev *pdev)
iounmap(adapter->regs);
if (!is_t4(adapter->params.chip))
iounmap(adapter->bar2);
- kfree(adapter);
pci_disable_pcie_error_reporting(pdev);
- pci_disable_device(pdev);
+ if ((adapter->flags & DEV_ENABLED)) {
+ pci_disable_device(pdev);
+ adapter->flags &= ~DEV_ENABLED;
+ }
pci_release_regions(pdev);
+ kfree(adapter);
} else
pci_release_regions(pdev);
}
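
The new DEV_ENABLED flag exists because pci_enable_device()/pci_disable_device() must stay balanced: after a permanent EEH failure both eeh_err_detected() and remove_one() may run, and repeated resets may re-enter eeh_slot_reset(), so without a flag the device could be disabled or enabled twice. A hedged sketch of the idiom; my_flags and the MY_DEV_ENABLED bit are placeholders, and real code keeps the flag per adapter rather than file-scope:

#include <linux/pci.h>

#define MY_DEV_ENABLED	(1 << 0)	/* illustration only */

static unsigned int my_flags;

static int my_enable(struct pci_dev *pdev)
{
	if (!(my_flags & MY_DEV_ENABLED)) {
		if (pci_enable_device(pdev))
			return -EIO;
		my_flags |= MY_DEV_ENABLED;
	}
	return 0;
}

static void my_disable(struct pci_dev *pdev)
{
	if (my_flags & MY_DEV_ENABLED) {
		pci_disable_device(pdev);
		my_flags &= ~MY_DEV_ENABLED;
	}
}

Every teardown path can then call my_disable() unconditionally; whichever path runs first wins and the second call is a no-op.
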
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index cc3511a5cd0c..47ffa64fcf19 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1630,7 +1630,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb_record_rx_queue(skb, rxq->rspq.idx);
if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
- skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
+ skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
+ PKT_HASH_TYPE_L3);
if (unlikely(pkt->vlan_ex)) {
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
@@ -1686,7 +1687,8 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
skb->protocol = eth_type_trans(skb, q->netdev);
skb_record_rx_queue(skb, q->idx);
if (skb->dev->features & NETIF_F_RXHASH)
- skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
+ skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
+ PKT_HASH_TYPE_L3);
rxq->stats.pkts++;
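
skb_set_hash() replaces open-coded writes to skb->rxhash (and skb->l4_rxhash) with one call that records both the hash value and its type: PKT_HASH_TYPE_L4 marks a 4-tuple (TCP/UDP) hash, PKT_HASH_TYPE_L3 an address-only one. A minimal sketch of the receive-path usage; the hash value and the hash_is_l4 flag are assumed to come from the RX descriptor:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void my_record_rss_hash(struct sk_buff *skb, u32 hash, bool hash_is_l4)
{
	if (skb->dev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, hash,
			     hash_is_l4 ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}

The cxgb4 hunks above always report L3; the enic conversion later in this patch uses exactly this L4-versus-L3 split, keyed on its NIC_CFG_RSS_HASH_TYPE_TCP_* bits.
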
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index e1413eacdbd2..2c109343d570 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -32,12 +32,13 @@
* SOFTWARE.
*/
-#include <linux/init.h>
#include <linux/delay.h>
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4fw_api.h"
+static int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
+ const u8 *fw_data, unsigned int size, int force);
/**
* t4_wait_op_done_val - wait until an operation is completed
* @adapter: the adapter performing the operation
@@ -1070,62 +1071,6 @@ unsigned int t4_flash_cfg_addr(struct adapter *adapter)
}
/**
- * t4_load_cfg - download config file
- * @adap: the adapter
- * @cfg_data: the cfg text file to write
- * @size: text file size
- *
- * Write the supplied config text file to the card's serial flash.
- */
-int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
-{
- int ret, i, n;
- unsigned int addr;
- unsigned int flash_cfg_start_sec;
- unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
-
- addr = t4_flash_cfg_addr(adap);
- flash_cfg_start_sec = addr / SF_SEC_SIZE;
-
- if (size > FLASH_CFG_MAX_SIZE) {
- dev_err(adap->pdev_dev, "cfg file too large, max is %u bytes\n",
- FLASH_CFG_MAX_SIZE);
- return -EFBIG;
- }
-
- i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */
- sf_sec_size);
- ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
- flash_cfg_start_sec + i - 1);
- /*
- * If size == 0 then we're simply erasing the FLASH sectors associated
- * with the on-adapter Firmware Configuration File.
- */
- if (ret || size == 0)
- goto out;
-
- /* this will write to the flash up to SF_PAGE_SIZE at a time */
- for (i = 0; i < size; i += SF_PAGE_SIZE) {
- if ((size - i) < SF_PAGE_SIZE)
- n = size - i;
- else
- n = SF_PAGE_SIZE;
- ret = t4_write_flash(adap, addr, n, cfg_data);
- if (ret)
- goto out;
-
- addr += SF_PAGE_SIZE;
- cfg_data += SF_PAGE_SIZE;
- }
-
-out:
- if (ret)
- dev_err(adap->pdev_dev, "config file %s failed %d\n",
- (size == 0 ? "clear" : "download"), ret);
- return ret;
-}
-
-/**
* t4_load_fw - download firmware
* @adap: the adapter
* @fw_data: the firmware image to write
@@ -2810,7 +2755,7 @@ int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
* be doing. The only way out of this state is to RESTART the firmware
* ...
*/
-int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
+static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
{
int ret = 0;
@@ -2875,7 +2820,7 @@ int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
* the chip since older firmware won't recognize the PCIE_FW.HALT
* flag and automatically RESET itself on startup.
*/
-int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
+static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
{
if (reset) {
/*
@@ -2938,8 +2883,8 @@ int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
* positive errno indicates that the adapter is ~probably~ intact, a
* negative errno indicates that things are looking bad ...
*/
-int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
- const u8 *fw_data, unsigned int size, int force)
+static int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
+ const u8 *fw_data, unsigned int size, int force)
{
const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
int reset, ret;
@@ -2964,78 +2909,6 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
return t4_fw_restart(adap, mbox, reset);
}
-
-/**
- * t4_fw_config_file - setup an adapter via a Configuration File
- * @adap: the adapter
- * @mbox: mailbox to use for the FW command
- * @mtype: the memory type where the Configuration File is located
- * @maddr: the memory address where the Configuration File is located
- * @finiver: return value for CF [fini] version
- * @finicsum: return value for CF [fini] checksum
- * @cfcsum: return value for CF computed checksum
- *
- * Issue a command to get the firmware to process the Configuration
- * File located at the specified mtype/maddress. If the Configuration
- * File is processed successfully and return value pointers are
- * provided, the Configuration File "[fini] section version and
- * checksum values will be returned along with the computed checksum.
- * It's up to the caller to decide how it wants to respond to the
- * checksums not matching but it recommended that a prominant warning
- * be emitted in order to help people rapidly identify changed or
- * corrupted Configuration Files.
- *
- * Also note that it's possible to modify things like "niccaps",
- * "toecaps",etc. between processing the Configuration File and telling
- * the firmware to use the new configuration. Callers which want to
- * do this will need to "hand-roll" their own CAPS_CONFIGS commands for
- * Configuration Files if they want to do this.
- */
-int t4_fw_config_file(struct adapter *adap, unsigned int mbox,
- unsigned int mtype, unsigned int maddr,
- u32 *finiver, u32 *finicsum, u32 *cfcsum)
-{
- struct fw_caps_config_cmd caps_cmd;
- int ret;
-
- /*
- * Tell the firmware to process the indicated Configuration File.
- * If there are no errors and the caller has provided return value
- * pointers for the [fini] section version, checksum and computed
- * checksum, pass those back to the caller.
- */
- memset(&caps_cmd, 0, sizeof(caps_cmd));
- caps_cmd.op_to_write =
- htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_READ);
- caps_cmd.cfvalid_to_len16 =
- htonl(FW_CAPS_CONFIG_CMD_CFVALID |
- FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
- FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
- FW_LEN16(caps_cmd));
- ret = t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), &caps_cmd);
- if (ret < 0)
- return ret;
-
- if (finiver)
- *finiver = ntohl(caps_cmd.finiver);
- if (finicsum)
- *finicsum = ntohl(caps_cmd.finicsum);
- if (cfcsum)
- *cfcsum = ntohl(caps_cmd.cfcsum);
-
- /*
- * And now tell the firmware to use the configuration we just loaded.
- */
- caps_cmd.op_to_write =
- htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_WRITE);
- caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
- return t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), NULL);
-}
-
/**
* t4_fixup_host_params - fix up host-dependent parameters
* @adap: the adapter
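
With no callers left outside t4_hw.c, t4_fw_halt(), t4_fw_restart() and t4_fw_upgrade() become static and their prototypes drop out of cxgb4.h (as do t4_load_cfg() and t4_fw_config_file(), which are removed outright). Because t4_fw_upgrade() is used above its definition, a static forward declaration is added near the top of the file. The shape of that idiom, in miniature:

/* A forward declaration lets earlier code call the helper while
 * keeping it file-local, with no prototype exported in a header.
 */
static int helper(int x);

static int caller(int x)
{
	return helper(x) + 1;
}

static int helper(int x)
{
	return x * 2;
}
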
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 61362450d05b..f412d0fa0850 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -268,7 +268,6 @@ int t4vf_wait_dev_ready(struct adapter *);
int t4vf_port_init(struct adapter *, int);
int t4vf_fw_reset(struct adapter *);
-int t4vf_query_params(struct adapter *, unsigned int, const u32 *, u32 *);
int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *);
int t4vf_get_sge_params(struct adapter *);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index d958c44341b5..25dfeb8f28ed 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -363,8 +363,8 @@ int t4vf_fw_reset(struct adapter *adapter)
* Reads the values of firmware or device parameters. Up to 7 parameters
* can be queried at once.
*/
-int t4vf_query_params(struct adapter *adapter, unsigned int nparams,
- const u32 *params, u32 *vals)
+static int t4vf_query_params(struct adapter *adapter, unsigned int nparams,
+ const u32 *params, u32 *vals)
{
int i, ret;
struct fw_params_cmd cmd, rpl;
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index ec88de4ac162..2be2a99c5ea3 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -18,7 +18,6 @@
#include <linux/mii.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index ff78dfaec508..b740bfce72ef 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1036,11 +1036,12 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
skb->protocol = eth_type_trans(skb, netdev);
skb_record_rx_queue(skb, q_number);
if (netdev->features & NETIF_F_RXHASH) {
- skb->rxhash = rss_hash;
- if (rss_type & (NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX |
- NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 |
- NIC_CFG_RSS_HASH_TYPE_TCP_IPV4))
- skb->l4_rxhash = true;
+ skb_set_hash(skb, rss_hash,
+ (rss_type &
+ (NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX |
+ NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 |
+ NIC_CFG_RSS_HASH_TYPE_TCP_IPV4)) ?
+ PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}
if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
diff --git a/drivers/net/ethernet/cisco/enic/enic_pp.c b/drivers/net/ethernet/cisco/enic/enic_pp.c
index 43464f0a4f99..e6a83198c3dd 100644
--- a/drivers/net/ethernet/cisco/enic/enic_pp.c
+++ b/drivers/net/ethernet/cisco/enic/enic_pp.c
@@ -162,7 +162,7 @@ static int enic_are_pp_different(struct enic_port_profile *pp1,
return strcmp(pp1->name, pp2->name) | !!memcmp(pp1->instance_uuid,
pp2->instance_uuid, PORT_UUID_MAX) |
!!memcmp(pp1->host_uuid, pp2->host_uuid, PORT_UUID_MAX) |
- !!memcmp(pp1->mac_addr, pp2->mac_addr, ETH_ALEN);
+ !ether_addr_equal(pp1->mac_addr, pp2->mac_addr);
}
static int enic_pp_preassociate(struct enic *enic, int vf,
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 7080ad6c4014..a1a2b4028a5c 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -23,7 +23,6 @@
#include <linux/ioport.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
@@ -110,8 +109,8 @@ typedef struct board_info {
u8 imr_all;
unsigned int flags;
- unsigned int in_suspend :1;
- unsigned int wake_supported :1;
+ unsigned int in_suspend:1;
+ unsigned int wake_supported:1;
enum dm9000_type type;
@@ -162,7 +161,7 @@ static inline board_info_t *to_dm9000_board(struct net_device *dev)
* Read a byte from I/O port
*/
static u8
-ior(board_info_t * db, int reg)
+ior(board_info_t *db, int reg)
{
writeb(reg, db->io_addr);
return readb(db->io_data);
@@ -173,7 +172,7 @@ ior(board_info_t * db, int reg)
*/
static void
-iow(board_info_t * db, int reg, int value)
+iow(board_info_t *db, int reg, int value)
{
writeb(reg, db->io_addr);
writeb(value, db->io_data);
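
The ior()/iow() pair being cleaned up here is the DM9000's indexed register interface: the register number is written to the address port (io_addr), and the value is then read from or written to the data port (io_data). A generic sketch of the same two-port idiom, with the idx_regs structure and its ioremap'ed pointers assumed for illustration:

#include <linux/io.h>
#include <linux/types.h>

struct idx_regs {			/* hypothetical register window */
	void __iomem *addr;		/* write the register index here */
	void __iomem *data;		/* then access the value here */
};

static u8 idx_read(struct idx_regs *r, int reg)
{
	writeb(reg, r->addr);
	return readb(r->data);
}

static void idx_write(struct idx_regs *r, int reg, u8 val)
{
	writeb(reg, r->addr);
	writeb(val, r->data);
}
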
@@ -745,9 +744,9 @@ static const struct ethtool_ops dm9000_ethtool_ops = {
.get_link = dm9000_get_link,
.get_wol = dm9000_get_wol,
.set_wol = dm9000_set_wol,
- .get_eeprom_len = dm9000_get_eeprom_len,
- .get_eeprom = dm9000_get_eeprom,
- .set_eeprom = dm9000_set_eeprom,
+ .get_eeprom_len = dm9000_get_eeprom_len,
+ .get_eeprom = dm9000_get_eeprom,
+ .set_eeprom = dm9000_set_eeprom,
};
static void dm9000_show_carrier(board_info_t *db,
@@ -795,7 +794,7 @@ dm9000_poll_work(struct work_struct *w)
}
} else
mii_check_media(&db->mii, netif_msg_link(db), 0);
-
+
if (netif_running(ndev))
dm9000_schedule_poll(db);
}
@@ -1252,12 +1251,11 @@ static irqreturn_t dm9000_wol_interrupt(int irq, void *dev_id)
dev_info(db->dev, "wake by link status change\n");
if (wcr & WCR_SAMPLEST)
dev_info(db->dev, "wake by sample packet\n");
- if (wcr & WCR_MAGICST )
+ if (wcr & WCR_MAGICST)
dev_info(db->dev, "wake by magic packet\n");
if (!(wcr & (WCR_LINKST | WCR_SAMPLEST | WCR_MAGICST)))
dev_err(db->dev, "wake signalled with no reason? "
"NSR=0x%02x, WSR=0x%02x\n", nsr, wcr);
-
}
spin_unlock_irqrestore(&db->lock, flags);
@@ -1314,7 +1312,7 @@ dm9000_open(struct net_device *dev)
mii_check_media(&db->mii, netif_msg_link(db), 1);
netif_start_queue(dev);
-
+
dm9000_schedule_poll(db);
return 0;
@@ -1628,7 +1626,7 @@ dm9000_probe(struct platform_device *pdev)
if (!is_valid_ether_addr(ndev->dev_addr)) {
/* try reading from mac */
-
+
mac_src = "chip";
for (i = 0; i < 6; i++)
ndev->dev_addr[i] = ior(db, i+DM9000_PAR);
diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
index df5a892fb49c..1812f4916917 100644
--- a/drivers/net/ethernet/dec/tulip/eeprom.c
+++ b/drivers/net/ethernet/dec/tulip/eeprom.c
@@ -13,7 +13,6 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include "tulip.h"
-#include <linux/init.h>
#include <asm/unaligned.h>
diff --git a/drivers/net/ethernet/dec/tulip/media.c b/drivers/net/ethernet/dec/tulip/media.c
index 93a4afaa09f1..dcf21a36a9cf 100644
--- a/drivers/net/ethernet/dec/tulip/media.c
+++ b/drivers/net/ethernet/dec/tulip/media.c
@@ -12,7 +12,6 @@
#include <linux/kernel.h>
#include <linux/mii.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include "tulip.h"
@@ -458,7 +457,7 @@ void tulip_find_mii(struct net_device *dev, int board_idx)
/* Find the connected MII xcvrs.
Doing this in open() would allow detecting external xcvrs later,
but takes much time. */
- for (phyn = 1; phyn <= 32 && phy_idx < sizeof (tp->phys); phyn++) {
+ for (phyn = 1; phyn <= 32 && phy_idx < ARRAY_SIZE(tp->phys); phyn++) {
int phy = phyn & 0x1f;
int mii_status = tulip_mdio_read (dev, phy, MII_BMSR);
if ((mii_status & 0x8301) == 0x8001 ||
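
The ARRAY_SIZE() change above is more than style: sizeof() yields a byte count, which only equals the element count when the element type happens to be one byte wide, so "phy_idx < sizeof(tp->phys)" silently breaks if the array's element type ever changes. ARRAY_SIZE() states the intent and stays correct. A minimal illustration with an invented array:

#include <linux/kernel.h>	/* ARRAY_SIZE() */
#include <linux/types.h>

static u16 phys[4];		/* stand-in, not the tulip field */

static size_t bound_demo(void)
{
	/* sizeof(phys) == 8 (bytes) but ARRAY_SIZE(phys) == 4
	 * (elements); looping to sizeof() would walk 8 slots of a
	 * 4-element array. ARRAY_SIZE() is always the safe bound.
	 */
	return ARRAY_SIZE(phys);
}
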
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index a5397b130724..aa4ee385091f 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -1192,9 +1192,6 @@ static int uli526x_suspend(struct pci_dev *pdev, pm_message_t state)
ULI526X_DBUG(0, "uli526x_suspend", 0);
- if (!netdev_priv(dev))
- return 0;
-
pci_save_state(pdev);
if (!netif_running(dev))
@@ -1228,9 +1225,6 @@ static int uli526x_resume(struct pci_dev *pdev)
ULI526X_DBUG(0, "uli526x_resume", 0);
- if (!netdev_priv(dev))
- return 0;
-
pci_restore_state(pdev);
if (!netif_running(dev))
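
The deleted "if (!netdev_priv(dev))" checks were dead code: netdev_priv() is plain pointer arithmetic into the net_device allocation and cannot return NULL for a valid dev. Roughly, simplified from the inline in <linux/netdevice.h>:

/* Renamed copy for illustration; the real inline lives in
 * <linux/netdevice.h> and has exactly this shape.
 */
static inline void *my_netdev_priv(const struct net_device *dev)
{
	return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
}
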
diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c
index ab7ebac6fbea..6204cdfe43a6 100644
--- a/drivers/net/ethernet/dec/tulip/xircom_cb.c
+++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c
@@ -28,7 +28,6 @@
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h
index 3699565704c7..7d07a0f5320d 100644
--- a/drivers/net/ethernet/dlink/dl2k.h
+++ b/drivers/net/ethernet/dlink/dl2k.h
@@ -25,7 +25,6 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
-#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index f3d60eb13c3a..8a79a32a5674 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -15,7 +15,6 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 4ccaf9af6fc9..8d09615da585 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -34,7 +34,7 @@
#include "be_hw.h"
#include "be_roce.h"
-#define DRV_VER "4.9.224.0u"
+#define DRV_VER "10.0.600.0u"
#define DRV_NAME "be2net"
#define BE_NAME "Emulex BladeEngine2"
#define BE3_NAME "Emulex BladeEngine3"
@@ -42,7 +42,7 @@
#define OC_NAME_BE OC_NAME "(be3)"
#define OC_NAME_LANCER OC_NAME "(Lancer)"
#define OC_NAME_SH OC_NAME "(Skyhawk)"
-#define DRV_DESC "Emulex OneConnect 10Gbps NIC Driver"
+#define DRV_DESC "Emulex OneConnect NIC Driver"
#define BE_VENDOR_ID 0x19a2
#define EMULEX_VENDOR_ID 0x10df
@@ -283,7 +283,6 @@ struct be_rx_compl_info {
u32 rss_hash;
u16 vlan_tag;
u16 pkt_size;
- u16 rxq_idx;
u16 port;
u8 vlanf;
u8 num_rcvd;
@@ -493,7 +492,7 @@ struct be_adapter {
u16 pvid;
struct phy_info phy;
u8 wol_cap;
- bool wol;
+ bool wol_en;
u32 uc_macs; /* Count of secondary UC MAC programmed */
u16 asic_rev;
u16 qnq_vid;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 94c35c8d799d..48076a6370c3 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1101,23 +1101,22 @@ static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL);
req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
- if (lancer_chip(adapter)) {
- req->hdr.version = 1;
- req->cq_id = cpu_to_le16(cq->id);
-
- AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
- be_encoded_q_len(mccq->len));
- AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
- AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
- ctxt, cq->id);
- AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
- ctxt, 1);
-
- } else {
+ if (BEx_chip(adapter)) {
AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
be_encoded_q_len(mccq->len));
AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
+ } else {
+ req->hdr.version = 1;
+ req->cq_id = cpu_to_le16(cq->id);
+
+ AMAP_SET_BITS(struct amap_mcc_context_v1, ring_size, ctxt,
+ be_encoded_q_len(mccq->len));
+ AMAP_SET_BITS(struct amap_mcc_context_v1, valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_id,
+ ctxt, cq->id);
+ AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_valid,
+ ctxt, 1);
}
/* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
@@ -1187,7 +1186,7 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
int status;
status = be_cmd_mccq_ext_create(adapter, mccq, cq);
- if (status && !lancer_chip(adapter)) {
+ if (status && BEx_chip(adapter)) {
dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
"or newer to avoid conflicting priorities between NIC "
"and FCoE traffic");
@@ -2692,6 +2691,13 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
struct be_cmd_resp_get_fn_privileges *resp =
embedded_payload(wrb);
*privilege = le32_to_cpu(resp->privilege_mask);
+
+ /* In UMC mode the FW does not return the right privileges.
+ * Override with privileges equivalent to the PF's.
+ */
+ if (BEx_chip(adapter) && be_is_mc(adapter) &&
+ be_physfn(adapter))
+ *privilege = MAX_PRIVILEGES;
}
err:
@@ -2736,7 +2742,8 @@ err:
* If pmac_id is returned, pmac_id_valid is returned as true
*/
int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
- bool *pmac_id_valid, u32 *pmac_id, u8 domain)
+ bool *pmac_id_valid, u32 *pmac_id, u32 if_handle,
+ u8 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_mac_list *req;
@@ -2774,7 +2781,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
if (*pmac_id_valid) {
req->mac_id = cpu_to_le32(*pmac_id);
- req->iface_id = cpu_to_le16(adapter->if_handle);
+ req->iface_id = cpu_to_le16(if_handle);
req->perm_override = 0;
} else {
req->perm_override = 1;
@@ -2827,17 +2834,21 @@ out:
return status;
}
-int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, u8 *mac)
+int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, u8 *mac,
+ u32 if_handle, bool active, u32 domain)
{
- bool active = true;
+ if (!active)
+ be_cmd_get_mac_from_list(adapter, mac, &active, &curr_pmac_id,
+ if_handle, domain);
if (BEx_chip(adapter))
return be_cmd_mac_addr_query(adapter, mac, false,
- adapter->if_handle, curr_pmac_id);
+ if_handle, curr_pmac_id);
else
/* Fetch the MAC address using pmac_id */
return be_cmd_get_mac_from_list(adapter, mac, &active,
- &curr_pmac_id, 0);
+ &curr_pmac_id,
+ if_handle, domain);
}
int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
@@ -2856,7 +2867,7 @@ int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
adapter->if_handle, 0);
} else {
status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid,
- NULL, 0);
+ NULL, adapter->if_handle, 0);
}
return status;
@@ -2917,7 +2928,8 @@ int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom)
int status;
status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
- &pmac_id, dom);
+ &pmac_id, if_id, dom);
+
if (!status && active_mac)
be_cmd_pmac_del(adapter, if_id, pmac_id, dom);
@@ -2997,7 +3009,7 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
ctxt, intf_id);
AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
- if (!BEx_chip(adapter)) {
+ if (!BEx_chip(adapter) && mode) {
AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
ctxt, adapter->hba_port_num);
AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1);
@@ -3028,14 +3040,16 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_acpi_wol_magic_config_v1 *req;
- int status;
- int payload_len = sizeof(*req);
+ int status = 0;
struct be_dma_mem cmd;
if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
CMD_SUBSYSTEM_ETH))
return -EPERM;
+ if (be_is_wol_excluded(adapter))
+ return status;
+
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
@@ -3060,7 +3074,7 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
- payload_len, wrb, &cmd);
+ sizeof(*req), wrb, &cmd);
req->hdr.version = 1;
req->query_options = BE_GET_WOL_CAP;
@@ -3070,13 +3084,9 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;
resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *) cmd.va;
- /* the command could succeed misleadingly on old f/w
- * which is not aware of the V1 version. fake an error. */
- if (resp->hdr.response_length < payload_len) {
- status = -1;
- goto err;
- }
adapter->wol_cap = resp->wol_settings;
+ if (adapter->wol_cap & BE_WOL_CAP)
+ adapter->wol_en = true;
}
err:
mutex_unlock(&adapter->mbox_lock);
@@ -3085,6 +3095,76 @@ err:
return status;
}
+
+int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
+{
+ struct be_dma_mem extfat_cmd;
+ struct be_fat_conf_params *cfgs;
+ int status;
+ int i, j;
+
+ memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
+ extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
+ extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
+ &extfat_cmd.dma);
+ if (!extfat_cmd.va)
+ return -ENOMEM;
+
+ status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
+ if (status)
+ goto err;
+
+ cfgs = (struct be_fat_conf_params *)
+ (extfat_cmd.va + sizeof(struct be_cmd_resp_hdr));
+ for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) {
+ u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes);
+ for (j = 0; j < num_modes; j++) {
+ if (cfgs->module[i].trace_lvl[j].mode == MODE_UART)
+ cfgs->module[i].trace_lvl[j].dbg_lvl =
+ cpu_to_le32(level);
+ }
+ }
+
+ status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs);
+err:
+ pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
+ extfat_cmd.dma);
+ return status;
+}
+
+int be_cmd_get_fw_log_level(struct be_adapter *adapter)
+{
+ struct be_dma_mem extfat_cmd;
+ struct be_fat_conf_params *cfgs;
+ int status, j;
+ int level = 0;
+
+ memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
+ extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
+ extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
+ &extfat_cmd.dma);
+
+ if (!extfat_cmd.va) {
+ dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
+ __func__);
+ goto err;
+ }
+
+ status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
+ if (!status) {
+ cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
+ sizeof(struct be_cmd_resp_hdr));
+ for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
+ if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
+ level = cfgs->module[0].trace_lvl[j].dbg_lvl;
+ }
+ }
+ pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
+ extfat_cmd.dma);
+err:
+ return level;
+}
+
int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
struct be_dma_mem *cmd)
{
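
The relocated be_cmd_set_fw_log_level()/be_cmd_get_fw_log_level() follow the driver's pattern for commands whose payload is too large for an embedded WRB: allocate a DMA-coherent buffer, issue the query, read or edit the returned be_fat_conf_params in place, then free the buffer on every exit path. The skeleton of that round trip; issue_cmd and the 4 KB size are placeholders, only the pci_* calls are real:

#include <linux/pci.h>

static int issue_cmd(struct pci_dev *pdev, void *va, dma_addr_t dma,
		     size_t size)
{
	return 0;			/* stub: real code fills a WRB */
}

static int my_large_cmd(struct pci_dev *pdev)
{
	dma_addr_t dma;
	size_t size = 4096;
	void *va;
	int status;

	va = pci_alloc_consistent(pdev, size, &dma);
	if (!va)
		return -ENOMEM;

	status = issue_cmd(pdev, va, dma, size);
	/* ... parse or modify the response in va ... */

	pci_free_consistent(pdev, size, va, dma);
	return status;
}
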
@@ -3609,6 +3689,40 @@ int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable)
return status;
}
+/* Uses MBOX */
+int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile_id)
+{
+ struct be_cmd_req_get_active_profile *req;
+ struct be_mcc_wrb *wrb;
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_ACTIVE_PROFILE, sizeof(*req),
+ wrb, NULL);
+
+ status = be_mbox_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_get_active_profile *resp =
+ embedded_payload(wrb);
+ *profile_id = le16_to_cpu(resp->active_profile_id);
+ }
+
+err:
+ mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
+
int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
{
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 0075686276aa..fc4e076dc202 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -216,6 +216,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_GET_FUNC_CONFIG 160
#define OPCODE_COMMON_GET_PROFILE_CONFIG 164
#define OPCODE_COMMON_SET_PROFILE_CONFIG 165
+#define OPCODE_COMMON_GET_ACTIVE_PROFILE 167
#define OPCODE_COMMON_SET_HSW_CONFIG 153
#define OPCODE_COMMON_GET_FN_PRIVILEGES 170
#define OPCODE_COMMON_READ_OBJECT 171
@@ -452,7 +453,7 @@ struct amap_mcc_context_be {
u8 rsvd2[32];
} __packed;
-struct amap_mcc_context_lancer {
+struct amap_mcc_context_v1 {
u8 async_cq_id[16];
u8 ring_size[4];
u8 rsvd0[12];
@@ -476,7 +477,7 @@ struct be_cmd_req_mcc_ext_create {
u16 num_pages;
u16 cq_id;
u32 async_event_bitmap[1];
- u8 context[sizeof(struct amap_mcc_context_be) / 8];
+ u8 context[sizeof(struct amap_mcc_context_v1) / 8];
struct phys_addr pages[8];
} __packed;
@@ -1097,6 +1098,14 @@ struct be_cmd_resp_query_fw_cfg {
u32 function_caps;
};
+/* Is BE in a multi-channel mode */
+static inline bool be_is_mc(struct be_adapter *adapter)
+{
+ return adapter->function_mode & FLEX10_MODE ||
+ adapter->function_mode & VNIC_MODE ||
+ adapter->function_mode & UMC_ENABLED;
+}
+
/******************** RSS Config ****************************************/
/* RSS type Input parameters used to compute RX hash
* RSS_ENABLE_IPV4 SRC IPv4, DST IPv4
@@ -1917,6 +1926,17 @@ struct be_cmd_resp_set_profile_config {
struct be_cmd_resp_hdr hdr;
};
+struct be_cmd_req_get_active_profile {
+ struct be_cmd_req_hdr hdr;
+ u32 rsvd;
+} __packed;
+
+struct be_cmd_resp_get_active_profile {
+ struct be_cmd_resp_hdr hdr;
+ u16 active_profile_id;
+ u16 next_profile_id;
+} __packed;
+
struct be_cmd_enable_disable_vf {
struct be_cmd_req_hdr hdr;
u8 enable;
@@ -2037,8 +2057,10 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
u32 vf_num);
int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
- bool *pmac_id_active, u32 *pmac_id, u8 domain);
-int be_cmd_get_active_mac(struct be_adapter *adapter, u32 pmac_id, u8 *mac);
+ bool *pmac_id_active, u32 *pmac_id,
+ u32 if_handle, u8 domain);
+int be_cmd_get_active_mac(struct be_adapter *adapter, u32 pmac_id, u8 *mac,
+ u32 if_handle, bool active, u32 domain);
int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac);
int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, u8 mac_count,
u32 domain);
@@ -2048,6 +2070,8 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, u32 domain,
int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, u32 domain,
u16 intf_id, u8 *mode);
int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter);
+int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level);
+int be_cmd_get_fw_log_level(struct be_adapter *adapter);
int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
struct be_dma_mem *cmd);
int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
@@ -2063,6 +2087,7 @@ int be_cmd_get_func_config(struct be_adapter *adapter,
int be_cmd_get_profile_config(struct be_adapter *adapter,
struct be_resources *res, u8 domain);
int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps, u8 domain);
+int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile);
int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
int vf_num);
int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 08330034d9ef..05be0070f55f 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -713,12 +713,13 @@ be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct be_adapter *adapter = netdev_priv(netdev);
- if (be_is_wol_supported(adapter)) {
+ if (adapter->wol_cap & BE_WOL_CAP) {
wol->supported |= WAKE_MAGIC;
- if (adapter->wol)
+ if (adapter->wol_en)
wol->wolopts |= WAKE_MAGIC;
- } else
+ } else {
wol->wolopts = 0;
+ }
memset(&wol->sopass, 0, sizeof(wol->sopass));
}
@@ -730,15 +731,15 @@ be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
if (wol->wolopts & ~WAKE_MAGIC)
return -EOPNOTSUPP;
- if (!be_is_wol_supported(adapter)) {
+ if (!(adapter->wol_cap & BE_WOL_CAP)) {
dev_warn(&adapter->pdev->dev, "WOL not supported\n");
return -EOPNOTSUPP;
}
if (wol->wolopts & WAKE_MAGIC)
- adapter->wol = true;
+ adapter->wol_en = true;
else
- adapter->wol = false;
+ adapter->wol_en = false;
return 0;
}
@@ -904,73 +905,21 @@ static u32 be_get_msg_level(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
- if (lancer_chip(adapter)) {
- dev_err(&adapter->pdev->dev, "Operation not supported\n");
- return -EOPNOTSUPP;
- }
-
return adapter->msg_enable;
}
-static void be_set_fw_log_level(struct be_adapter *adapter, u32 level)
-{
- struct be_dma_mem extfat_cmd;
- struct be_fat_conf_params *cfgs;
- int status;
- int i, j;
-
- memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
- extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
- extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
- &extfat_cmd.dma);
- if (!extfat_cmd.va) {
- dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
- __func__);
- goto err;
- }
- status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
- if (!status) {
- cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
- sizeof(struct be_cmd_resp_hdr));
- for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) {
- u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes);
- for (j = 0; j < num_modes; j++) {
- if (cfgs->module[i].trace_lvl[j].mode ==
- MODE_UART)
- cfgs->module[i].trace_lvl[j].dbg_lvl =
- cpu_to_le32(level);
- }
- }
- status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd,
- cfgs);
- if (status)
- dev_err(&adapter->pdev->dev,
- "Message level set failed\n");
- } else {
- dev_err(&adapter->pdev->dev, "Message level get failed\n");
- }
-
- pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
- extfat_cmd.dma);
-err:
- return;
-}
-
static void be_set_msg_level(struct net_device *netdev, u32 level)
{
struct be_adapter *adapter = netdev_priv(netdev);
- if (lancer_chip(adapter)) {
- dev_err(&adapter->pdev->dev, "Operation not supported\n");
- return;
- }
-
if (adapter->msg_enable == level)
return;
if ((level & NETIF_MSG_HW) != (adapter->msg_enable & NETIF_MSG_HW))
- be_set_fw_log_level(adapter, level & NETIF_MSG_HW ?
- FW_LOG_LEVEL_DEFAULT : FW_LOG_LEVEL_FATAL);
+ if (BEx_chip(adapter))
+ be_cmd_set_fw_log_level(adapter, level & NETIF_MSG_HW ?
+ FW_LOG_LEVEL_DEFAULT :
+ FW_LOG_LEVEL_FATAL);
adapter->msg_enable = level;
return;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index a37039d353c5..04ac9c6a0d39 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -121,12 +121,6 @@ static const char * const ue_status_hi_desc[] = {
"Unknown"
};
-/* Is BE in a multi-channel mode */
-static inline bool be_is_mc(struct be_adapter *adapter) {
- return (adapter->function_mode & FLEX10_MODE ||
- adapter->function_mode & VNIC_MODE ||
- adapter->function_mode & UMC_ENABLED);
-}
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
@@ -258,6 +252,12 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
+ /* Proceed further only if the user-provided MAC is different
+ * from the active MAC
+ */
+ if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
+ return 0;
+
/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
* privilege or if PF did not provision the new MAC address.
* On BE3, this cmd will always fail if the VF doesn't have the
@@ -280,14 +280,15 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
/* Decide if the new MAC is successfully activated only after
* querying the FW
*/
- status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac);
+ status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
+ adapter->if_handle, true, 0);
if (status)
goto err;
/* The MAC change did not happen, either due to lack of privilege
* or PF didn't pre-provision.
*/
- if (memcmp(addr->sa_data, mac, ETH_ALEN)) {
+ if (!ether_addr_equal(addr->sa_data, mac)) {
status = -EPERM;
goto err;
}
@@ -1096,8 +1097,6 @@ static int be_vid_config(struct be_adapter *adapter)
dev_info(&adapter->pdev->dev,
"Disabling VLAN Promiscuous mode.\n");
adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
- dev_info(&adapter->pdev->dev,
- "Re-Enabling HW VLAN filtering\n");
}
}
}
@@ -1105,12 +1104,12 @@ static int be_vid_config(struct be_adapter *adapter)
return status;
set_vlan_promisc:
- dev_warn(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
+ if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
+ return 0;
status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
if (!status) {
dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
- dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering\n");
adapter->flags |= BE_FLAGS_VLAN_PROMISC;
} else
dev_err(&adapter->pdev->dev,
@@ -1123,19 +1122,18 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
struct be_adapter *adapter = netdev_priv(netdev);
int status = 0;
-
/* Packets with VID 0 are always received by Lancer by default */
if (lancer_chip(adapter) && vid == 0)
goto ret;
adapter->vlan_tag[vid] = 1;
- if (adapter->vlans_added <= (be_max_vlans(adapter) + 1))
- status = be_vid_config(adapter);
+ adapter->vlans_added++;
- if (!status)
- adapter->vlans_added++;
- else
+ status = be_vid_config(adapter);
+ if (status) {
+ adapter->vlans_added--;
adapter->vlan_tag[vid] = 0;
+ }
ret:
return status;
}
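
be_vlan_add_vid() now updates the driver's bookkeeping first and rolls it back if be_vid_config() fails, instead of trying to predict success from the filter count. The rollback idiom in isolation; my_state, apply_to_hw and friends are placeholders for the be_adapter equivalents:

struct my_state {			/* stand-in for be_adapter */
	u8 vlan_tag[4096];
	int vlans_added;
};

static int apply_to_hw(struct my_state *s)
{
	return 0;			/* stub: real code programs the NIC */
}

static int my_add_vid(struct my_state *s, u16 vid)
{
	int status;

	s->vlan_tag[vid] = 1;		/* optimistic bookkeeping */
	s->vlans_added++;

	status = apply_to_hw(s);
	if (status) {			/* roll back both updates */
		s->vlans_added--;
		s->vlan_tag[vid] = 0;
	}
	return status;
}
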
@@ -1150,9 +1148,7 @@ static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
goto ret;
adapter->vlan_tag[vid] = 0;
- if (adapter->vlans_added <= be_max_vlans(adapter))
- status = be_vid_config(adapter);
-
+ status = be_vid_config(adapter);
if (!status)
adapter->vlans_added--;
else
@@ -1442,12 +1438,12 @@ static inline bool csum_passed(struct be_rx_compl_info *rxcp)
(rxcp->ip_csum || rxcp->ipv6);
}
-static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
- u16 frag_idx)
+static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
struct be_adapter *adapter = rxo->adapter;
struct be_rx_page_info *rx_page_info;
struct be_queue_info *rxq = &rxo->q;
+ u16 frag_idx = rxq->tail;
rx_page_info = &rxo->page_info_tbl[frag_idx];
BUG_ON(!rx_page_info->page);
@@ -1459,6 +1455,7 @@ static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
rx_page_info->last_page_user = false;
}
+ queue_tail_inc(rxq);
atomic_dec(&rxq->used);
return rx_page_info;
}
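
Dropping rxq_idx from the completion works because RX fragments are always consumed in ring order: get_rx_page_info() can simply read the queue's own tail and advance it, and the cleanup path below becomes "drain while used > 0". A stripped-down model of that consumer, with the my_ring fields invented for illustration:

#include <linux/atomic.h>
#include <linux/types.h>

struct my_ring {			/* hypothetical ring state */
	u16 head, tail, len;
	atomic_t used;
	void *entries[256];
};

static void *ring_consume(struct my_ring *q)
{
	void *e = q->entries[q->tail];

	q->tail = (q->tail + 1) % q->len;	/* queue_tail_inc() analogue */
	atomic_dec(&q->used);
	return e;
}

/* Drain everything still posted, e.g. on queue teardown. */
static void ring_drain(struct my_ring *q)
{
	while (atomic_read(&q->used) > 0)
		ring_consume(q);
}
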
@@ -1467,15 +1464,13 @@ static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
static void be_rx_compl_discard(struct be_rx_obj *rxo,
struct be_rx_compl_info *rxcp)
{
- struct be_queue_info *rxq = &rxo->q;
struct be_rx_page_info *page_info;
u16 i, num_rcvd = rxcp->num_rcvd;
for (i = 0; i < num_rcvd; i++) {
- page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
+ page_info = get_rx_page_info(rxo);
put_page(page_info->page);
memset(page_info, 0, sizeof(*page_info));
- index_inc(&rxcp->rxq_idx, rxq->len);
}
}
@@ -1486,13 +1481,12 @@ static void be_rx_compl_discard(struct be_rx_obj *rxo,
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
struct be_rx_compl_info *rxcp)
{
- struct be_queue_info *rxq = &rxo->q;
struct be_rx_page_info *page_info;
u16 i, j;
u16 hdr_len, curr_frag_len, remaining;
u8 *start;
- page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
+ page_info = get_rx_page_info(rxo);
start = page_address(page_info->page) + page_info->page_offset;
prefetch(start);
@@ -1526,10 +1520,9 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
}
/* More frags present for this completion */
- index_inc(&rxcp->rxq_idx, rxq->len);
remaining = rxcp->pkt_size - curr_frag_len;
for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
- page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
+ page_info = get_rx_page_info(rxo);
curr_frag_len = min(remaining, rx_frag_size);
/* Coalesce all frags from the same physical page in one slot */
@@ -1550,7 +1543,6 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
skb->data_len += curr_frag_len;
skb->truesize += rx_frag_size;
remaining -= curr_frag_len;
- index_inc(&rxcp->rxq_idx, rxq->len);
page_info->page = NULL;
}
BUG_ON(j > MAX_SKB_FRAGS);
@@ -1581,7 +1573,7 @@ static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
skb->protocol = eth_type_trans(skb, netdev);
skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
if (netdev->features & NETIF_F_RXHASH)
- skb->rxhash = rxcp->rss_hash;
+ skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
skb_mark_napi_id(skb, napi);
if (rxcp->vlanf)
@@ -1598,7 +1590,6 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
struct be_adapter *adapter = rxo->adapter;
struct be_rx_page_info *page_info;
struct sk_buff *skb = NULL;
- struct be_queue_info *rxq = &rxo->q;
u16 remaining, curr_frag_len;
u16 i, j;
@@ -1610,7 +1601,7 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
remaining = rxcp->pkt_size;
for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
- page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
+ page_info = get_rx_page_info(rxo);
curr_frag_len = min(remaining, rx_frag_size);
@@ -1628,7 +1619,6 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
skb->truesize += rx_frag_size;
remaining -= curr_frag_len;
- index_inc(&rxcp->rxq_idx, rxq->len);
memset(page_info, 0, sizeof(*page_info));
}
BUG_ON(j > MAX_SKB_FRAGS);
@@ -1639,7 +1629,7 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
if (adapter->netdev->features & NETIF_F_RXHASH)
- skb->rxhash = rxcp->rss_hash;
+ skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
skb_mark_napi_id(skb, napi);
if (rxcp->vlanf)
@@ -1663,8 +1653,6 @@ static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
rxcp->ipv6 =
AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
- rxcp->rxq_idx =
- AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
rxcp->num_rcvd =
AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
rxcp->pkt_type =
@@ -1695,8 +1683,6 @@ static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
rxcp->ipv6 =
AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
- rxcp->rxq_idx =
- AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
rxcp->num_rcvd =
AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
rxcp->pkt_type =
@@ -1921,7 +1907,6 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
struct be_rx_compl_info *rxcp;
struct be_adapter *adapter = rxo->adapter;
int flush_wait = 0;
- u16 tail;
/* Consume pending rx completions.
* Wait for the flush completion (identified by zero num_rcvd)
@@ -1954,9 +1939,8 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
be_cq_notify(adapter, rx_cq->id, false, 0);
/* Then free posted rx buffers that were not used */
- tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
- for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
- page_info = get_rx_page_info(rxo, tail);
+ while (atomic_read(&rxq->used) > 0) {
+ page_info = get_rx_page_info(rxo);
put_page(page_info->page);
memset(page_info, 0, sizeof(*page_info));
}
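
Editor's note: the hunks above all drop the caller-maintained rxq_idx/tail in favour of letting get_rx_page_info() advance the queue itself. A minimal sketch of that pattern follows; the structure shape is illustrative, not the real be_queue_info layout.

#include <linux/mm_types.h>

struct rx_ring_example {
	unsigned int tail;		/* next entry to consume */
	unsigned int len;		/* ring size, a power of two */
	struct page *pages[];
};

static struct page *rx_ring_consume_example(struct rx_ring_example *rxq)
{
	struct page *p = rxq->pages[rxq->tail];

	/* Advancing inside the accessor means no caller can forget
	 * the index update, which is what the fragndx bookkeeping
	 * removed above used to require.
	 */
	rxq->tail = (rxq->tail + 1) & (rxq->len - 1);
	return p;
}
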
@@ -2891,14 +2875,11 @@ static int be_vfs_mac_query(struct be_adapter *adapter)
int status, vf;
u8 mac[ETH_ALEN];
struct be_vf_cfg *vf_cfg;
- bool active = false;
for_all_vfs(adapter, vf_cfg, vf) {
- be_cmd_get_mac_from_list(adapter, mac, &active,
- &vf_cfg->pmac_id, 0);
-
- status = be_cmd_mac_addr_query(adapter, mac, false,
- vf_cfg->if_handle, 0);
+ status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
+ mac, vf_cfg->if_handle,
+ false, vf+1);
if (status)
return status;
memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
@@ -3240,6 +3221,7 @@ static int be_get_resources(struct be_adapter *adapter)
/* Routine to query per function resource limits */
static int be_get_config(struct be_adapter *adapter)
{
+ u16 profile_id;
int status;
status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
@@ -3249,6 +3231,13 @@ static int be_get_config(struct be_adapter *adapter)
if (status)
return status;
+ if (be_physfn(adapter)) {
+ status = be_cmd_get_active_profile(adapter, &profile_id);
+ if (!status)
+ dev_info(&adapter->pdev->dev,
+ "Using profile 0x%x\n", profile_id);
+ }
+
status = be_get_resources(adapter);
if (status)
return status;
@@ -3403,11 +3392,6 @@ static int be_setup(struct be_adapter *adapter)
goto err;
be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
- /* In UMC mode FW does not return right privileges.
- * Override with correct privilege equivalent to PF.
- */
- if (be_is_mc(adapter))
- adapter->cmd_privileges = MAX_PRIVILEGES;
status = be_mac_setup(adapter);
if (status)
@@ -3426,6 +3410,8 @@ static int be_setup(struct be_adapter *adapter)
be_set_rx_mode(adapter->netdev);
+ be_cmd_get_acpi_wol_cap(adapter);
+
be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
@@ -4295,74 +4281,22 @@ static void be_remove(struct pci_dev *pdev)
free_netdev(adapter->netdev);
}
-bool be_is_wol_supported(struct be_adapter *adapter)
-{
- return ((adapter->wol_cap & BE_WOL_CAP) &&
- !be_is_wol_excluded(adapter)) ? true : false;
-}
-
-u32 be_get_fw_log_level(struct be_adapter *adapter)
-{
- struct be_dma_mem extfat_cmd;
- struct be_fat_conf_params *cfgs;
- int status;
- u32 level = 0;
- int j;
-
- if (lancer_chip(adapter))
- return 0;
-
- memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
- extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
- extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
- &extfat_cmd.dma);
-
- if (!extfat_cmd.va) {
- dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
- __func__);
- goto err;
- }
-
- status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
- if (!status) {
- cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
- sizeof(struct be_cmd_resp_hdr));
- for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
- if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
- level = cfgs->module[0].trace_lvl[j].dbg_lvl;
- }
- }
- pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
- extfat_cmd.dma);
-err:
- return level;
-}
-
static int be_get_initial_config(struct be_adapter *adapter)
{
- int status;
- u32 level;
+ int status, level;
status = be_cmd_get_cntl_attributes(adapter);
if (status)
return status;
- status = be_cmd_get_acpi_wol_cap(adapter);
- if (status) {
- /* in case of a failure to get wol capabillities
- * check the exclusion list to determine WOL capability */
- if (!be_is_wol_excluded(adapter))
- adapter->wol_cap |= BE_WOL_CAP;
- }
-
- if (be_is_wol_supported(adapter))
- adapter->wol = true;
-
/* Must be a power of 2 or else MODULO will BUG_ON */
adapter->be_get_temp_freq = 64;
- level = be_get_fw_log_level(adapter);
- adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
+ if (BEx_chip(adapter)) {
+ level = be_cmd_get_fw_log_level(adapter);
+ adapter->msg_enable =
+ level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
+ }
adapter->cfg_num_qs = netif_get_num_default_rss_queues();
return 0;
@@ -4625,7 +4559,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
struct be_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
- if (adapter->wol)
+ if (adapter->wol_en)
be_setup_wol(adapter, true);
be_intr_set(adapter, false);
@@ -4681,7 +4615,7 @@ static int be_resume(struct pci_dev *pdev)
msecs_to_jiffies(1000));
netif_device_attach(netdev);
- if (adapter->wol)
+ if (adapter->wol_en)
be_setup_wol(adapter, false);
return 0;
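
Editor's note: the two skb->rxhash conversions above follow the tree-wide move to skb_set_hash(), which records the hash type alongside the value. A minimal sketch of the pattern; my_rx_set_hash and rss_hash are stand-in names.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void my_rx_set_hash(struct sk_buff *skb, struct net_device *netdev,
			   u32 rss_hash)
{
	/* PKT_HASH_TYPE_L3 tells the stack the hash covers addresses
	 * only, so it will not be reused where an L4 flow hash is
	 * required.
	 */
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L3);
}
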
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 212f44b3a773..c11ecbc98149 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -24,7 +24,6 @@
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -767,7 +766,7 @@ static void ftgmac100_free_buffers(struct ftgmac100 *priv)
continue;
dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE);
- dev_kfree_skb(skb);
+ kfree_skb(skb);
}
dma_free_coherent(priv->dev, sizeof(struct ftgmac100_descs),
@@ -1149,7 +1148,7 @@ static int ftgmac100_hard_start_xmit(struct sk_buff *skb,
netdev_dbg(netdev, "tx packet too big\n");
netdev->stats.tx_dropped++;
- dev_kfree_skb(skb);
+ kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -1160,7 +1159,7 @@ static int ftgmac100_hard_start_xmit(struct sk_buff *skb,
netdev_err(netdev, "map socket buffer failed\n");
netdev->stats.tx_dropped++;
- dev_kfree_skb(skb);
+ kfree_skb(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 0120217a16dd..3b8d6d19ff05 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -339,7 +339,8 @@ struct fec_enet_private {
void fec_ptp_init(struct platform_device *pdev);
void fec_ptp_start_cyclecounter(struct net_device *ndev);
-int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd);
+int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr);
+int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr);
/****************************************************************************/
#endif /* FEC_H */
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 50bb71c663e2..d4782b42401b 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -29,7 +29,6 @@
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -1679,8 +1678,12 @@ static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
if (!phydev)
return -ENODEV;
- if (cmd == SIOCSHWTSTAMP && fep->bufdesc_ex)
- return fec_ptp_ioctl(ndev, rq, cmd);
+ if (fep->bufdesc_ex) {
+ if (cmd == SIOCSHWTSTAMP)
+ return fec_ptp_set(ndev, rq);
+ if (cmd == SIOCGHWTSTAMP)
+ return fec_ptp_get(ndev, rq);
+ }
return phy_mii_ioctl(phydev, rq, cmd);
}
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 5007e4f9fff9..89ccb5b08708 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -28,7 +28,6 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -274,7 +273,7 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp,
* @ifreq: ioctl data
* @cmd: particular ioctl requested
*/
-int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
+int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr)
{
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -321,6 +320,20 @@ int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
-EFAULT : 0;
}
+int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct hwtstamp_config config;
+
+ config.flags = 0;
+ config.tx_type = fep->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ config.rx_filter = (fep->hwts_rx_en ?
+ HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
/**
* fec_time_keep - call timecounter_read every second to avoid timer overrun
* because ENET just support 32bit counter, will timeout in 4s
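
Editor's note: fec_ptp_get() above services the new SIOCGHWTSTAMP request. A minimal userspace sketch of the read-back, assuming an interface named eth0 and kernel headers that already carry the new ioctl; error handling is deliberately thin.

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

int main(void)
{
	struct hwtstamp_config cfg = { 0 };
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	/* Unlike SIOCSHWTSTAMP, this only reads the current state. */
	if (ioctl(fd, SIOCGHWTSTAMP, &ifr) == 0)
		printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);
	return 0;
}
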
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 56f2f608a9f4..62f042d4aaa9 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -24,7 +24,6 @@
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
index f8b92864fc52..f5383abbf399 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
@@ -20,7 +20,6 @@
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index a9a00f39521a..fc5413488496 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -20,7 +20,6 @@
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
index d37cd4ebac65..b4bf02f57d43 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
@@ -20,7 +20,6 @@
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
index 67caaacd19ec..3d3fde66c2cc 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
@@ -15,7 +15,6 @@
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index ac5d447ff8c4..7e69c983d12a 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -21,7 +21,6 @@
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index c4f65067cf7c..583e71ab7f51 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -20,7 +20,6 @@
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/mii.h>
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index b14d7904a075..ad5a5aadc7e1 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -70,7 +70,6 @@
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -795,8 +794,7 @@ err_grp_init:
return err;
}
-static int gfar_hwtstamp_ioctl(struct net_device *netdev,
- struct ifreq *ifr, int cmd)
+static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
struct hwtstamp_config config;
struct gfar_private *priv = netdev_priv(netdev);
@@ -845,7 +843,20 @@ static int gfar_hwtstamp_ioctl(struct net_device *netdev,
-EFAULT : 0;
}
-/* Ioctl MII Interface */
+static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ struct gfar_private *priv = netdev_priv(netdev);
+
+ config.flags = 0;
+ config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ config.rx_filter = (priv->hwts_rx_en ?
+ HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct gfar_private *priv = netdev_priv(dev);
@@ -854,7 +865,9 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return -EINVAL;
if (cmd == SIOCSHWTSTAMP)
- return gfar_hwtstamp_ioctl(dev, rq, cmd);
+ return gfar_hwtstamp_set(dev, rq);
+ if (cmd == SIOCGHWTSTAMP)
+ return gfar_hwtstamp_get(dev, rq);
if (!priv->phydev)
return -ENODEV;
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 114c58f9d8d2..52bb2b0195cc 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -29,7 +29,6 @@
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index d3d7ede27ef1..63d234419cc1 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -22,7 +22,6 @@
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -889,11 +888,9 @@ static int gfar_set_hash_opts(struct gfar_private *priv,
static int gfar_check_filer_hardware(struct gfar_private *priv)
{
- struct gfar __iomem *regs = NULL;
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 i;
- regs = priv->gfargrp[0].regs;
-
/* Check if we are in FIFO mode */
i = gfar_read(&regs->ecntrl);
i &= ECNTRL_FIFM;
@@ -927,7 +924,7 @@ static int gfar_check_filer_hardware(struct gfar_private *priv)
/* Sets the properties for arbitrary filer rule
* to the first 4 Layer 4 Bytes
*/
- regs->rbifx = 0xC0C1C2C3;
+ gfar_write(&regs->rbifx, 0xC0C1C2C3);
return 0;
}
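
Editor's note: the rbifx change above replaces a plain store to a device register with the driver's accessor; registers obtained from ioremap() are __iomem and must go through helpers (gfar_write() wraps the big-endian MMIO store on this hardware). A minimal sketch using the generic accessor; the real driver's helper also fixes the byte order for its registers.

#include <linux/io.h>
#include <linux/types.h>

static void set_filer_bytes_example(u32 __iomem *rbifx)
{
	/* Accessor: an ordered MMIO write that keeps sparse's
	 * __iomem checking intact.
	 */
	iowrite32(0xC0C1C2C3, rbifx);
	/* A direct "*rbifx = 0xC0C1C2C3;" would bypass both. */
}
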
@@ -1055,10 +1052,18 @@ static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value,
struct ethtool_tcpip4_spec *mask,
struct filer_table *tab)
{
- gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
- gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
- gfar_set_attribute(value->pdst, mask->pdst, RQFCR_PID_DPT, tab);
- gfar_set_attribute(value->psrc, mask->psrc, RQFCR_PID_SPT, tab);
+ gfar_set_attribute(be32_to_cpu(value->ip4src),
+ be32_to_cpu(mask->ip4src),
+ RQFCR_PID_SIA, tab);
+ gfar_set_attribute(be32_to_cpu(value->ip4dst),
+ be32_to_cpu(mask->ip4dst),
+ RQFCR_PID_DIA, tab);
+ gfar_set_attribute(be16_to_cpu(value->pdst),
+ be16_to_cpu(mask->pdst),
+ RQFCR_PID_DPT, tab);
+ gfar_set_attribute(be16_to_cpu(value->psrc),
+ be16_to_cpu(mask->psrc),
+ RQFCR_PID_SPT, tab);
gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
}
@@ -1067,12 +1072,17 @@ static void gfar_set_user_ip(struct ethtool_usrip4_spec *value,
struct ethtool_usrip4_spec *mask,
struct filer_table *tab)
{
- gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
- gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
+ gfar_set_attribute(be32_to_cpu(value->ip4src),
+ be32_to_cpu(mask->ip4src),
+ RQFCR_PID_SIA, tab);
+ gfar_set_attribute(be32_to_cpu(value->ip4dst),
+ be32_to_cpu(mask->ip4dst),
+ RQFCR_PID_DIA, tab);
gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
gfar_set_attribute(value->proto, mask->proto, RQFCR_PID_L4P, tab);
- gfar_set_attribute(value->l4_4_bytes, mask->l4_4_bytes, RQFCR_PID_ARB,
- tab);
+ gfar_set_attribute(be32_to_cpu(value->l4_4_bytes),
+ be32_to_cpu(mask->l4_4_bytes),
+ RQFCR_PID_ARB, tab);
}
@@ -1139,7 +1149,41 @@ static void gfar_set_ether(struct ethhdr *value, struct ethhdr *mask,
}
}
- gfar_set_attribute(value->h_proto, mask->h_proto, RQFCR_PID_ETY, tab);
+ gfar_set_attribute(be16_to_cpu(value->h_proto),
+ be16_to_cpu(mask->h_proto),
+ RQFCR_PID_ETY, tab);
+}
+
+static inline u32 vlan_tci_vid(struct ethtool_rx_flow_spec *rule)
+{
+ return be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_VID_MASK;
+}
+
+static inline u32 vlan_tci_vidm(struct ethtool_rx_flow_spec *rule)
+{
+ return be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_VID_MASK;
+}
+
+static inline u32 vlan_tci_cfi(struct ethtool_rx_flow_spec *rule)
+{
+ return be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_CFI_MASK;
+}
+
+static inline u32 vlan_tci_cfim(struct ethtool_rx_flow_spec *rule)
+{
+ return be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_CFI_MASK;
+}
+
+static inline u32 vlan_tci_prio(struct ethtool_rx_flow_spec *rule)
+{
+ return (be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_PRIO_MASK) >>
+ VLAN_PRIO_SHIFT;
+}
+
+static inline u32 vlan_tci_priom(struct ethtool_rx_flow_spec *rule)
+{
+ return (be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_PRIO_MASK) >>
+ VLAN_PRIO_SHIFT;
}
/* Convert a rule to binary filter format of gianfar */
@@ -1153,22 +1197,21 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
u32 old_index = tab->index;
/* Check if vlan is wanted */
- if ((rule->flow_type & FLOW_EXT) && (rule->m_ext.vlan_tci != 0xFFFF)) {
+ if ((rule->flow_type & FLOW_EXT) &&
+ (rule->m_ext.vlan_tci != cpu_to_be16(0xFFFF))) {
if (!rule->m_ext.vlan_tci)
- rule->m_ext.vlan_tci = 0xFFFF;
+ rule->m_ext.vlan_tci = cpu_to_be16(0xFFFF);
vlan = RQFPR_VLN;
vlan_mask = RQFPR_VLN;
/* Separate the fields */
- id = rule->h_ext.vlan_tci & VLAN_VID_MASK;
- id_mask = rule->m_ext.vlan_tci & VLAN_VID_MASK;
- cfi = rule->h_ext.vlan_tci & VLAN_CFI_MASK;
- cfi_mask = rule->m_ext.vlan_tci & VLAN_CFI_MASK;
- prio = (rule->h_ext.vlan_tci & VLAN_PRIO_MASK) >>
- VLAN_PRIO_SHIFT;
- prio_mask = (rule->m_ext.vlan_tci & VLAN_PRIO_MASK) >>
- VLAN_PRIO_SHIFT;
+ id = vlan_tci_vid(rule);
+ id_mask = vlan_tci_vidm(rule);
+ cfi = vlan_tci_cfi(rule);
+ cfi_mask = vlan_tci_cfim(rule);
+ prio = vlan_tci_prio(rule);
+ prio_mask = vlan_tci_priom(rule);
if (cfi == VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) {
vlan |= RQFPR_CFI;
@@ -1666,10 +1709,10 @@ static void gfar_invert_masks(struct ethtool_rx_flow_spec *flow)
for (i = 0; i < sizeof(flow->m_u); i++)
flow->m_u.hdata[i] ^= 0xFF;
- flow->m_ext.vlan_etype ^= 0xFFFF;
- flow->m_ext.vlan_tci ^= 0xFFFF;
- flow->m_ext.data[0] ^= ~0;
- flow->m_ext.data[1] ^= ~0;
+ flow->m_ext.vlan_etype ^= cpu_to_be16(0xFFFF);
+ flow->m_ext.vlan_tci ^= cpu_to_be16(0xFFFF);
+ flow->m_ext.data[0] ^= cpu_to_be32(~0);
+ flow->m_ext.data[1] ^= cpu_to_be32(~0);
}
static int gfar_add_cls(struct gfar_private *priv,
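
Editor's note: the gfar_set_attribute() and vlan_tci conversions above all fix the same class of bug: ethtool flow-spec fields are big-endian (__be16/__be32) while the filer expects host-order values. A minimal sketch of the rule, using the real VLAN_VID_MASK from <linux/if_vlan.h>:

#include <linux/if_vlan.h>
#include <linux/types.h>

static u32 vid_from_tci_example(__be16 vlan_tci)
{
	/* Convert first, then mask: VLAN_VID_MASK (0x0fff) is a
	 * host-order constant, so masking the raw __be16 value
	 * gives the wrong bits on little-endian CPUs.
	 */
	return be16_to_cpu(vlan_tci) & VLAN_VID_MASK;
}
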
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index e006a09ba899..abc28da27042 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -22,7 +22,6 @@
#include <linux/device.h>
#include <linux/hrtimer.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -134,7 +133,7 @@ struct gianfar_ptp_registers {
#define REG_SIZE sizeof(struct gianfar_ptp_registers)
struct etsects {
- struct gianfar_ptp_registers *regs;
+ struct gianfar_ptp_registers __iomem *regs;
spinlock_t lock; /* protects regs */
struct ptp_clock *clock;
struct ptp_clock_info caps;
diff --git a/drivers/net/ethernet/freescale/gianfar_sysfs.c b/drivers/net/ethernet/freescale/gianfar_sysfs.c
index acb55af7e3f3..e02dd1378751 100644
--- a/drivers/net/ethernet/freescale/gianfar_sysfs.c
+++ b/drivers/net/ethernet/freescale/gianfar_sysfs.c
@@ -24,7 +24,6 @@
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/spinlock.h>
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 5548b6d00c31..72291a8904a9 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -435,11 +435,6 @@ static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth,
QE_CR_PROTOCOL_ETHERNET, 0);
}
-static inline int compare_addr(u8 **addr1, u8 **addr2)
-{
- return memcmp(addr1, addr2, ETH_ALEN);
-}
-
#ifdef DEBUG
static void get_statistics(struct ucc_geth_private *ugeth,
struct ucc_geth_tx_firmware_statistics *
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index e79aaf9ae52a..413329eff2ff 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -16,7 +16,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/stddef.h>
#include <linux/interrupt.h>
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index ef46b58cb4e9..7becab1aa3e4 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -35,7 +35,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/string.h>
diff --git a/drivers/net/ethernet/i825xx/lasi_82596.c b/drivers/net/ethernet/i825xx/lasi_82596.c
index f42f1b707733..d787fdd5db7b 100644
--- a/drivers/net/ethernet/i825xx/lasi_82596.c
+++ b/drivers/net/ethernet/i825xx/lasi_82596.c
@@ -79,7 +79,6 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
-#include <linux/init.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
index 861fa15e1e81..17fca323c143 100644
--- a/drivers/net/ethernet/i825xx/lib82596.c
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -78,7 +78,6 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
-#include <linux/init.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c
index 4ceae9a30274..372fa8d1fda1 100644
--- a/drivers/net/ethernet/i825xx/sni_82596.c
+++ b/drivers/net/ethernet/i825xx/sni_82596.c
@@ -13,7 +13,6 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
-#include <linux/init.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/platform_device.h>
diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h
index 70074792bdef..67f342a9f65e 100644
--- a/drivers/net/ethernet/ibm/emac/core.h
+++ b/drivers/net/ethernet/ibm/emac/core.h
@@ -26,7 +26,6 @@
#define __IBM_NEWEMAC_CORE_H
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 952d795230a4..4be971590461 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -12,8 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Copyright (C) IBM Corporation, 2003, 2010
*
@@ -1276,18 +1275,21 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
{
struct net_device *netdev = dev_get_drvdata(&vdev->dev);
struct ibmveth_adapter *adapter;
+ struct iommu_table *tbl;
unsigned long ret;
int i;
int rxqentries = 1;
+ tbl = get_iommu_table_base(&vdev->dev);
+
/* netdev inits at probe time along with the structures we need below*/
if (netdev == NULL)
- return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT);
+ return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT, tbl);
adapter = netdev_priv(netdev);
ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
- ret += IOMMU_PAGE_ALIGN(netdev->mtu);
+ ret += IOMMU_PAGE_ALIGN(netdev->mtu, tbl);
for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
/* add the size of the active receive buffers */
@@ -1295,11 +1297,12 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
ret +=
adapter->rx_buff_pool[i].size *
IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
- buff_size);
+ buff_size, tbl);
rxqentries += adapter->rx_buff_pool[i].size;
}
/* add the size of the receive queue entries */
- ret += IOMMU_PAGE_ALIGN(rxqentries * sizeof(struct ibmveth_rx_q_entry));
+ ret += IOMMU_PAGE_ALIGN(
+ rxqentries * sizeof(struct ibmveth_rx_q_entry), tbl);
return ret;
}
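
Editor's note: the ibmveth changes above track the powerpc rework that made IOMMU_PAGE_ALIGN() take the device's iommu_table, so the entitlement math uses that table's actual page size instead of a fixed constant. A hedged sketch of the idea; the structure and helper shapes are illustrative, not the exact arch definitions.

struct iommu_table_example {
	unsigned int it_page_shift;	/* log2 of this table's page size */
};

static unsigned long iommu_page_align_example(unsigned long len,
		const struct iommu_table_example *tbl)
{
	unsigned long size = 1UL << tbl->it_page_shift;

	/* Round len up to a whole number of IOMMU pages. */
	return (len + size - 1) & ~(size - 1);
}
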
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 84066bafe057..451ba7949e15 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -12,8 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Copyright (C) IBM Corporation, 2003, 2010
*
diff --git a/drivers/net/ethernet/icplus/ipg.h b/drivers/net/ethernet/icplus/ipg.h
index abb300a31912..a21e4f5702b5 100644
--- a/drivers/net/ethernet/icplus/ipg.h
+++ b/drivers/net/ethernet/icplus/ipg.h
@@ -18,7 +18,6 @@
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
-#include <linux/init.h>
#include <linux/skbuff.h>
#include <asm/bitops.h>
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 149ac85b5f9e..bb9f0ba9d164 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -220,12 +220,12 @@ config IXGBE_DCB
If unsure, say N.
config IXGBEVF
- tristate "Intel(R) 82599 Virtual Function Ethernet support"
+ tristate "Intel(R) 10GbE PCI Express Virtual Function Ethernet support"
depends on PCI_MSI
---help---
- This driver supports Intel(R) 82599 virtual functions. For more
- information on how to identify your adapter, go to the Adapter &
- Driver ID Guide at:
+ This driver supports Intel(R) PCI Express virtual functions for the
+ Intel(R) ixgbe driver. For more information on how to identify your
+ adapter, go to the Adapter & Driver ID Guide at:
<http://support.intel.com/support/network/sb/CS-008441.htm>
@@ -243,6 +243,7 @@ config IXGBEVF
config I40E
tristate "Intel(R) Ethernet Controller XL710 Family support"
+ select PTP_1588_CLOCK
depends on PCI
---help---
This driver supports Intel(R) Ethernet Controller XL710 Family of
@@ -259,4 +260,44 @@ config I40E
To compile this driver as a module, choose M here. The module
will be called i40e.
+config I40E_VXLAN
+ bool "Virtual eXtensible Local Area Network Support"
+ default n
+ depends on I40E && VXLAN && !(I40E=y && VXLAN=m)
+ ---help---
+ This allows one to create VXLAN virtual interfaces that provide
+ Layer 2 Networks over Layer 3 Networks. VXLAN is often used
+ to tunnel virtual network infrastructure in virtualized environments.
+ Say Y here if you want to use Virtual eXtensible Local Area Network
+ (VXLAN) in the driver.
+
+config I40E_DCB
+ bool "Data Center Bridging (DCB) Support"
+ default n
+ depends on I40E && DCB
+ ---help---
+ Say Y here if you want to use Data Center Bridging (DCB) in the
+ driver.
+
+ If unsure, say N.
+
+config I40EVF
+ tristate "Intel(R) XL710 X710 Virtual Function Ethernet support"
+ depends on PCI_MSI
+ ---help---
+ This driver supports Intel(R) XL710 and X710 virtual functions.
+ For more information on how to identify your adapter, go to the
+ Adapter & Driver ID Guide at:
+
+ <http://support.intel.com/support/network/sb/CS-008441.htm>
+
+ For general information and support, go to the Intel support
+ website at:
+
+ <http://support.intel.com>
+
+ To compile this driver as a module, choose M here. The module
+ will be called i40evf. MSI-X interrupt support is required
+ for this driver to work correctly.
+
endif # NET_VENDOR_INTEL
diff --git a/drivers/net/ethernet/intel/Makefile b/drivers/net/ethernet/intel/Makefile
index 5bae933efc7c..cdbbca8a3755 100644
--- a/drivers/net/ethernet/intel/Makefile
+++ b/drivers/net/ethernet/intel/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_IXGBE) += ixgbe/
obj-$(CONFIG_IXGBEVF) += ixgbevf/
obj-$(CONFIG_I40E) += i40e/
obj-$(CONFIG_IXGB) += ixgb/
+obj-$(CONFIG_I40EVF) += i40evf/
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index f9313b36c887..10a0f221b183 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -36,7 +36,6 @@
#include <linux/module.h>
#include <linux/types.h>
#include <asm/byteorder.h>
-#include <linux/init.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ioport.h>
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 6d14eea17918..6d91933c4cdd 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -5790,7 +5790,7 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
* specified. Matching the kind of event packet is not supported, with the
* exception of "all V2 events regardless of level 2 or 4".
**/
-static int e1000e_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
+static int e1000e_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct hwtstamp_config config;
@@ -5825,6 +5825,14 @@ static int e1000e_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
sizeof(config)) ? -EFAULT : 0;
}
+static int e1000e_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ return copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config,
+ sizeof(adapter->hwtstamp_config)) ? -EFAULT : 0;
+}
+
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
@@ -5833,7 +5841,9 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
case SIOCSMIIREG:
return e1000_mii_ioctl(netdev, ifr, cmd);
case SIOCSHWTSTAMP:
- return e1000e_hwtstamp_ioctl(netdev, ifr);
+ return e1000e_hwtstamp_set(netdev, ifr);
+ case SIOCGHWTSTAMP:
+ return e1000e_hwtstamp_get(netdev, ifr);
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile
index 479b2c4e552d..d9eb80acac4f 100644
--- a/drivers/net/ethernet/intel/i40e/Makefile
+++ b/drivers/net/ethernet/intel/i40e/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel Ethernet Controller XL710 Family Linux Driver
-# Copyright(c) 2013 Intel Corporation.
+# Copyright(c) 2013 - 2014 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
#
# The full GNU General Public License is included in this distribution in
# the file called "COPYING".
@@ -41,4 +40,7 @@ i40e-objs := i40e_main.o \
i40e_debugfs.o \
i40e_diag.o \
i40e_txrx.o \
+ i40e_ptp.o \
i40e_virtchnl_pf.o
+
+i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 1ca9834cdfda..72dae4d97b43 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -29,7 +28,7 @@
#define _I40E_H_
#include <net/tcp.h>
-#include <linux/init.h>
+#include <net/udp.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/module.h>
@@ -50,11 +49,15 @@
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
+#include <linux/clocksource.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
#include "i40e_type.h"
#include "i40e_prototype.h"
#include "i40e_virtchnl.h"
#include "i40e_virtchnl_pf.h"
#include "i40e_txrx.h"
+#include "i40e_dcb.h"
/* Useful i40e defaults */
#define I40E_BASE_PF_SEID 16
@@ -63,7 +66,7 @@
#define I40E_MAX_VEB 16
#define I40E_MAX_NUM_DESCRIPTORS 4096
-#define I40E_MAX_REGISTER 0x0038FFFF
+#define I40E_MAX_REGISTER 0x800000
#define I40E_DEFAULT_NUM_DESCRIPTORS 512
#define I40E_REQ_DESCRIPTOR_MULTIPLE 32
#define I40E_MIN_NUM_DESCRIPTORS 64
@@ -72,6 +75,7 @@
#define I40E_DEFAULT_QUEUES_PER_VMDQ 2 /* max 16 qps */
#define I40E_DEFAULT_QUEUES_PER_VF 4
#define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */
+#define I40E_MAX_QUEUES_PER_TC 64 /* should be a power of 2 */
#define I40E_FDIR_RING 0
#define I40E_FDIR_RING_COUNT 32
#define I40E_MAX_AQ_BUF_SIZE 4096
@@ -81,11 +85,13 @@
#define I40E_DEFAULT_MSG_ENABLE 4
#define I40E_NVM_VERSION_LO_SHIFT 0
-#define I40E_NVM_VERSION_LO_MASK (0xf << I40E_NVM_VERSION_LO_SHIFT)
-#define I40E_NVM_VERSION_MID_SHIFT 4
-#define I40E_NVM_VERSION_MID_MASK (0xff << I40E_NVM_VERSION_MID_SHIFT)
-#define I40E_NVM_VERSION_HI_SHIFT 12
-#define I40E_NVM_VERSION_HI_MASK (0xf << I40E_NVM_VERSION_HI_SHIFT)
+#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT)
+#define I40E_NVM_VERSION_HI_SHIFT 8
+#define I40E_NVM_VERSION_HI_MASK (0xff << I40E_NVM_VERSION_HI_SHIFT)
+
+/* The values in here are decimal coded as hex, as is the case in the NVM map */
+#define I40E_CURRENT_NVM_VERSION_HI 0x2
+#define I40E_CURRENT_NVM_VERSION_LO 0x30
/* magic for getting defines into strings */
#define STRINGIFY(foo) #foo
@@ -127,7 +133,9 @@ enum i40e_state_t {
__I40E_PF_RESET_REQUESTED,
__I40E_CORE_RESET_REQUESTED,
__I40E_GLOBAL_RESET_REQUESTED,
+ __I40E_EMP_RESET_REQUESTED,
__I40E_FILTER_OVERFLOW_PROMISC,
+ __I40E_SUSPENDED,
};
enum i40e_interrupt_policy {
@@ -157,6 +165,8 @@ struct i40e_fdir_data {
u8 *raw_packet;
};
+#define I40E_ETH_P_LLDP 0x88cc
+
#define I40E_DCB_PRIO_TYPE_STRICT 0
#define I40E_DCB_PRIO_TYPE_ETS 1
#define I40E_DCB_STRICT_PRIO_CREDITS 127
@@ -191,14 +201,20 @@ struct i40e_pf {
u16 num_vmdq_msix; /* num queue vectors per vmdq pool */
u16 num_req_vfs; /* num vfs requested for this vf */
u16 num_vf_qps; /* num queue pairs per vf */
- u16 num_tc_qps; /* num queue pairs per TC */
u16 num_lan_qps; /* num lan queues this pf has set up */
u16 num_lan_msix; /* num queue vectors for the base pf vsi */
+ int queues_left; /* queues left unclaimed */
u16 rss_size; /* num queues in the RSS array */
u16 rss_size_max; /* HW defined max RSS queues */
u16 fdir_pf_filter_count; /* num of guaranteed filters for this PF */
u8 atr_sample_rate;
+ bool wol_en;
+#ifdef CONFIG_I40E_VXLAN
+ __be16 vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
+ u16 pending_vxlan_bitmap;
+
+#endif
enum i40e_interrupt_policy int_policy;
u16 rx_itr_default;
u16 tx_itr_default;
@@ -216,24 +232,24 @@ struct i40e_pf {
#define I40E_FLAG_RX_1BUF_ENABLED (u64)(1 << 4)
#define I40E_FLAG_RX_PS_ENABLED (u64)(1 << 5)
#define I40E_FLAG_RSS_ENABLED (u64)(1 << 6)
-#define I40E_FLAG_MQ_ENABLED (u64)(1 << 7)
-#define I40E_FLAG_VMDQ_ENABLED (u64)(1 << 8)
-#define I40E_FLAG_FDIR_REQUIRES_REINIT (u64)(1 << 9)
-#define I40E_FLAG_NEED_LINK_UPDATE (u64)(1 << 10)
-#define I40E_FLAG_IN_NETPOLL (u64)(1 << 13)
-#define I40E_FLAG_16BYTE_RX_DESC_ENABLED (u64)(1 << 14)
-#define I40E_FLAG_CLEAN_ADMINQ (u64)(1 << 15)
-#define I40E_FLAG_FILTER_SYNC (u64)(1 << 16)
-#define I40E_FLAG_PROCESS_MDD_EVENT (u64)(1 << 18)
-#define I40E_FLAG_PROCESS_VFLR_EVENT (u64)(1 << 19)
-#define I40E_FLAG_SRIOV_ENABLED (u64)(1 << 20)
-#define I40E_FLAG_DCB_ENABLED (u64)(1 << 21)
-#define I40E_FLAG_FDIR_ENABLED (u64)(1 << 22)
-#define I40E_FLAG_FDIR_ATR_ENABLED (u64)(1 << 23)
-#define I40E_FLAG_MFP_ENABLED (u64)(1 << 27)
-
- u16 num_tx_queues;
- u16 num_rx_queues;
+#define I40E_FLAG_VMDQ_ENABLED (u64)(1 << 7)
+#define I40E_FLAG_FDIR_REQUIRES_REINIT (u64)(1 << 8)
+#define I40E_FLAG_NEED_LINK_UPDATE (u64)(1 << 9)
+#define I40E_FLAG_IN_NETPOLL (u64)(1 << 12)
+#define I40E_FLAG_16BYTE_RX_DESC_ENABLED (u64)(1 << 13)
+#define I40E_FLAG_CLEAN_ADMINQ (u64)(1 << 14)
+#define I40E_FLAG_FILTER_SYNC (u64)(1 << 15)
+#define I40E_FLAG_PROCESS_MDD_EVENT (u64)(1 << 17)
+#define I40E_FLAG_PROCESS_VFLR_EVENT (u64)(1 << 18)
+#define I40E_FLAG_SRIOV_ENABLED (u64)(1 << 19)
+#define I40E_FLAG_DCB_ENABLED (u64)(1 << 20)
+#define I40E_FLAG_FD_SB_ENABLED (u64)(1 << 21)
+#define I40E_FLAG_FD_ATR_ENABLED (u64)(1 << 22)
+#define I40E_FLAG_PTP (u64)(1 << 25)
+#define I40E_FLAG_MFP_ENABLED (u64)(1 << 26)
+#ifdef CONFIG_I40E_VXLAN
+#define I40E_FLAG_VXLAN_FILTER_SYNC (u64)(1 << 27)
+#endif
bool stat_offsets_loaded;
struct i40e_hw_port_stats stats;
@@ -247,6 +263,7 @@ struct i40e_pf {
u16 globr_count; /* Global reset count */
u16 empr_count; /* EMP reset count */
u16 pfr_count; /* PF reset count */
+ u16 sw_int_count; /* SW interrupt count */
struct mutex switch_mutex;
u16 lan_vsi; /* our default LAN VSI */
@@ -270,6 +287,8 @@ struct i40e_pf {
struct dentry *i40e_dbg_pf;
#endif /* CONFIG_DEBUG_FS */
+ u16 instance; /* A unique number per i40e_pf instance in the system */
+
/* sr-iov config info */
struct i40e_vf *vf;
int num_alloc_vfs; /* actual number of VFs allocated */
@@ -287,6 +306,20 @@ struct i40e_pf {
u32 fcoe_hmc_filt_num;
u32 fcoe_hmc_cntx_num;
struct i40e_filter_control_settings filter_settings;
+
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_caps;
+ struct sk_buff *ptp_tx_skb;
+ struct work_struct ptp_tx_work;
+ struct hwtstamp_config tstamp_config;
+ unsigned long ptp_tx_start;
+ unsigned long last_rx_ptp_check;
+ spinlock_t tmreg_lock; /* Used to protect the device time registers. */
+ u64 ptp_base_adj;
+ u32 tx_hwtstamp_timeouts;
+ u32 rx_hwtstamp_cleared;
+ bool ptp_tx;
+ bool ptp_rx;
};
struct i40e_mac_filter {
@@ -441,13 +474,11 @@ static inline char *i40e_fw_version_str(struct i40e_hw *hw)
static char buf[32];
snprintf(buf, sizeof(buf),
- "f%d.%d a%d.%d n%02d.%02d.%02d e%08x",
+ "f%d.%d a%d.%d n%02x.%02x e%08x",
hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
hw->aq.api_maj_ver, hw->aq.api_min_ver,
(hw->nvm.version & I40E_NVM_VERSION_HI_MASK)
>> I40E_NVM_VERSION_HI_SHIFT,
- (hw->nvm.version & I40E_NVM_VERSION_MID_MASK)
- >> I40E_NVM_VERSION_MID_SHIFT,
(hw->nvm.version & I40E_NVM_VERSION_LO_MASK)
>> I40E_NVM_VERSION_LO_SHIFT,
hw->nvm.eetrack);
@@ -495,6 +526,7 @@ int i40e_up(struct i40e_vsi *vsi);
void i40e_down(struct i40e_vsi *vsi);
extern const char i40e_driver_name[];
extern const char i40e_driver_version_str[];
+void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags);
void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags);
void i40e_update_stats(struct i40e_vsi *vsi);
void i40e_update_eth_stats(struct i40e_vsi *vsi);
@@ -502,13 +534,6 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
int i40e_fetch_switch_configuration(struct i40e_pf *pf,
bool printconfig);
-/* needed by i40e_main.c */
-void i40e_add_fdir_filter(struct i40e_fdir_data fdir_data,
- struct i40e_ring *tx_ring);
-void i40e_add_remove_filter(struct i40e_fdir_data fdir_data,
- struct i40e_ring *tx_ring);
-void i40e_update_fdir_filter(struct i40e_fdir_data fdir_data,
- struct i40e_ring *tx_ring);
int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
struct i40e_pf *pf, bool add);
@@ -524,10 +549,13 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
int i40e_vsi_release(struct i40e_vsi *vsi);
struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf, enum i40e_vsi_type type,
struct i40e_vsi *start_vsi);
+int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool enable);
+int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count);
struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid,
u16 downlink_seid, u8 enabled_tc);
void i40e_veb_release(struct i40e_veb *veb);
+int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc);
i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid);
void i40e_vsi_remove_pvid(struct i40e_vsi *vsi);
void i40e_vsi_reset_stats(struct i40e_vsi *vsi);
@@ -544,6 +572,7 @@ static inline void i40e_dbg_init(void) {}
static inline void i40e_dbg_exit(void) {}
#endif /* CONFIG_DEBUG_FS*/
void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector);
+void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
@@ -555,5 +584,21 @@ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
bool is_vf, bool is_netdev);
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
-
+#ifdef CONFIG_I40E_DCB
+void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
+ struct i40e_dcbx_config *new_cfg);
+void i40e_dcbnl_set_all(struct i40e_vsi *vsi);
+void i40e_dcbnl_setup(struct i40e_vsi *vsi);
+bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
+ struct i40e_dcbx_config *old_cfg,
+ struct i40e_dcbx_config *new_cfg);
+#endif /* CONFIG_I40E_DCB */
+void i40e_ptp_rx_hang(struct i40e_vsi *vsi);
+void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf);
+void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index);
+void i40e_ptp_set_increment(struct i40e_pf *pf);
+int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
+int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
+void i40e_ptp_init(struct i40e_pf *pf);
+void i40e_ptp_stop(struct i40e_pf *pf);
#endif /* _I40E_H_ */
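
Editor's note: the NVM version rework above (new LO/HI masks plus the %02x format in i40e_fw_version_str()) treats the version word as two hex-coded bytes. A small worked sketch of the decode, with an assumed sample value:

#include <stdio.h>

int main(void)
{
	unsigned short nvm_version = 0x0230;		/* assumed sample */
	unsigned int hi = (nvm_version & 0xff00) >> 8;	/* HI mask/shift */
	unsigned int lo = nvm_version & 0x00ff;		/* LO mask/shift */

	printf("n%02x.%02x\n", hi, lo);	/* prints "n02.30" */
	return 0;
}
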
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index cfef7fc32cdd..a50e6b3479ae 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -31,6 +30,8 @@
#include "i40e_adminq.h"
#include "i40e_prototype.h"
+static void i40e_resume_aq(struct i40e_hw *hw);
+
/**
* i40e_adminq_init_regs - Initialize AdminQ registers
* @hw: pointer to the hardware structure
@@ -43,13 +44,17 @@ static void i40e_adminq_init_regs(struct i40e_hw *hw)
if (hw->mac.type == I40E_MAC_VF) {
hw->aq.asq.tail = I40E_VF_ATQT1;
hw->aq.asq.head = I40E_VF_ATQH1;
+ hw->aq.asq.len = I40E_VF_ATQLEN1;
hw->aq.arq.tail = I40E_VF_ARQT1;
hw->aq.arq.head = I40E_VF_ARQH1;
+ hw->aq.arq.len = I40E_VF_ARQLEN1;
} else {
hw->aq.asq.tail = I40E_PF_ATQT;
hw->aq.asq.head = I40E_PF_ATQH;
+ hw->aq.asq.len = I40E_PF_ATQLEN;
hw->aq.arq.tail = I40E_PF_ARQT;
hw->aq.arq.head = I40E_PF_ARQH;
+ hw->aq.arq.len = I40E_PF_ARQLEN;
}
}
@@ -60,9 +65,8 @@ static void i40e_adminq_init_regs(struct i40e_hw *hw)
static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
i40e_status ret_code;
- struct i40e_virt_mem mem;
- ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq_mem,
+ ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
i40e_mem_atq_ring,
(hw->aq.num_asq_entries *
sizeof(struct i40e_aq_desc)),
@@ -70,21 +74,14 @@ static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
if (ret_code)
return ret_code;
- hw->aq.asq.desc = hw->aq.asq_mem.va;
- hw->aq.asq.dma_addr = hw->aq.asq_mem.pa;
-
- ret_code = i40e_allocate_virt_mem(hw, &mem,
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
(hw->aq.num_asq_entries *
sizeof(struct i40e_asq_cmd_details)));
if (ret_code) {
- i40e_free_dma_mem(hw, &hw->aq.asq_mem);
- hw->aq.asq_mem.va = NULL;
- hw->aq.asq_mem.pa = 0;
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
return ret_code;
}
- hw->aq.asq.details = mem.va;
-
return ret_code;
}
@@ -96,16 +93,11 @@ static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
i40e_status ret_code;
- ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq_mem,
+ ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
i40e_mem_arq_ring,
(hw->aq.num_arq_entries *
sizeof(struct i40e_aq_desc)),
I40E_ADMINQ_DESC_ALIGNMENT);
- if (ret_code)
- return ret_code;
-
- hw->aq.arq.desc = hw->aq.arq_mem.va;
- hw->aq.arq.dma_addr = hw->aq.arq_mem.pa;
return ret_code;
}
@@ -119,14 +111,7 @@ static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
**/
static void i40e_free_adminq_asq(struct i40e_hw *hw)
{
- struct i40e_virt_mem mem;
-
- i40e_free_dma_mem(hw, &hw->aq.asq_mem);
- hw->aq.asq_mem.va = NULL;
- hw->aq.asq_mem.pa = 0;
- mem.va = hw->aq.asq.details;
- i40e_free_virt_mem(hw, &mem);
- hw->aq.asq.details = NULL;
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}
/**
@@ -138,20 +123,17 @@ static void i40e_free_adminq_asq(struct i40e_hw *hw)
**/
static void i40e_free_adminq_arq(struct i40e_hw *hw)
{
- i40e_free_dma_mem(hw, &hw->aq.arq_mem);
- hw->aq.arq_mem.va = NULL;
- hw->aq.arq_mem.pa = 0;
+ i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}
/**
* i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
**/
static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
i40e_status ret_code;
struct i40e_aq_desc *desc;
- struct i40e_virt_mem mem;
struct i40e_dma_mem *bi;
int i;
@@ -160,11 +142,11 @@ static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
*/
/* buffer_info structures do not need alignment */
- ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_arq_entries *
- sizeof(struct i40e_dma_mem)));
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
+ (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
if (ret_code)
goto alloc_arq_bufs;
- hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)mem.va;
+ hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;
/* allocate the mapped buffers */
for (i = 0; i < hw->aq.num_arq_entries; i++) {
@@ -206,29 +188,27 @@ unwind_alloc_arq_bufs:
i--;
for (; i >= 0; i--)
i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
- mem.va = hw->aq.arq.r.arq_bi;
- i40e_free_virt_mem(hw, &mem);
+ i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
return ret_code;
}
/**
* i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
**/
static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
i40e_status ret_code;
- struct i40e_virt_mem mem;
struct i40e_dma_mem *bi;
int i;
/* No mapped memory needed yet, just the buffer info structures */
- ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_asq_entries *
- sizeof(struct i40e_dma_mem)));
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
+ (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
if (ret_code)
goto alloc_asq_bufs;
- hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)mem.va;
+ hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;
/* allocate the mapped buffers */
for (i = 0; i < hw->aq.num_asq_entries; i++) {
@@ -248,35 +228,36 @@ unwind_alloc_asq_bufs:
i--;
for (; i >= 0; i--)
i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
- mem.va = hw->aq.asq.r.asq_bi;
- i40e_free_virt_mem(hw, &mem);
+ i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
return ret_code;
}
/**
* i40e_free_arq_bufs - Free receive queue buffer info elements
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
**/
static void i40e_free_arq_bufs(struct i40e_hw *hw)
{
- struct i40e_virt_mem mem;
int i;
+ /* free descriptors */
for (i = 0; i < hw->aq.num_arq_entries; i++)
i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
- mem.va = hw->aq.arq.r.arq_bi;
- i40e_free_virt_mem(hw, &mem);
+ /* free the descriptor memory */
+ i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+
+ /* free the dma header */
+ i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}
/**
* i40e_free_asq_bufs - Free send queue buffer info elements
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
**/
static void i40e_free_asq_bufs(struct i40e_hw *hw)
{
- struct i40e_virt_mem mem;
int i;
/* only unmap if the address is non-NULL */
@@ -284,14 +265,19 @@ static void i40e_free_asq_bufs(struct i40e_hw *hw)
if (hw->aq.asq.r.asq_bi[i].pa)
i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
- /* now free the buffer info list */
- mem.va = hw->aq.asq.r.asq_bi;
- i40e_free_virt_mem(hw, &mem);
+ /* free the buffer info list */
+ i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
+
+ /* free the descriptor memory */
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+
+ /* free the dma header */
+ i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}
/**
* i40e_config_asq_regs - configure ASQ registers
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
*
* Configure base address and length registers for the transmit queue
**/
@@ -299,14 +285,18 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
{
if (hw->mac.type == I40E_MAC_VF) {
/* configure the transmit queue */
- wr32(hw, I40E_VF_ATQBAH1, upper_32_bits(hw->aq.asq.dma_addr));
- wr32(hw, I40E_VF_ATQBAL1, lower_32_bits(hw->aq.asq.dma_addr));
+ wr32(hw, I40E_VF_ATQBAH1,
+ upper_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, I40E_VF_ATQBAL1,
+ lower_32_bits(hw->aq.asq.desc_buf.pa));
wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
I40E_VF_ATQLEN1_ATQENABLE_MASK));
} else {
/* configure the transmit queue */
- wr32(hw, I40E_PF_ATQBAH, upper_32_bits(hw->aq.asq.dma_addr));
- wr32(hw, I40E_PF_ATQBAL, lower_32_bits(hw->aq.asq.dma_addr));
+ wr32(hw, I40E_PF_ATQBAH,
+ upper_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, I40E_PF_ATQBAL,
+ lower_32_bits(hw->aq.asq.desc_buf.pa));
wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
I40E_PF_ATQLEN_ATQENABLE_MASK));
}
@@ -314,7 +304,7 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
/**
* i40e_config_arq_regs - ARQ register configuration
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
*
* Configure base address and length registers for the receive (event queue)
**/
@@ -322,14 +312,18 @@ static void i40e_config_arq_regs(struct i40e_hw *hw)
{
if (hw->mac.type == I40E_MAC_VF) {
/* configure the receive queue */
- wr32(hw, I40E_VF_ARQBAH1, upper_32_bits(hw->aq.arq.dma_addr));
- wr32(hw, I40E_VF_ARQBAL1, lower_32_bits(hw->aq.arq.dma_addr));
+ wr32(hw, I40E_VF_ARQBAH1,
+ upper_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, I40E_VF_ARQBAL1,
+ lower_32_bits(hw->aq.arq.desc_buf.pa));
wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
I40E_VF_ARQLEN1_ARQENABLE_MASK));
} else {
/* configure the receive queue */
- wr32(hw, I40E_PF_ARQBAH, upper_32_bits(hw->aq.arq.dma_addr));
- wr32(hw, I40E_PF_ARQBAL, lower_32_bits(hw->aq.arq.dma_addr));
+ wr32(hw, I40E_PF_ARQBAH,
+ upper_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, I40E_PF_ARQBAL,
+ lower_32_bits(hw->aq.arq.desc_buf.pa));
wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
I40E_PF_ARQLEN_ARQENABLE_MASK));
}
@@ -340,7 +334,7 @@ static void i40e_config_arq_regs(struct i40e_hw *hw)
/**
* i40e_init_asq - main initialization routine for ASQ
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
*
* This is the main initialization routine for the Admin Send Queue
* Prior to calling this function, drivers *MUST* set the following fields
@@ -397,7 +391,7 @@ init_adminq_exit:
/**
* i40e_init_arq - initialize ARQ
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
*
* The main initialization routine for the Admin Receive (Event) Queue.
* Prior to calling this function, drivers *MUST* set the following fields
@@ -454,7 +448,7 @@ init_adminq_exit:
/**
* i40e_shutdown_asq - shutdown the ASQ
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
*
* The main shutdown routine for the Admin Send Queue
**/
@@ -466,10 +460,9 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
return I40E_ERR_NOT_READY;
/* Stop firmware AdminQ processing */
- if (hw->mac.type == I40E_MAC_VF)
- wr32(hw, I40E_VF_ATQLEN1, 0);
- else
- wr32(hw, I40E_PF_ATQLEN, 0);
+ wr32(hw, hw->aq.asq.head, 0);
+ wr32(hw, hw->aq.asq.tail, 0);
+ wr32(hw, hw->aq.asq.len, 0);
/* make sure lock is available */
mutex_lock(&hw->aq.asq_mutex);
@@ -478,8 +471,6 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
/* free ring buffers */
i40e_free_asq_bufs(hw);
- /* free the ring descriptors */
- i40e_free_adminq_asq(hw);
mutex_unlock(&hw->aq.asq_mutex);
@@ -488,7 +479,7 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
/**
* i40e_shutdown_arq - shutdown ARQ
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
*
* The main shutdown routine for the Admin Receive Queue
**/
@@ -500,10 +491,9 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
return I40E_ERR_NOT_READY;
/* Stop firmware AdminQ processing */
- if (hw->mac.type == I40E_MAC_VF)
- wr32(hw, I40E_VF_ARQLEN1, 0);
- else
- wr32(hw, I40E_PF_ARQLEN, 0);
+ wr32(hw, hw->aq.arq.head, 0);
+ wr32(hw, hw->aq.arq.tail, 0);
+ wr32(hw, hw->aq.arq.len, 0);
/* make sure lock is available */
mutex_lock(&hw->aq.arq_mutex);
@@ -512,8 +502,6 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
/* free ring buffers */
i40e_free_arq_bufs(hw);
- /* free the ring descriptors */
- i40e_free_adminq_arq(hw);
mutex_unlock(&hw->aq.arq_mutex);
@@ -522,7 +510,7 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
/**
* i40e_init_adminq - main initialization routine for Admin Queue
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
*
* Prior to calling this function, drivers *MUST* set the following fields
* in the hw->aq structure:
@@ -533,8 +521,9 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
**/
i40e_status i40e_init_adminq(struct i40e_hw *hw)
{
- u16 eetrack_lo, eetrack_hi;
i40e_status ret_code;
+ u16 eetrack_lo, eetrack_hi;
+ int retry = 0;
/* verify input for valid configuration */
if ((hw->aq.num_arq_entries == 0) ||
@@ -562,23 +551,41 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
if (ret_code)
goto init_adminq_free_asq;
- ret_code = i40e_aq_get_firmware_version(hw,
- &hw->aq.fw_maj_ver, &hw->aq.fw_min_ver,
- &hw->aq.api_maj_ver, &hw->aq.api_min_ver,
- NULL);
- if (ret_code)
+ /* There are some cases where the firmware may not be quite ready
+ * for AdminQ operations, so we retry the AdminQ setup a few times
+ * if we see timeouts in this first AQ call.
+ */
+ do {
+ ret_code = i40e_aq_get_firmware_version(hw,
+ &hw->aq.fw_maj_ver,
+ &hw->aq.fw_min_ver,
+ &hw->aq.api_maj_ver,
+ &hw->aq.api_min_ver,
+ NULL);
+ if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
+ break;
+ retry++;
+ msleep(100);
+ i40e_resume_aq(hw);
+ } while (retry < 10);
+ if (ret_code != I40E_SUCCESS)
goto init_adminq_free_arq;
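
The comment above explains why the first AQ command is wrapped in a retry loop: firmware may still be settling, so only timeouts are retried and the queue is re-armed between attempts. A self-contained sketch of that bounded-retry shape, with try_cmd() and reset_channel() as made-up stand-ins for the real i40e calls:

#include <stdio.h>

#define ERR_TIMEOUT	(-1)
#define MAX_RETRIES	10

static int attempts;

/* Stand-in for the first AQ command: times out twice, then succeeds. */
static int try_cmd(void)
{
	return (++attempts < 3) ? ERR_TIMEOUT : 0;
}

/* Stand-in for i40e_resume_aq(): re-arm the queue before the next try. */
static void reset_channel(void)
{
}

int main(void)
{
	int ret, retry = 0;

	do {
		ret = try_cmd();
		if (ret != ERR_TIMEOUT)		/* retry only on timeout */
			break;
		retry++;
		reset_channel();
	} while (retry < MAX_RETRIES);

	printf("ret=%d after %d attempt(s)\n", ret, attempts);
	return 0;
}
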
- if (hw->aq.api_maj_ver != I40E_FW_API_VERSION_MAJOR ||
- hw->aq.api_min_ver != I40E_FW_API_VERSION_MINOR) {
- ret_code = I40E_ERR_FIRMWARE_API_VERSION;
- goto init_adminq_free_arq;
- }
+ /* get the NVM version info */
i40e_read_nvm_word(hw, I40E_SR_NVM_IMAGE_VERSION, &hw->nvm.version);
i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
+ if (hw->aq.api_maj_ver != I40E_FW_API_VERSION_MAJOR ||
+ hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR) {
+ ret_code = I40E_ERR_FIRMWARE_API_VERSION;
+ goto init_adminq_free_arq;
+ }
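
The version gate now sits after the NVM reads and was also relaxed: the major API version must still match exactly, but the firmware's minor version only has to be no newer than the one the driver was built against. A small sketch of that compatibility rule (version numbers are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define DRV_API_MAJOR	1
#define DRV_API_MINOR	1

/* Same policy as the check above: exact major match, minor not newer. */
static bool api_compatible(unsigned int fw_major, unsigned int fw_minor)
{
	return fw_major == DRV_API_MAJOR && fw_minor <= DRV_API_MINOR;
}

int main(void)
{
	printf("%d\n", api_compatible(1, 0));	/* 1: older minor is fine */
	printf("%d\n", api_compatible(1, 2));	/* 0: firmware API too new */
	printf("%d\n", api_compatible(2, 1));	/* 0: major mismatch */
	return 0;
}
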
+
+ /* pre-emptive resource lock release */
+ i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+
ret_code = i40e_aq_set_hmc_resource_profile(hw,
I40E_HMC_PROFILE_DEFAULT,
0,
@@ -600,12 +607,15 @@ init_adminq_exit:
/**
* i40e_shutdown_adminq - shutdown routine for the Admin Queue
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
**/
i40e_status i40e_shutdown_adminq(struct i40e_hw *hw)
{
i40e_status ret_code = 0;
+ if (i40e_check_asq_alive(hw))
+ i40e_aq_queue_shutdown(hw, true);
+
i40e_shutdown_asq(hw);
i40e_shutdown_arq(hw);
@@ -616,7 +626,7 @@ i40e_status i40e_shutdown_adminq(struct i40e_hw *hw)
/**
* i40e_clean_asq - cleans Admin send queue
- * @asq: pointer to the adminq send ring
+ * @hw: pointer to the hardware structure
*
* returns the number of free desc
**/
@@ -659,12 +669,12 @@ static u16 i40e_clean_asq(struct i40e_hw *hw)
* Returns true if the firmware has processed all descriptors on the
* admin send queue. Returns false if there are still requests pending.
**/
-bool i40e_asq_done(struct i40e_hw *hw)
+static bool i40e_asq_done(struct i40e_hw *hw)
{
/* AQ designers suggest use of head for better
* timing reliability than DD bit
*/
- return (rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use);
+ return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}
@@ -674,7 +684,7 @@ bool i40e_asq_done(struct i40e_hw *hw)
* @desc: prefilled descriptor describing the command (non DMA mem)
* @buff: buffer to use for indirect commands
* @buff_size: size of buffer for indirect commands
- * @opaque: pointer to info to be used in async cleanup
+ * @cmd_details: pointer to command details structure
*
* This is the main send command driver routine for the Admin Queue send
* queue. It runs the queue, cleans the queue, etc
@@ -854,7 +864,7 @@ void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
/* zero out the desc */
memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
desc->opcode = cpu_to_le16(opcode);
- desc->flags = cpu_to_le16(I40E_AQ_FLAG_EI | I40E_AQ_FLAG_SI);
+ desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
}
/**
@@ -912,7 +922,7 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
"AQRX: Event received with error 0x%X.\n",
hw->aq.arq_last_status);
} else {
- memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc));
+ e->desc = *desc;
datalen = le16_to_cpu(desc->datalen);
e->msg_size = min(datalen, e->msg_size);
if (e->msg_buf != NULL && (e->msg_size != 0))
@@ -925,6 +935,11 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
* size
*/
bi = &hw->aq.arq.r.arq_bi[ntc];
+ memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+
+ desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
+ desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
desc->datalen = cpu_to_le16((u16)bi->size);
desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
@@ -947,36 +962,16 @@ clean_arq_element_out:
return ret_code;
}
-void i40e_resume_aq(struct i40e_hw *hw)
+static void i40e_resume_aq(struct i40e_hw *hw)
{
- u32 reg = 0;
-
/* Registers are reset after PF reset */
hw->aq.asq.next_to_use = 0;
hw->aq.asq.next_to_clean = 0;
i40e_config_asq_regs(hw);
- reg = hw->aq.num_asq_entries;
-
- if (hw->mac.type == I40E_MAC_VF) {
- reg |= I40E_VF_ATQLEN_ATQENABLE_MASK;
- wr32(hw, I40E_VF_ATQLEN1, reg);
- } else {
- reg |= I40E_PF_ATQLEN_ATQENABLE_MASK;
- wr32(hw, I40E_PF_ATQLEN, reg);
- }
hw->aq.arq.next_to_use = 0;
hw->aq.arq.next_to_clean = 0;
i40e_config_arq_regs(hw);
- reg = hw->aq.num_arq_entries;
-
- if (hw->mac.type == I40E_MAC_VF) {
- reg |= I40E_VF_ATQLEN_ATQENABLE_MASK;
- wr32(hw, I40E_VF_ARQLEN1, reg);
- } else {
- reg |= I40E_PF_ATQLEN_ATQENABLE_MASK;
- wr32(hw, I40E_PF_ARQLEN, reg);
- }
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index 22e5ed683e47..993f7685a911 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -32,20 +31,20 @@
#include "i40e_adminq_cmd.h"
#define I40E_ADMINQ_DESC(R, i) \
- (&(((struct i40e_aq_desc *)((R).desc))[i]))
+ (&(((struct i40e_aq_desc *)((R).desc_buf.va))[i]))
#define I40E_ADMINQ_DESC_ALIGNMENT 4096
struct i40e_adminq_ring {
- void *desc; /* Descriptor ring memory */
- void *details; /* ASQ details */
+ struct i40e_virt_mem dma_head; /* space for dma structures */
+ struct i40e_dma_mem desc_buf; /* descriptor ring memory */
+ struct i40e_virt_mem cmd_buf; /* command buffer memory */
union {
struct i40e_dma_mem *asq_bi;
struct i40e_dma_mem *arq_bi;
} r;
- u64 dma_addr; /* Physical address of the ring */
u16 count; /* Number of descriptors */
u16 rx_buf_len; /* Admin Receive Queue buffer length */
@@ -56,6 +55,7 @@ struct i40e_adminq_ring {
/* used for queue tracking */
u32 head;
u32 tail;
+ u32 len;
};
/* ASQ transaction details */
@@ -69,7 +69,7 @@ struct i40e_asq_cmd_details {
};
#define I40E_ADMINQ_DETAILS(R, i) \
- (&(((struct i40e_asq_cmd_details *)((R).details))[i]))
+ (&(((struct i40e_asq_cmd_details *)((R).cmd_buf.va))[i]))
/* ARQ event information */
struct i40e_arq_event_info {
@@ -94,9 +94,6 @@ struct i40e_adminq_info {
struct mutex asq_mutex; /* Send queue lock */
struct mutex arq_mutex; /* Receive queue lock */
- struct i40e_dma_mem asq_mem; /* send queue dynamic memory */
- struct i40e_dma_mem arq_mem; /* receive queue dynamic memory */
-
/* last status values on send and receive queues */
enum i40e_admin_queue_err asq_last_status;
enum i40e_admin_queue_err arq_last_status;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index e61ebdd5a5f9..7b6374a8f8da 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -35,7 +34,7 @@
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR 0x0000
+#define I40E_FW_API_VERSION_MINOR 0x0001
struct i40e_aq_desc {
__le16 flags;
@@ -137,10 +136,13 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_set_ns_proxy_entry = 0x0105,
/* LAA */
- i40e_aqc_opc_mng_laa = 0x0106,
+ i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */
i40e_aqc_opc_mac_address_read = 0x0107,
i40e_aqc_opc_mac_address_write = 0x0108,
+ /* PXE */
+ i40e_aqc_opc_clear_pxe_mode = 0x0110,
+
/* internal switch commands */
i40e_aqc_opc_get_switch_config = 0x0200,
i40e_aqc_opc_add_statistics = 0x0201,
@@ -317,13 +319,15 @@ struct i40e_aqc_get_version {
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version);
-/* Send driver version (direct 0x0002) */
+/* Send driver version (indirect 0x0002) */
struct i40e_aqc_driver_version {
u8 driver_major_ver;
u8 driver_minor_ver;
u8 driver_build_ver;
u8 driver_subbuild_ver;
- u8 reserved[12];
+ u8 reserved[4];
+ __le32 address_high;
+ __le32 address_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version);
@@ -479,7 +483,7 @@ struct i40e_aqc_mng_laa {
u8 reserved2[6];
};
-/* Manage MAC Address Read Command (0x0107) */
+/* Manage MAC Address Read Command (indirect 0x0107) */
struct i40e_aqc_mac_address_read {
__le16 command_flags;
#define I40E_AQC_LAN_ADDR_VALID 0x10
@@ -517,6 +521,16 @@ struct i40e_aqc_mac_address_write {
I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write);
+/* PXE commands (0x011x) */
+
+/* Clear PXE Command and response (direct 0x0110) */
+struct i40e_aqc_clear_pxe {
+ u8 rx_cnt;
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
+
/* Switch configuration commands (0x02xx) */
/* Used by many indirect commands that only pass an seid and a buffer in the
@@ -639,13 +653,15 @@ struct i40e_aqc_switch_resource_alloc_element_resp {
u8 reserved2[6];
};
-/* Add VSI (indirect 0x210)
+/* Add VSI (indirect 0x0210)
* this indirect command uses struct i40e_aqc_vsi_properties_data
* as the indirect buffer (128 bytes)
*
- * Update VSI (indirect 0x211) Get VSI (indirect 0x0212)
- * use the generic i40e_aqc_switch_seid descriptor format
- * use the same completion and data structure as Add VSI
+ * Update VSI (indirect 0x211)
+ * uses the same data structure as Add VSI
+ *
+ * Get VSI (indirect 0x0212)
+ * uses the same completion and data structure as Add VSI
*/
struct i40e_aqc_add_get_update_vsi {
__le16 uplink_seid;
@@ -664,7 +680,6 @@ struct i40e_aqc_add_get_update_vsi {
#define I40E_AQ_VSI_TYPE_PF 0x2
#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3
#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4
-#define I40E_AQ_VSI_FLAG_CLOUD_VSI 0x8
__le32 addr_high;
__le32 addr_low;
};
@@ -1026,7 +1041,9 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
__le16 seid;
#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
- u8 reserved[10];
+ __le16 vlan_tag;
+#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000
+ u8 reserved[8];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
@@ -1179,33 +1196,46 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
} v4;
struct {
u8 data[16];
- } v6;
- } ipaddr;
+ } v6;
+ } ipaddr;
__le16 flags;
#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
+/* 0x0000 reserved */
#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
-#define I40E_AQC_ADD_CLOUD_FILTER_OIP_GRE 0x0002
+/* 0x0002 reserved */
#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_GRE 0x0004
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004
+/* 0x0005 reserved */
#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_VNL 0x0007
+/* 0x0007 reserved */
/* 0x0008 reserved */
#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009
#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B
+#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C
+
#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6
#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0
#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0
#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100
- __le32 key_low;
- __le32 key_high;
+
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3
+
+ __le32 tenant_id;
+ u8 reserved[4];
__le16 queue_number;
#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \
I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
- u8 reserved[14];
+ u8 reserved2[14];
/* response section */
u8 allocation_result;
#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0
@@ -1548,7 +1578,7 @@ struct i40e_aqc_module_desc {
struct i40e_aq_get_phy_abilities_resp {
__le32 phy_type; /* bitmap using the above enum for offsets */
- u8 link_speed; /* bitmap using the above enum */
+ u8 link_speed; /* bitmap using the above enum bit patterns */
u8 abilities;
#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
@@ -1582,6 +1612,10 @@ struct i40e_aq_set_phy_config { /* same bits as above in all */
__le32 phy_type;
u8 link_speed;
u8 abilities;
+/* bits 0-2 use the values from get_phy_abilities_resp */
+#define I40E_AQ_PHY_ENABLE_LINK 0x08
+#define I40E_AQ_PHY_ENABLE_AN 0x10
+#define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20
__le16 eee_capability;
__le32 eeer;
u8 low_power_ctrl;
@@ -1914,22 +1948,33 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
/* Add Udp Tunnel command and completion (direct 0x0B00) */
struct i40e_aqc_add_udp_tunnel {
__le16 udp_port;
- u8 header_len; /* in DWords, 1 to 15 */
- u8 protocol_index;
-#define I40E_AQC_TUNNEL_TYPE_MAC 0x0
-#define I40E_AQC_TUNNEL_TYPE_UDP 0x1
- u8 reserved[12];
+ u8 reserved0[3];
+ u8 protocol_type;
+#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00
+#define I40E_AQC_TUNNEL_TYPE_NGE 0x01
+#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10
+ u8 reserved1[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
+struct i40e_aqc_add_udp_tunnel_completion {
+ __le16 udp_port;
+ u8 filter_entry_index;
+ u8 multiple_pfs;
+#define I40E_AQC_SINGLE_PF 0x0
+#define I40E_AQC_MULTIPLE_PFS 0x1
+ u8 total_filters;
+ u8 reserved[11];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion);
+
/* remove UDP Tunnel command (0x0B01) */
struct i40e_aqc_remove_udp_tunnel {
u8 reserved[2];
u8 index; /* 0 to 15 */
- u8 pf_filters;
- u8 total_filters;
- u8 reserved2[11];
+ u8 reserved2[13];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
@@ -1937,28 +1982,32 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
struct i40e_aqc_del_udp_tunnel_completion {
__le16 udp_port;
u8 index; /* 0 to 15 */
- u8 multiple_entries;
- u8 tunnels_used;
- u8 reserved;
- u8 tunnels_free;
- u8 reserved1[9];
+ u8 multiple_pfs;
+ u8 total_filters_used;
+ u8 reserved1[11];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
/* tunnel key structure 0x0B10 */
+
struct i40e_aqc_tunnel_key_structure {
- __le16 key1_off;
- __le16 key1_len;
- __le16 key2_off;
- __le16 key2_len;
- __le16 flags;
+ u8 key1_off;
+ u8 key2_off;
+ u8 key1_len; /* 0 to 15 */
+ u8 key2_len; /* 0 to 15 */
+ u8 flags;
#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
/* response flags */
#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
- u8 resreved[6];
+ u8 network_key_index;
+#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0
+#define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1
+#define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP 0x2
+#define I40E_AQC_NETWORK_KEY_INDEX_GRE 0x3
+ u8 reserved[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure);
@@ -2052,6 +2101,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg);
#define I40E_AQ_CLUSTER_ID_DCB 8
#define I40E_AQ_CLUSTER_ID_EMP_MEM 9
#define I40E_AQ_CLUSTER_ID_PKT_BUF 10
+#define I40E_AQ_CLUSTER_ID_ALTRAM 11
struct i40e_aqc_debug_dump_internals {
u8 cluster_id;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_alloc.h b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
index 3b1cc214f9dc..926811ad44ac 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_alloc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 1e4ea134975a..e7f38b57834d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -43,20 +42,20 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
switch (hw->device_id) {
- case I40E_SFP_XL710_DEVICE_ID:
- case I40E_SFP_X710_DEVICE_ID:
- case I40E_QEMU_DEVICE_ID:
- case I40E_KX_A_DEVICE_ID:
- case I40E_KX_B_DEVICE_ID:
- case I40E_KX_C_DEVICE_ID:
- case I40E_KX_D_DEVICE_ID:
- case I40E_QSFP_A_DEVICE_ID:
- case I40E_QSFP_B_DEVICE_ID:
- case I40E_QSFP_C_DEVICE_ID:
+ case I40E_DEV_ID_SFP_XL710:
+ case I40E_DEV_ID_SFP_X710:
+ case I40E_DEV_ID_QEMU:
+ case I40E_DEV_ID_KX_A:
+ case I40E_DEV_ID_KX_B:
+ case I40E_DEV_ID_KX_C:
+ case I40E_DEV_ID_KX_D:
+ case I40E_DEV_ID_QSFP_A:
+ case I40E_DEV_ID_QSFP_B:
+ case I40E_DEV_ID_QSFP_C:
hw->mac.type = I40E_MAC_XL710;
break;
- case I40E_VF_DEVICE_ID:
- case I40E_VF_HV_DEVICE_ID:
+ case I40E_DEV_ID_VF:
+ case I40E_DEV_ID_VF_HV:
hw->mac.type = I40E_MAC_VF;
break;
default:
@@ -75,7 +74,8 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
/**
* i40e_debug_aq
* @hw: pointer to the hardware structure
- * @cap: pointer to adminq command descriptor
+ * @mask: debug mask
+ * @desc: pointer to admin queue descriptor
* @buffer: pointer to command buffer
*
* Dumps debug log about adminq command with descriptor contents.
@@ -126,6 +126,43 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
}
/**
+ * i40e_check_asq_alive
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if Queue is enabled else false.
+ **/
+bool i40e_check_asq_alive(struct i40e_hw *hw)
+{
+ return !!(rd32(hw, hw->aq.asq.len) & I40E_PF_ATQLEN_ATQENABLE_MASK);
+}
+
+/**
+ * i40e_aq_queue_shutdown
+ * @hw: pointer to the hw struct
+ * @unloading: is the driver unloading itself
+ *
+ * Tell the Firmware that we're shutting down the AdminQ and whether
+ * or not the driver is unloading as well.
+ **/
+i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
+ bool unloading)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_queue_shutdown *cmd =
+ (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_queue_shutdown);
+
+ if (unloading)
+ cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ return status;
+}
+
+/**
* i40e_init_shared_code - Initialize the shared code
* @hw: pointer to hardware structure
*
@@ -142,14 +179,6 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
i40e_status status = 0;
u32 reg;
- hw->phy.get_link_info = true;
-
- /* Determine port number */
- reg = rd32(hw, I40E_PFGEN_PORTNUM);
- reg = ((reg & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) >>
- I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT);
- hw->port = (u8)reg;
-
i40e_set_mac_type(hw);
switch (hw->mac.type) {
@@ -160,6 +189,21 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
break;
}
+ hw->phy.get_link_info = true;
+
+ /* Determine port number */
+ reg = rd32(hw, I40E_PFGEN_PORTNUM);
+ reg = ((reg & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) >>
+ I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT);
+ hw->port = (u8)reg;
+
+ /* Determine the PF number based on the PCI fn */
+ reg = rd32(hw, I40E_GLPCI_CAPSUP);
+ if (reg & I40E_GLPCI_CAPSUP_ARI_EN_MASK)
+ hw->pf_id = (u8)((hw->bus.device << 3) | hw->bus.func);
+ else
+ hw->pf_id = (u8)hw->bus.func;
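
With ARI, the PCIe routing ID has no separate device number, so the full 8-bit function number is rebuilt from the classic 5-bit device / 3-bit function split, as in the hunk above. A standalone sketch of the same arithmetic (input values are made up):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the pf_id computation above: with ARI the device and function
 * fields are flattened into one 8-bit function number.
 */
static uint8_t pf_id(uint8_t device, uint8_t func, int ari_enabled)
{
	return ari_enabled ? (uint8_t)((device << 3) | func) : func;
}

int main(void)
{
	printf("non-ARI: %u\n", (unsigned)pf_id(0, 2, 0));	/* 2 */
	printf("ARI:     %u\n", (unsigned)pf_id(3, 2, 1));	/* 26 */
	return 0;
}
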
+
status = i40e_init_nvm(hw);
return status;
}
@@ -210,8 +254,11 @@ i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_mac_address_write);
cmd_data->command_flags = cpu_to_le16(flags);
- memcpy(&cmd_data->mac_sal, &mac_addr[0], 4);
- memcpy(&cmd_data->mac_sah, &mac_addr[4], 2);
+ cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]);
+ cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) |
+ ((u32)mac_addr[3] << 16) |
+ ((u32)mac_addr[4] << 8) |
+ mac_addr[5]);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
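
The rewrite above builds the SAH/SAL values explicitly rather than memcpy()ing raw bytes, which makes the byte ordering visible. A host-order-only sketch of the same packing (the MAC value is made up; the driver additionally wraps the results in cpu_to_le16()/cpu_to_le32()):

#include <stdint.h>
#include <stdio.h>

/* Pack a 6-byte MAC into a 16-bit high part and a 32-bit low part,
 * matching the shift/or expressions in the hunk above.
 */
static void pack_mac(const uint8_t mac[6], uint16_t *sah, uint32_t *sal)
{
	*sah = (uint16_t)(((uint16_t)mac[0] << 8) | mac[1]);
	*sal = ((uint32_t)mac[2] << 24) | ((uint32_t)mac[3] << 16) |
	       ((uint32_t)mac[4] << 8)  |  (uint32_t)mac[5];
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
	uint16_t sah;
	uint32_t sal;

	pack_mac(mac, &sah, &sal);
	printf("sah=0x%04x sal=0x%08x\n", (unsigned)sah, (unsigned)sal);
	return 0;
}
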
@@ -240,32 +287,53 @@ i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
}
/**
- * i40e_validate_mac_addr - Validate MAC address
- * @mac_addr: pointer to MAC address
- *
- * Tests a MAC address to ensure it is a valid Individual Address
+ * i40e_get_media_type - Gets media type
+ * @hw: pointer to the hardware structure
**/
-i40e_status i40e_validate_mac_addr(u8 *mac_addr)
+static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
{
- i40e_status status = 0;
-
- /* Make sure it is not a multicast address */
- if (I40E_IS_MULTICAST(mac_addr)) {
- hw_dbg(hw, "MAC address is multicast\n");
- status = I40E_ERR_INVALID_MAC_ADDR;
- /* Not a broadcast address */
- } else if (I40E_IS_BROADCAST(mac_addr)) {
- hw_dbg(hw, "MAC address is broadcast\n");
- status = I40E_ERR_INVALID_MAC_ADDR;
- /* Reject the zero address */
- } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
- mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
- hw_dbg(hw, "MAC address is all zeros\n");
- status = I40E_ERR_INVALID_MAC_ADDR;
+ enum i40e_media_type media;
+
+ switch (hw->phy.link_info.phy_type) {
+ case I40E_PHY_TYPE_10GBASE_SR:
+ case I40E_PHY_TYPE_10GBASE_LR:
+ case I40E_PHY_TYPE_40GBASE_SR4:
+ case I40E_PHY_TYPE_40GBASE_LR4:
+ media = I40E_MEDIA_TYPE_FIBER;
+ break;
+ case I40E_PHY_TYPE_100BASE_TX:
+ case I40E_PHY_TYPE_1000BASE_T:
+ case I40E_PHY_TYPE_10GBASE_T:
+ media = I40E_MEDIA_TYPE_BASET;
+ break;
+ case I40E_PHY_TYPE_10GBASE_CR1_CU:
+ case I40E_PHY_TYPE_40GBASE_CR4_CU:
+ case I40E_PHY_TYPE_10GBASE_CR1:
+ case I40E_PHY_TYPE_40GBASE_CR4:
+ case I40E_PHY_TYPE_10GBASE_SFPP_CU:
+ media = I40E_MEDIA_TYPE_DA;
+ break;
+ case I40E_PHY_TYPE_1000BASE_KX:
+ case I40E_PHY_TYPE_10GBASE_KX4:
+ case I40E_PHY_TYPE_10GBASE_KR:
+ case I40E_PHY_TYPE_40GBASE_KR4:
+ media = I40E_MEDIA_TYPE_BACKPLANE;
+ break;
+ case I40E_PHY_TYPE_SGMII:
+ case I40E_PHY_TYPE_XAUI:
+ case I40E_PHY_TYPE_XFI:
+ case I40E_PHY_TYPE_XLAUI:
+ case I40E_PHY_TYPE_XLPPI:
+ default:
+ media = I40E_MEDIA_TYPE_UNKNOWN;
+ break;
}
- return status;
+
+ return media;
}
+#define I40E_PF_RESET_WAIT_COUNT_A0 200
+#define I40E_PF_RESET_WAIT_COUNT 10
/**
* i40e_pf_reset - Reset the PF
* @hw: pointer to the hardware structure
@@ -275,7 +343,8 @@ i40e_status i40e_validate_mac_addr(u8 *mac_addr)
**/
i40e_status i40e_pf_reset(struct i40e_hw *hw)
{
- u32 wait_cnt = 0;
+ u32 cnt = 0;
+ u32 cnt1 = 0;
u32 reg = 0;
u32 grst_del;
@@ -285,7 +354,7 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
*/
grst_del = rd32(hw, I40E_GLGEN_RSTCTL) & I40E_GLGEN_RSTCTL_GRSTDEL_MASK
>> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
- for (wait_cnt = 0; wait_cnt < grst_del + 2; wait_cnt++) {
+ for (cnt = 0; cnt < grst_del + 2; cnt++) {
reg = rd32(hw, I40E_GLGEN_RSTAT);
if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
break;
@@ -296,17 +365,37 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
return I40E_ERR_RESET_FAILED;
}
- /* Determine the PF number based on the PCI fn */
- hw->pf_id = (u8)hw->bus.func;
+ /* Now wait for the FW to be ready */
+ for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
+ reg = rd32(hw, I40E_GLNVM_ULD);
+ reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
+ I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
+ if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
+ I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) {
+ hw_dbg(hw, "Core and Global modules ready %d\n", cnt1);
+ break;
+ }
+ usleep_range(10000, 20000);
+ }
+ if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
+ I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
+ hw_dbg(hw, "wait for FW Reset complete timedout\n");
+ hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg);
+ return I40E_ERR_RESET_FAILED;
+ }
/* If there was a Global Reset in progress when we got here,
* we don't need to do the PF Reset
*/
- if (!wait_cnt) {
+ if (!cnt) {
+ if (hw->revision_id == 0)
+ cnt = I40E_PF_RESET_WAIT_COUNT_A0;
+ else
+ cnt = I40E_PF_RESET_WAIT_COUNT;
reg = rd32(hw, I40E_PFGEN_CTRL);
wr32(hw, I40E_PFGEN_CTRL,
(reg | I40E_PFGEN_CTRL_PFSWR_MASK));
- for (wait_cnt = 0; wait_cnt < 10; wait_cnt++) {
+ for (; cnt; cnt--) {
reg = rd32(hw, I40E_PFGEN_CTRL);
if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
break;
@@ -319,6 +408,7 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
}
i40e_clear_pxe_mode(hw);
+
return 0;
}
@@ -335,9 +425,47 @@ void i40e_clear_pxe_mode(struct i40e_hw *hw)
/* Clear single descriptor fetch/write-back mode */
reg = rd32(hw, I40E_GLLAN_RCTL_0);
- wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
+
+ if (hw->revision_id == 0) {
+ /* As a workaround, clear PXE_MODE instead of setting it */
+ wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK)));
+ } else {
+ wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
+ }
+}
+
+/**
+ * i40e_led_is_mine - helper to find matching led
+ * @hw: pointer to the hw struct
+ * @idx: index into GPIO registers
+ *
+ * returns: 0 if no match, otherwise the value of the GPIO_CTL register
+ */
+static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
+{
+ u32 gpio_val = 0;
+ u32 port;
+
+ if (!hw->func_caps.led[idx])
+ return 0;
+
+ gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
+ port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
+ I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
+
+ /* if PRT_NUM_NA is 1 then this LED is not port specific, OR
+ * if it is not our port then ignore
+ */
+ if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) ||
+ (port != hw->port))
+ return 0;
+
+ return gpio_val;
}
+#define I40E_LED0 22
+#define I40E_LINK_ACTIVITY 0xC
+
/**
* i40e_led_get - return current on/off mode
* @hw: pointer to the hw struct
@@ -349,24 +477,20 @@ void i40e_clear_pxe_mode(struct i40e_hw *hw)
**/
u32 i40e_led_get(struct i40e_hw *hw)
{
- u32 gpio_val = 0;
u32 mode = 0;
- u32 port;
int i;
- for (i = 0; i < I40E_HW_CAP_MAX_GPIO; i++) {
- if (!hw->func_caps.led[i])
- continue;
-
- gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(i));
- port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK)
- >> I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
+ /* as per the documentation GPIO 22-29 are the LED
+ * GPIO pins named LED0..LED7
+ */
+ for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
+ u32 gpio_val = i40e_led_is_mine(hw, i);
- if (port != hw->port)
+ if (!gpio_val)
continue;
- mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
- >> I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT;
+ mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
+ I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
break;
}
@@ -376,60 +500,45 @@ u32 i40e_led_get(struct i40e_hw *hw)
/**
* i40e_led_set - set new on/off mode
* @hw: pointer to the hw struct
- * @mode: 0=off, else on (see EAS for mode details)
+ * @mode: 0=off, 0xf=on (else see manual for mode details)
+ * @blink: true if the LED should blink when on, false if steady
+ *
+ * If this function is used to turn on the blink, it should also be
+ * used to disable the blink when restoring the original state.
**/
-void i40e_led_set(struct i40e_hw *hw, u32 mode)
+void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
{
- u32 gpio_val = 0;
- u32 led_mode = 0;
- u32 port;
int i;
- for (i = 0; i < I40E_HW_CAP_MAX_GPIO; i++) {
- if (!hw->func_caps.led[i])
- continue;
+ if (mode & 0xfffffff0)
+ hw_dbg(hw, "invalid mode passed in %X\n", mode);
- gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(i));
- port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK)
- >> I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
+ /* as per the documentation GPIO 22-29 are the LED
+ * GPIO pins named LED0..LED7
+ */
+ for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
+ u32 gpio_val = i40e_led_is_mine(hw, i);
- if (port != hw->port)
+ if (!gpio_val)
continue;
- led_mode = (mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
- I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
- gpio_val |= led_mode;
+ /* this & is a bit of paranoia, but serves as a range check */
+ gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
+ I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
+
+ if (mode == I40E_LINK_ACTIVITY)
+ blink = false;
+
+ gpio_val |= (blink ? 1 : 0) <<
+ I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT;
+
wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
+ break;
}
}
/* Admin command wrappers */
-/**
- * i40e_aq_queue_shutdown
- * @hw: pointer to the hw struct
- * @unloading: is the driver unloading itself
- *
- * Tell the Firmware that we're shutting down the AdminQ and whether
- * or not the driver is unloading as well.
- **/
-i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
- bool unloading)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_queue_shutdown *cmd =
- (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
- i40e_status status;
-
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_queue_shutdown);
-
- if (unloading)
- cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
- status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
-
- return status;
-}
/**
* i40e_aq_set_link_restart_an
@@ -490,15 +599,16 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
goto aq_get_link_info_exit;
/* save off old link status information */
- memcpy(&hw->phy.link_info_old, hw_link_info,
- sizeof(struct i40e_link_status));
+ hw->phy.link_info_old = *hw_link_info;
/* update link status */
hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type;
+ hw->phy.media_type = i40e_get_media_type(hw);
hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
hw_link_info->link_info = resp->link_info;
hw_link_info->an_info = resp->an_info;
hw_link_info->ext_info = resp->ext_info;
+ hw_link_info->loopback = resp->loopback;
if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_ENABLE))
hw_link_info->lse_enable = true;
@@ -519,7 +629,7 @@ aq_get_link_info_exit:
/**
* i40e_aq_add_vsi
* @hw: pointer to the hw struct
- * @vsi: pointer to a vsi context struct
+ * @vsi_ctx: pointer to a vsi context struct
* @cmd_details: pointer to command details structure or NULL
*
* Add a VSI context to the hardware.
@@ -571,7 +681,8 @@ aq_add_vsi_exit:
* @cmd_details: pointer to command details structure or NULL
**/
i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
- u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details)
+ u16 seid, bool set,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
@@ -665,7 +776,7 @@ i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
/**
* i40e_get_vsi_params - get VSI configuration info
* @hw: pointer to the hw struct
- * @vsi: pointer to a vsi context struct
+ * @vsi_ctx: pointer to a vsi context struct
* @cmd_details: pointer to command details structure or NULL
**/
i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
@@ -673,8 +784,8 @@ i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
- struct i40e_aqc_switch_seid *cmd =
- (struct i40e_aqc_switch_seid *)&desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi *cmd =
+ (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
struct i40e_aqc_add_get_update_vsi_completion *resp =
(struct i40e_aqc_add_get_update_vsi_completion *)
&desc.params.raw;
@@ -683,7 +794,7 @@ i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_vsi_parameters);
- cmd->seid = cpu_to_le16(vsi_ctx->seid);
+ cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
@@ -707,7 +818,7 @@ aq_get_vsi_params_exit:
/**
* i40e_aq_update_vsi_params
* @hw: pointer to the hw struct
- * @vsi: pointer to a vsi context struct
+ * @vsi_ctx: pointer to a vsi context struct
* @cmd_details: pointer to command details structure or NULL
*
* Update a VSI context.
@@ -717,13 +828,13 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
- struct i40e_aqc_switch_seid *cmd =
- (struct i40e_aqc_switch_seid *)&desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi *cmd =
+ (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
i40e_status status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_update_vsi_parameters);
- cmd->seid = cpu_to_le16(vsi_ctx->seid);
+ cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
@@ -810,7 +921,6 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
/**
* i40e_aq_send_driver_version
* @hw: pointer to the hw struct
- * @event: driver event: driver ok, start or stop
* @dv: driver's major, minor version
* @cmd_details: pointer to command details structure or NULL
*
@@ -873,6 +983,7 @@ i40e_get_link_status_exit:
* @downlink_seid: the VSI SEID
* @enabled_tc: bitmap of TCs to be enabled
* @default_port: true for default port VSI, false for control port
+ * @enable_l2_filtering: true to add L2 filter table rules to regular
+ *                       forwarding rules for cloud support
* @veb_seid: pointer to where to put the resulting VEB SEID
* @cmd_details: pointer to command details structure or NULL
*
@@ -881,7 +992,8 @@ i40e_get_link_status_exit:
**/
i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
u16 downlink_seid, u8 enabled_tc,
- bool default_port, u16 *veb_seid,
+ bool default_port, bool enable_l2_filtering,
+ u16 *veb_seid,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
@@ -907,6 +1019,10 @@ i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT;
else
veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA;
+
+ if (enable_l2_filtering)
+ veb_flags |= I40E_AQC_ADD_VEB_ENABLE_L2_FILTER;
+
cmd->veb_flags = cpu_to_le16(veb_flags);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -922,10 +1038,10 @@ i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
* @hw: pointer to the hw struct
* @veb_seid: the SEID of the VEB to query
* @switch_id: the uplink switch id
- * @floating_veb: set to true if the VEB is floating
+ * @floating: set to true if the VEB is floating
* @statistic_index: index of the stats counter block for this VEB
* @vebs_used: number of VEB's used by function
- * @vebs_unallocated: total VEB's not reserved by any function
+ * @vebs_free: total VEB's not reserved by any function
* @cmd_details: pointer to command details structure or NULL
*
* This retrieves the parameters for a particular VEB, specified by
@@ -1059,89 +1175,11 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
}
/**
- * i40e_aq_add_vlan - Add VLAN ids to the HW filtering
- * @hw: pointer to the hw struct
- * @seid: VSI for the vlan filters
- * @v_list: list of vlan filters to be added
- * @count: length of the list
- * @cmd_details: pointer to command details structure or NULL
- **/
-i40e_status i40e_aq_add_vlan(struct i40e_hw *hw, u16 seid,
- struct i40e_aqc_add_remove_vlan_element_data *v_list,
- u8 count, struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_macvlan *cmd =
- (struct i40e_aqc_macvlan *)&desc.params.raw;
- i40e_status status;
- u16 buf_size;
-
- if (count == 0 || !v_list || !hw)
- return I40E_ERR_PARAM;
-
- buf_size = count * sizeof(struct i40e_aqc_add_remove_vlan_element_data);
-
- /* prep the rest of the request */
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_vlan);
- cmd->num_addresses = cpu_to_le16(count);
- cmd->seid[0] = cpu_to_le16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID);
- cmd->seid[1] = 0;
- cmd->seid[2] = 0;
-
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
- if (buf_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
-
- status = i40e_asq_send_command(hw, &desc, v_list, buf_size,
- cmd_details);
-
- return status;
-}
-
-/**
- * i40e_aq_remove_vlan - Remove VLANs from the HW filtering
- * @hw: pointer to the hw struct
- * @seid: VSI for the vlan filters
- * @v_list: list of macvlans to be removed
- * @count: length of the list
- * @cmd_details: pointer to command details structure or NULL
- **/
-i40e_status i40e_aq_remove_vlan(struct i40e_hw *hw, u16 seid,
- struct i40e_aqc_add_remove_vlan_element_data *v_list,
- u8 count, struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_macvlan *cmd =
- (struct i40e_aqc_macvlan *)&desc.params.raw;
- i40e_status status;
- u16 buf_size;
-
- if (count == 0 || !v_list || !hw)
- return I40E_ERR_PARAM;
-
- buf_size = count * sizeof(struct i40e_aqc_add_remove_vlan_element_data);
-
- /* prep the rest of the request */
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_vlan);
- cmd->num_addresses = cpu_to_le16(count);
- cmd->seid[0] = cpu_to_le16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID);
- cmd->seid[1] = 0;
- cmd->seid[2] = 0;
-
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
- if (buf_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
-
- status = i40e_asq_send_command(hw, &desc, v_list, buf_size,
- cmd_details);
-
- return status;
-}
-
-/**
* i40e_aq_send_msg_to_vf
* @hw: pointer to the hardware structure
* @vfid: vf id to send msg
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
* @msg: pointer to the msg buffer
* @msglen: msg length
* @cmd_details: pointer to command details
@@ -1519,8 +1557,8 @@ i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aqc_list_capabilites *cmd;
- i40e_status status = 0;
struct i40e_aq_desc desc;
+ i40e_status status = 0;
cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;
@@ -1681,6 +1719,63 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
}
/**
+ * i40e_aq_add_udp_tunnel
+ * @hw: pointer to the hw struct
+ * @udp_port: the UDP port to add
+ * @header_len: length of the tunneling header length in DWords
+ * @protocol_index: protocol index type
+ * @filter_index: pointer to filter index
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
+ u16 udp_port, u8 header_len,
+ u8 protocol_index, u8 *filter_index,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_udp_tunnel *cmd =
+ (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw;
+ struct i40e_aqc_del_udp_tunnel_completion *resp =
+ (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel);
+
+ cmd->udp_port = cpu_to_le16(udp_port);
+ cmd->protocol_type = protocol_index;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status)
+ *filter_index = resp->index;
+
+ return status;
+}
+
+/**
+ * i40e_aq_del_udp_tunnel
+ * @hw: pointer to the hw struct
+ * @index: filter index
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_remove_udp_tunnel *cmd =
+ (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel);
+
+ cmd->index = index;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
* i40e_aq_delete_element - Delete switch element
* @hw: pointer to the hw struct
* @seid: the SEID to delete from the switch
@@ -1709,6 +1804,28 @@ i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
}
/**
+ * i40e_aq_dcb_updated - DCB Updated Command
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * EMP will return when the shared RPB settings have been
+ * recomputed and modified. The retval field in the descriptor
+ * will be set to 0 when RPB is modified.
+ **/
+i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
* i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler
* @hw: pointer to the hw struct
* @seid: seid for the physical port/switching component/vsi
@@ -1787,6 +1904,40 @@ i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
}
/**
+ * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component connected to Physical Port
+ * @ets_data: Buffer holding ETS parameters
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
+ enum i40e_admin_queue_opc opcode,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data,
+ sizeof(*ets_data), opcode, cmd_details);
+}
+
+/**
+ * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component
+ * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_configure_switching_comp_bw_config,
+ cmd_details);
+}
+
+/**
* i40e_aq_query_vsi_bw_config - Query VSI BW configuration
* @hw: pointer to the hw struct
* @seid: seid of the VSI
@@ -2039,3 +2190,110 @@ i40e_status i40e_set_filter_control(struct i40e_hw *hw,
return 0;
}
+
+/**
+ * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter
+ * @hw: pointer to the hw struct
+ * @mac_addr: MAC address to use in the filter
+ * @ethtype: Ethertype to use in the filter
+ * @flags: Flags that need to be applied to the filter
+ * @vsi_seid: seid of the control VSI
+ * @queue: VSI queue number to send the packet to
+ * @is_add: true to add the control packet filter, false to remove it
+ * @stats: Structure to hold information on control filter counts
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This command adds or removes a control packet filter for a control VSI.
+ * On return it updates the perfect filter counts in the stats member.
+ **/
+i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+ u8 *mac_addr, u16 ethtype, u16 flags,
+ u16 vsi_seid, u16 queue, bool is_add,
+ struct i40e_control_filter_stats *stats,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_control_packet_filter *cmd =
+ (struct i40e_aqc_add_remove_control_packet_filter *)
+ &desc.params.raw;
+ struct i40e_aqc_add_remove_control_packet_filter_completion *resp =
+ (struct i40e_aqc_add_remove_control_packet_filter_completion *)
+ &desc.params.raw;
+ i40e_status status;
+
+ if (vsi_seid == 0)
+ return I40E_ERR_PARAM;
+
+ if (is_add) {
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_add_control_packet_filter);
+ cmd->queue = cpu_to_le16(queue);
+ } else {
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_remove_control_packet_filter);
+ }
+
+ if (mac_addr)
+ memcpy(cmd->mac, mac_addr, ETH_ALEN);
+
+ cmd->etype = cpu_to_le16(ethtype);
+ cmd->flags = cpu_to_le16(flags);
+ cmd->seid = cpu_to_le16(vsi_seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status && stats) {
+ stats->mac_etype_used = le16_to_cpu(resp->mac_etype_used);
+ stats->etype_used = le16_to_cpu(resp->etype_used);
+ stats->mac_etype_free = le16_to_cpu(resp->mac_etype_free);
+ stats->etype_free = le16_to_cpu(resp->etype_free);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_set_pci_config_data - store PCI bus info
+ * @hw: pointer to hardware structure
+ * @link_status: the link status word from PCI config space
+ *
+ * Stores the PCI bus info (speed, width, type) within the i40e_hw structure
+ **/
+void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
+{
+ hw->bus.type = i40e_bus_type_pci_express;
+
+ switch (link_status & PCI_EXP_LNKSTA_NLW) {
+ case PCI_EXP_LNKSTA_NLW_X1:
+ hw->bus.width = i40e_bus_width_pcie_x1;
+ break;
+ case PCI_EXP_LNKSTA_NLW_X2:
+ hw->bus.width = i40e_bus_width_pcie_x2;
+ break;
+ case PCI_EXP_LNKSTA_NLW_X4:
+ hw->bus.width = i40e_bus_width_pcie_x4;
+ break;
+ case PCI_EXP_LNKSTA_NLW_X8:
+ hw->bus.width = i40e_bus_width_pcie_x8;
+ break;
+ default:
+ hw->bus.width = i40e_bus_width_unknown;
+ break;
+ }
+
+ switch (link_status & PCI_EXP_LNKSTA_CLS) {
+ case PCI_EXP_LNKSTA_CLS_2_5GB:
+ hw->bus.speed = i40e_bus_speed_2500;
+ break;
+ case PCI_EXP_LNKSTA_CLS_5_0GB:
+ hw->bus.speed = i40e_bus_speed_5000;
+ break;
+ case PCI_EXP_LNKSTA_CLS_8_0GB:
+ hw->bus.speed = i40e_bus_speed_8000;
+ break;
+ default:
+ hw->bus.speed = i40e_bus_speed_unknown;
+ break;
+ }
+}
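
The two switch statements above decode fields of the standard PCIe Link Status register. A minimal sketch of the same extraction using the register's documented layout (the sample value is made up):

#include <stdint.h>
#include <stdio.h>

/* PCIe Link Status register layout, as consumed above:
 * bits 3:0 hold the current link speed code, bits 9:4 the negotiated width.
 */
#define LNKSTA_CLS_MASK		0x000f
#define LNKSTA_NLW_MASK		0x03f0
#define LNKSTA_NLW_SHIFT	4

int main(void)
{
	uint16_t link_status = 0x0081;	/* example: speed code 1, width x8 */

	printf("speed code %u, width x%u\n",
	       (unsigned)(link_status & LNKSTA_CLS_MASK),
	       (unsigned)((link_status & LNKSTA_NLW_MASK) >> LNKSTA_NLW_SHIFT));
	return 0;
}
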
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
new file mode 100644
index 000000000000..50730141bb7b
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -0,0 +1,469 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+#include "i40e_dcb.h"
+
+/**
+ * i40e_get_dcbx_status
+ * @hw: pointer to the hw struct
+ * @status: Embedded DCBX Engine Status
+ *
+ * Get the DCBX status from the Firmware
+ **/
+i40e_status i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status)
+{
+ u32 reg;
+
+ if (!status)
+ return I40E_ERR_PARAM;
+
+ reg = rd32(hw, I40E_PRTDCB_GENS);
+ *status = (u16)((reg & I40E_PRTDCB_GENS_DCBX_STATUS_MASK) >>
+ I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT);
+
+ return 0;
+}
+
+/**
+ * i40e_parse_ieee_etscfg_tlv
+ * @tlv: IEEE 802.1Qaz ETS CFG TLV
+ * @dcbcfg: Local store to update ETS CFG data
+ *
+ * Parses IEEE 802.1Qaz ETS CFG TLV
+ **/
+static void i40e_parse_ieee_etscfg_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ struct i40e_ieee_ets_config *etscfg;
+ u8 *buf = tlv->tlvinfo;
+ u16 offset = 0;
+ u8 priority;
+ int i;
+
+ /* First Octet post subtype
+ * --------------------------
+ * |will-|CBS | Re- | Max |
+ * |ing | |served| TCs |
+ * --------------------------
+ * |1bit | 1bit|3 bits|3bits|
+ */
+ etscfg = &dcbcfg->etscfg;
+ etscfg->willing = (u8)((buf[offset] & I40E_IEEE_ETS_WILLING_MASK) >>
+ I40E_IEEE_ETS_WILLING_SHIFT);
+ etscfg->cbs = (u8)((buf[offset] & I40E_IEEE_ETS_CBS_MASK) >>
+ I40E_IEEE_ETS_CBS_SHIFT);
+ etscfg->maxtcs = (u8)((buf[offset] & I40E_IEEE_ETS_MAXTC_MASK) >>
+ I40E_IEEE_ETS_MAXTC_SHIFT);
+
+ /* Move offset to Priority Assignment Table */
+ offset++;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >>
+ I40E_IEEE_ETS_PRIO_1_SHIFT);
+ etscfg->prioritytable[i * 2] = priority;
+ priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >>
+ I40E_IEEE_ETS_PRIO_0_SHIFT);
+ etscfg->prioritytable[i * 2 + 1] = priority;
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ etscfg->tcbwtable[i] = buf[offset++];
+
+ /* TSA Assignment Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ etscfg->tsatable[i] = buf[offset++];
+}
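
The first configuration octet packs three fields exactly as the comment block above diagrams. A standalone sketch of pulling them apart (mask names mirror the driver's but are redefined locally):

#include <stdint.h>
#include <stdio.h>

/* First ETS CFG octet: | willing:1 | cbs:1 | reserved:3 | maxtcs:3 | */
#define ETS_WILLING_SHIFT	7
#define ETS_WILLING_MASK	(0x1 << ETS_WILLING_SHIFT)
#define ETS_CBS_SHIFT		6
#define ETS_CBS_MASK		(0x1 << ETS_CBS_SHIFT)
#define ETS_MAXTC_MASK		0x7

int main(void)
{
	uint8_t octet = 0x83;	/* example: willing=1, cbs=0, maxtcs=3 */

	printf("willing=%u cbs=%u maxtcs=%u\n",
	       (unsigned)((octet & ETS_WILLING_MASK) >> ETS_WILLING_SHIFT),
	       (unsigned)((octet & ETS_CBS_MASK) >> ETS_CBS_SHIFT),
	       (unsigned)(octet & ETS_MAXTC_MASK));
	return 0;
}
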
+
+/**
+ * i40e_parse_ieee_etsrec_tlv
+ * @tlv: IEEE 802.1Qaz ETS REC TLV
+ * @dcbcfg: Local store to update ETS REC data
+ *
+ * Parses IEEE 802.1Qaz ETS REC TLV
+ **/
+static void i40e_parse_ieee_etsrec_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+ u16 offset = 0;
+ u8 priority;
+ int i;
+
+ /* Move offset to priority table */
+ offset++;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >>
+ I40E_IEEE_ETS_PRIO_1_SHIFT);
+ dcbcfg->etsrec.prioritytable[i*2] = priority;
+ priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >>
+ I40E_IEEE_ETS_PRIO_0_SHIFT);
+ dcbcfg->etsrec.prioritytable[i*2 + 1] = priority;
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ dcbcfg->etsrec.tcbwtable[i] = buf[offset++];
+
+ /* TSA Assignment Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ dcbcfg->etsrec.tsatable[i] = buf[offset++];
+}
+
+/**
+ * i40e_parse_ieee_pfccfg_tlv
+ * @tlv: IEEE 802.1Qaz PFC CFG TLV
+ * @dcbcfg: Local store to update PFC CFG data
+ *
+ * Parses IEEE 802.1Qaz PFC CFG TLV
+ **/
+static void i40e_parse_ieee_pfccfg_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+
+ /* ----------------------------------------
+ * |will-|MBC | Re- | PFC | PFC Enable |
+ * |ing | |served| cap | |
+ * -----------------------------------------
+ * |1bit | 1bit|2 bits|4bits| 1 octet |
+ */
+ dcbcfg->pfc.willing = (u8)((buf[0] & I40E_IEEE_PFC_WILLING_MASK) >>
+ I40E_IEEE_PFC_WILLING_SHIFT);
+ dcbcfg->pfc.mbc = (u8)((buf[0] & I40E_IEEE_PFC_MBC_MASK) >>
+ I40E_IEEE_PFC_MBC_SHIFT);
+ dcbcfg->pfc.pfccap = (u8)((buf[0] & I40E_IEEE_PFC_CAP_MASK) >>
+ I40E_IEEE_PFC_CAP_SHIFT);
+ dcbcfg->pfc.pfcenable = buf[1];
+}
+
+/**
+ * i40e_parse_ieee_app_tlv
+ * @tlv: IEEE 802.1Qaz APP TLV
+ * @dcbcfg: Local store to update APP PRIO data
+ *
+ * Parses IEEE 802.1Qaz APP PRIO TLV
+ **/
+static void i40e_parse_ieee_app_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 typelength;
+ u16 offset = 0;
+ u16 length;
+ int i = 0;
+ u8 *buf;
+
+ typelength = ntohs(tlv->typelength);
+ length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+ buf = tlv->tlvinfo;
+
+ /* The App priority table starts 5 octets after TLV header */
+ length -= (sizeof(tlv->ouisubtype) + 1);
+
+ /* Move offset to App Priority Table */
+ offset++;
+
+ /* Application Priority Table (3 octets)
+ * Octets:| 1 | 2 | 3 |
+ * -----------------------------------------
+ * |Priority|Rsrvd| Sel | Protocol ID |
+ * -----------------------------------------
+ * Bits:|23 21|20 19|18 16|15 0|
+ * -----------------------------------------
+ */
+ while (offset < length) {
+ dcbcfg->app[i].priority = (u8)((buf[offset] &
+ I40E_IEEE_APP_PRIO_MASK) >>
+ I40E_IEEE_APP_PRIO_SHIFT);
+ dcbcfg->app[i].selector = (u8)((buf[offset] &
+ I40E_IEEE_APP_SEL_MASK) >>
+ I40E_IEEE_APP_SEL_SHIFT);
+ dcbcfg->app[i].protocolid = (buf[offset + 1] << 0x8) |
+ buf[offset + 2];
+ /* Move to next app */
+ offset += 3;
+ i++;
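+ /* stop once the local app table is full; extra TLV entries are dropped */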
+ if (i >= I40E_DCBX_MAX_APPS)
+ break;
+ }
+
+ dcbcfg->numapps = i;
+}
+
+/**
+ * i40e_parse_ieee_tlv
+ * @tlv: IEEE 802.1Qaz TLV
+ * @dcbcfg: Local store to update DCBX config data
+ *
+ * Get the TLV subtype and send it to parsing function
+ * based on the subtype value
+ **/
+static void i40e_parse_ieee_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u32 ouisubtype;
+ u8 subtype;
+
+ ouisubtype = ntohl(tlv->ouisubtype);
+ subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >>
+ I40E_LLDP_TLV_SUBTYPE_SHIFT);
+ switch (subtype) {
+ case I40E_IEEE_SUBTYPE_ETS_CFG:
+ i40e_parse_ieee_etscfg_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_SUBTYPE_ETS_REC:
+ i40e_parse_ieee_etsrec_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_SUBTYPE_PFC_CFG:
+ i40e_parse_ieee_pfccfg_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_SUBTYPE_APP_PRI:
+ i40e_parse_ieee_app_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_parse_org_tlv
+ * @tlv: Organization specific TLV
+ * @dcbcfg: Local store to update DCBX configuration data
+ *
+ * Currently only the IEEE 802.1Qaz TLV is supported; all
+ * other organization specific TLVs are ignored
+ **/
+static void i40e_parse_org_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u32 ouisubtype;
+ u32 oui;
+
+ ouisubtype = ntohl(tlv->ouisubtype);
+ oui = (u32)((ouisubtype & I40E_LLDP_TLV_OUI_MASK) >>
+ I40E_LLDP_TLV_OUI_SHIFT);
+ switch (oui) {
+ case I40E_IEEE_8021QAZ_OUI:
+ i40e_parse_ieee_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_lldp_to_dcb_config
+ * @lldpmib: LLDPDU to be parsed
+ * @dcbcfg: store for parsed DCB configuration data
+ *
+ * Parse DCB configuration from the LLDPDU
+ **/
+i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ i40e_status ret = 0;
+ struct i40e_lldp_org_tlv *tlv;
+ u16 type;
+ u16 length;
+ u16 typelength;
+
+ if (!lldpmib || !dcbcfg)
+ return I40E_ERR_PARAM;
+
+ /* set to the start of LLDPDU */
+ lldpmib += ETH_HLEN;
+ tlv = (struct i40e_lldp_org_tlv *)lldpmib;
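+ /* walk the TLV chain; type 0 marks the End of LLDPDU */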
+ while (tlv) {
+ typelength = ntohs(tlv->typelength);
+ type = (u16)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
+ I40E_LLDP_TLV_TYPE_SHIFT);
+ length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+
+ if (type == I40E_TLV_TYPE_END)
+ break; /* END TLV break out */
+
+ switch (type) {
+ case I40E_TLV_TYPE_ORG:
+ i40e_parse_org_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+
+ /* Move to next TLV */
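+ /* (the encoded length excludes the 2-byte typelength header) */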
+ tlv = (struct i40e_lldp_org_tlv *)((char *)tlv +
+ sizeof(tlv->typelength) +
+ length);
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_aq_get_dcb_config
+ * @hw: pointer to the hw struct
+ * @mib_type: mib type for the query
+ * @bridgetype: bridge type for the query (remote)
+ * @dcbcfg: store for parsed DCB configuration data
+ *
+ * Query DCB configuration from the Firmware
+ **/
+i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
+ u8 bridgetype,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ i40e_status ret = 0;
+ struct i40e_virt_mem mem;
+ u8 *lldpmib;
+
+ /* Allocate the LLDPDU */
+ ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE);
+ if (ret)
+ return ret;
+
+ lldpmib = (u8 *)mem.va;
+ ret = i40e_aq_get_lldp_mib(hw, bridgetype, mib_type,
+ (void *)lldpmib, I40E_LLDPDU_SIZE,
+ NULL, NULL, NULL);
+ if (ret)
+ goto free_mem;
+
+ /* Parse LLDP MIB to get dcb configuration */
+ ret = i40e_lldp_to_dcb_config(lldpmib, dcbcfg);
+
+free_mem:
+ i40e_free_virt_mem(hw, &mem);
+ return ret;
+}
+
+/**
+ * i40e_get_dcb_config
+ * @hw: pointer to the hw struct
+ *
+ * Get DCB configuration from the Firmware
+ **/
+i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
+{
+ i40e_status ret = 0;
+
+ /* Get Local DCB Config */
+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
+ &hw->local_dcbx_config);
+ if (ret)
+ goto out;
+
+ /* Get Remote DCB Config */
+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
+ I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+ &hw->remote_dcbx_config);
+out:
+ return ret;
+}
+
+/**
+ * i40e_init_dcb
+ * @hw: pointer to the hw struct
+ *
+ * Update DCB configuration from the Firmware
+ **/
+i40e_status i40e_init_dcb(struct i40e_hw *hw)
+{
+ i40e_status ret = 0;
+
+ if (!hw->func_caps.dcb)
+ return ret;
+
+ /* Get DCBX status */
+ ret = i40e_get_dcbx_status(hw, &hw->dcbx_status);
+ if (ret)
+ return ret;
+
+ /* Check the DCBX Status */
+ switch (hw->dcbx_status) {
+ case I40E_DCBX_STATUS_DONE:
+ case I40E_DCBX_STATUS_IN_PROGRESS:
+ /* Get current DCBX configuration */
+ ret = i40e_get_dcb_config(hw);
+ break;
+ case I40E_DCBX_STATUS_DISABLED:
+ return ret;
+ case I40E_DCBX_STATUS_NOT_STARTED:
+ case I40E_DCBX_STATUS_MULTIPLE_PEERS:
+ default:
+ break;
+ }
+
+ /* Configure the LLDP MIB change event */
+ ret = i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL);
+ if (ret)
+ return ret;
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
new file mode 100644
index 000000000000..34cf1c30c7ff
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -0,0 +1,107 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_DCB_H_
+#define _I40E_DCB_H_
+
+#include "i40e_type.h"
+
+#define I40E_DCBX_STATUS_NOT_STARTED 0
+#define I40E_DCBX_STATUS_IN_PROGRESS 1
+#define I40E_DCBX_STATUS_DONE 2
+#define I40E_DCBX_STATUS_MULTIPLE_PEERS 3
+#define I40E_DCBX_STATUS_DISABLED 7
+
+#define I40E_TLV_TYPE_END 0
+#define I40E_TLV_TYPE_ORG 127
+
+#define I40E_IEEE_8021QAZ_OUI 0x0080C2
+#define I40E_IEEE_SUBTYPE_ETS_CFG 9
+#define I40E_IEEE_SUBTYPE_ETS_REC 10
+#define I40E_IEEE_SUBTYPE_PFC_CFG 11
+#define I40E_IEEE_SUBTYPE_APP_PRI 12
+
+/* Defines for LLDP TLV header */
+#define I40E_LLDP_TLV_LEN_SHIFT 0
+#define I40E_LLDP_TLV_LEN_MASK (0x01FF << I40E_LLDP_TLV_LEN_SHIFT)
+#define I40E_LLDP_TLV_TYPE_SHIFT 9
+#define I40E_LLDP_TLV_TYPE_MASK (0x7F << I40E_LLDP_TLV_TYPE_SHIFT)
+#define I40E_LLDP_TLV_SUBTYPE_SHIFT 0
+#define I40E_LLDP_TLV_SUBTYPE_MASK (0xFF << I40E_LLDP_TLV_SUBTYPE_SHIFT)
+#define I40E_LLDP_TLV_OUI_SHIFT 8
+#define I40E_LLDP_TLV_OUI_MASK (0xFFFFFF << I40E_LLDP_TLV_OUI_SHIFT)
+
+/* Defines for IEEE ETS TLV */
+#define I40E_IEEE_ETS_MAXTC_SHIFT 0
+#define I40E_IEEE_ETS_MAXTC_MASK (0x7 << I40E_IEEE_ETS_MAXTC_SHIFT)
+#define I40E_IEEE_ETS_CBS_SHIFT 6
+#define I40E_IEEE_ETS_CBS_MASK (0x1 << I40E_IEEE_ETS_CBS_SHIFT)
+#define I40E_IEEE_ETS_WILLING_SHIFT 7
+#define I40E_IEEE_ETS_WILLING_MASK (0x1 << I40E_IEEE_ETS_WILLING_SHIFT)
+#define I40E_IEEE_ETS_PRIO_0_SHIFT 0
+#define I40E_IEEE_ETS_PRIO_0_MASK (0x7 << I40E_IEEE_ETS_PRIO_0_SHIFT)
+#define I40E_IEEE_ETS_PRIO_1_SHIFT 4
+#define I40E_IEEE_ETS_PRIO_1_MASK (0x7 << I40E_IEEE_ETS_PRIO_1_SHIFT)
+
+/* Defines for IEEE TSA types */
+#define I40E_IEEE_TSA_STRICT 0
+#define I40E_IEEE_TSA_ETS 2
+
+/* Defines for IEEE PFC TLV */
+#define I40E_IEEE_PFC_CAP_SHIFT 0
+#define I40E_IEEE_PFC_CAP_MASK (0xF << I40E_IEEE_PFC_CAP_SHIFT)
+#define I40E_IEEE_PFC_MBC_SHIFT 6
+#define I40E_IEEE_PFC_MBC_MASK (0x1 << I40E_IEEE_PFC_MBC_SHIFT)
+#define I40E_IEEE_PFC_WILLING_SHIFT 7
+#define I40E_IEEE_PFC_WILLING_MASK (0x1 << I40E_IEEE_PFC_WILLING_SHIFT)
+
+/* Defines for IEEE APP TLV */
+#define I40E_IEEE_APP_SEL_SHIFT 0
+#define I40E_IEEE_APP_SEL_MASK (0x7 << I40E_IEEE_APP_SEL_SHIFT)
+#define I40E_IEEE_APP_PRIO_SHIFT 5
+#define I40E_IEEE_APP_PRIO_MASK (0x7 << I40E_IEEE_APP_PRIO_SHIFT)
+
+#pragma pack(1)
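+/* TLVs are read straight off the wire, so these structs must not be padded */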
+
+/* IEEE 802.1AB LLDP Organization specific TLV */
+struct i40e_lldp_org_tlv {
+ __be16 typelength;
+ __be32 ouisubtype;
+ u8 tlvinfo[1];
+};
+#pragma pack()
+
+i40e_status i40e_get_dcbx_status(struct i40e_hw *hw,
+ u16 *status);
+i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
+ struct i40e_dcbx_config *dcbcfg);
+i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
+ u8 bridgetype,
+ struct i40e_dcbx_config *dcbcfg);
+i40e_status i40e_get_dcb_config(struct i40e_hw *hw);
+i40e_status i40e_init_dcb(struct i40e_hw *hw);
+#endif /* _I40E_DCB_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
new file mode 100644
index 000000000000..6e8103abfd0d
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -0,0 +1,316 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifdef CONFIG_I40E_DCB
+#include "i40e.h"
+#include <net/dcbnl.h>
+
+/**
+ * i40e_get_pfc_delay - retrieve PFC Link Delay
+ * @hw: pointer to hardware struct
+ * @delay: holds the PFC Link delay value
+ *
+ * Returns PFC Link Delay from the PRTDCB_GENC.PFCLDA
+ **/
+static void i40e_get_pfc_delay(struct i40e_hw *hw, u16 *delay)
+{
+ u32 val;
+
+ val = rd32(hw, I40E_PRTDCB_GENC);
+ *delay = (u16)((val & I40E_PRTDCB_GENC_PFCLDA_MASK) >>
+ I40E_PRTDCB_GENC_PFCLDA_SHIFT);
+}
+
+/**
+ * i40e_dcbnl_ieee_getets - retrieve local IEEE ETS configuration
+ * @netdev: the corresponding netdev
+ * @ets: structure to hold the ETS information
+ *
+ * Returns local IEEE ETS configuration
+ **/
+static int i40e_dcbnl_ieee_getets(struct net_device *dev,
+ struct ieee_ets *ets)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+ struct i40e_dcbx_config *dcbxcfg;
+ struct i40e_hw *hw = &pf->hw;
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ return -EINVAL;
+
+ dcbxcfg = &hw->local_dcbx_config;
+ ets->willing = dcbxcfg->etscfg.willing;
+ ets->ets_cap = dcbxcfg->etscfg.maxtcs;
+ ets->cbs = dcbxcfg->etscfg.cbs;
+ memcpy(ets->tc_tx_bw, dcbxcfg->etscfg.tcbwtable,
+ sizeof(ets->tc_tx_bw));
+ memcpy(ets->tc_rx_bw, dcbxcfg->etscfg.tcbwtable,
+ sizeof(ets->tc_rx_bw));
+ memcpy(ets->tc_tsa, dcbxcfg->etscfg.tsatable,
+ sizeof(ets->tc_tsa));
+ memcpy(ets->prio_tc, dcbxcfg->etscfg.prioritytable,
+ sizeof(ets->prio_tc));
+ memcpy(ets->tc_reco_bw, dcbxcfg->etsrec.tcbwtable,
+ sizeof(ets->tc_reco_bw));
+ memcpy(ets->tc_reco_tsa, dcbxcfg->etsrec.tsatable,
+ sizeof(ets->tc_reco_tsa));
+ memcpy(ets->reco_prio_tc, dcbxcfg->etsrec.prioritytable,
+ sizeof(ets->reco_prio_tc));
+
+ return 0;
+}
+
+/**
+ * i40e_dcbnl_ieee_getpfc - retrieve local IEEE PFC configuration
+ * @dev: the corresponding netdev
+ * @pfc: structure to hold the PFC information
+ *
+ * Returns local IEEE PFC configuration
+ **/
+static int i40e_dcbnl_ieee_getpfc(struct net_device *dev,
+ struct ieee_pfc *pfc)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+ struct i40e_dcbx_config *dcbxcfg;
+ struct i40e_hw *hw = &pf->hw;
+ int i;
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ return -EINVAL;
+
+ dcbxcfg = &hw->local_dcbx_config;
+ pfc->pfc_cap = dcbxcfg->pfc.pfccap;
+ pfc->pfc_en = dcbxcfg->pfc.pfcenable;
+ pfc->mbc = dcbxcfg->pfc.mbc;
+ i40e_get_pfc_delay(hw, &pfc->delay);
+
+ /* Get Requests/Indications */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ pfc->requests[i] = pf->stats.priority_xoff_tx[i];
+ pfc->indications[i] = pf->stats.priority_xoff_rx[i];
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_dcbnl_getdcbx - retrieve current DCBx capability
+ * @dev: the corresponding netdev
+ *
+ * Returns DCBx capability features
+ **/
+static u8 i40e_dcbnl_getdcbx(struct net_device *dev)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+
+ return pf->dcbx_cap;
+}
+
+/**
+ * i40e_dcbnl_get_perm_hw_addr - MAC address used by DCBx
+ * @dev: the corresponding netdev
+ * @perm_addr: buffer to store the MAC address
+ *
+ * Returns the SAN MAC address used for LLDP exchange
+ **/
+static void i40e_dcbnl_get_perm_hw_addr(struct net_device *dev,
+ u8 *perm_addr)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+ int i, j;
+
+ memset(perm_addr, 0xff, MAX_ADDR_LEN);
+
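+ /* LAN MAC first, followed by the SAN MAC used for LLDP exchange */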
+ for (i = 0; i < dev->addr_len; i++)
+ perm_addr[i] = pf->hw.mac.perm_addr[i];
+
+ for (j = 0; j < dev->addr_len; j++, i++)
+ perm_addr[i] = pf->hw.mac.san_addr[j];
+}
+
+static const struct dcbnl_rtnl_ops dcbnl_ops = {
+ .ieee_getets = i40e_dcbnl_ieee_getets,
+ .ieee_getpfc = i40e_dcbnl_ieee_getpfc,
+ .getdcbx = i40e_dcbnl_getdcbx,
+ .getpermhwaddr = i40e_dcbnl_get_perm_hw_addr,
+};
+
+/**
+ * i40e_dcbnl_set_all - set all the apps and ieee data from DCBx config
+ * @vsi: the corresponding vsi
+ *
+ * Set up all the IEEE APPs in the DCBNL App Table and generate an
+ * event to notify user space of the other settings
+ **/
+void i40e_dcbnl_set_all(struct i40e_vsi *vsi)
+{
+ struct net_device *dev = vsi->netdev;
+ struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+ struct i40e_dcbx_config *dcbxcfg;
+ struct i40e_hw *hw = &pf->hw;
+ struct dcb_app sapp;
+ u8 prio, tc_map;
+ int i;
+
+ /* DCB not enabled */
+ if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
+ return;
+
+ dcbxcfg = &hw->local_dcbx_config;
+
+ /* Set up all the App TLVs if DCBx is negotiated */
+ for (i = 0; i < dcbxcfg->numapps; i++) {
+ prio = dcbxcfg->app[i].priority;
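+ /* the ETS priority table maps this priority to a traffic class */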
+ tc_map = (1 << dcbxcfg->etscfg.prioritytable[prio]);
+
+ /* Add APP only if the TC is enabled for this VSI */
+ if (tc_map & vsi->tc_config.enabled_tc) {
+ sapp.selector = dcbxcfg->app[i].selector;
+ sapp.protocol = dcbxcfg->app[i].protocolid;
+ sapp.priority = prio;
+ dcb_ieee_setapp(dev, &sapp);
+ }
+ }
+
+ /* Notify user-space of the changes */
+ dcbnl_ieee_notify(dev, RTM_SETDCB, DCB_CMD_IEEE_SET, 0, 0);
+}
+
+/**
+ * i40e_dcbnl_vsi_del_app - Delete APP for given VSI
+ * @vsi: the corresponding vsi
+ * @app: APP to delete
+ *
+ * Delete given APP from the DCBNL APP table for given
+ * VSI
+ **/
+static int i40e_dcbnl_vsi_del_app(struct i40e_vsi *vsi,
+ struct i40e_ieee_app_priority_table *app)
+{
+ struct net_device *dev = vsi->netdev;
+ struct dcb_app sapp;
+
+ if (!dev)
+ return -EINVAL;
+
+ sapp.selector = app->selector;
+ sapp.protocol = app->protocolid;
+ sapp.priority = app->priority;
+ return dcb_ieee_delapp(dev, &sapp);
+}
+
+/**
+ * i40e_dcbnl_del_app - Delete APP on all VSIs
+ * @pf: the corresponding pf
+ * @app: APP to delete
+ *
+ * Delete given APP from all the VSIs for given PF
+ **/
+static void i40e_dcbnl_del_app(struct i40e_pf *pf,
+ struct i40e_ieee_app_priority_table *app)
+{
+ int v, err;
+
+ for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ if (pf->vsi[v] && pf->vsi[v]->netdev) {
+ err = i40e_dcbnl_vsi_del_app(pf->vsi[v], app);
+ if (err)
+ dev_info(&pf->pdev->dev, "%s: Failed deleting app for VSI seid=%d err=%d sel=%d proto=0x%x prio=%d\n",
+ __func__, pf->vsi[v]->seid,
+ err, app->selector,
+ app->protocolid, app->priority);
+ }
+ }
+}
+
+/**
+ * i40e_dcbnl_find_app - Search APP in given DCB config
+ * @cfg: DCBX configuration data
+ * @app: APP to search for
+ *
+ * Find given APP in the DCB configuration
+ **/
+static bool i40e_dcbnl_find_app(struct i40e_dcbx_config *cfg,
+ struct i40e_ieee_app_priority_table *app)
+{
+ int i;
+
+ for (i = 0; i < cfg->numapps; i++) {
+ if (app->selector == cfg->app[i].selector &&
+ app->protocolid == cfg->app[i].protocolid &&
+ app->priority == cfg->app[i].priority)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * i40e_dcbnl_flush_apps - Delete all removed APPs
+ * @pf: the corresponding pf
+ * @new_cfg: new DCBX configuration data
+ *
+ * Find and delete all APPs that are not present in the passed
+ * DCB configuration
+ **/
+void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
+ struct i40e_dcbx_config *new_cfg)
+{
+ struct i40e_ieee_app_priority_table app;
+ struct i40e_dcbx_config *dcbxcfg;
+ struct i40e_hw *hw = &pf->hw;
+ int i;
+
+ dcbxcfg = &hw->local_dcbx_config;
+ for (i = 0; i < dcbxcfg->numapps; i++) {
+ app = dcbxcfg->app[i];
+ /* The APP is no longer available; delete it */
+ if (!i40e_dcbnl_find_app(new_cfg, &app))
+ i40e_dcbnl_del_app(pf, &app);
+ }
+}
+
+/**
+ * i40e_dcbnl_setup - DCBNL setup
+ * @vsi: the corresponding vsi
+ *
+ * Set up DCBNL ops and initial APP TLVs
+ **/
+void i40e_dcbnl_setup(struct i40e_vsi *vsi)
+{
+ struct net_device *dev = vsi->netdev;
+ struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+
+ /* DCB not enabled */
+ if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
+ return;
+
+ /* Do not setup DCB NL ops for MFP mode */
+ if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+ dev->dcbnl_ops = &dcbnl_ops;
+
+ /* Set initial IEEE DCB settings */
+ i40e_dcbnl_set_all(vsi);
+}
+#endif /* CONFIG_I40E_DCB */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index ef4cb1cf31f2..da22c3fa2c00 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -192,12 +191,12 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,
len = (sizeof(struct i40e_aq_desc)
* pf->hw.aq.num_asq_entries);
- memcpy(p, pf->hw.aq.asq.desc, len);
+ memcpy(p, pf->hw.aq.asq.desc_buf.va, len);
p += len;
len = (sizeof(struct i40e_aq_desc)
* pf->hw.aq.num_arq_entries);
- memcpy(p, pf->hw.aq.arq.desc, len);
+ memcpy(p, pf->hw.aq.arq.desc_buf.va, len);
p += len;
i40e_dbg_dump_data_len = buflen;
@@ -362,7 +361,7 @@ static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer,
}
/**
- * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into pokem datum
+ * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into command datum
* @pf: the i40e_pf created in command write
* @seid: the seid the user put in
**/
@@ -516,10 +515,10 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
rx_ring->stats.bytes,
rx_ring->rx_stats.non_eop_descs);
dev_info(&pf->pdev->dev,
- " rx_rings[%i]: rx_stats: alloc_rx_page_failed = %lld, alloc_rx_buff_failed = %lld\n",
+ " rx_rings[%i]: rx_stats: alloc_page_failed = %lld, alloc_buff_failed = %lld\n",
i,
- rx_ring->rx_stats.alloc_rx_page_failed,
- rx_ring->rx_stats.alloc_rx_buff_failed);
+ rx_ring->rx_stats.alloc_page_failed,
+ rx_ring->rx_stats.alloc_buff_failed);
dev_info(&pf->pdev->dev,
" rx_rings[%i]: size = %i, dma = 0x%08lx\n",
i, rx_ring->size,
@@ -533,6 +532,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
if (!tx_ring)
continue;
+
dev_info(&pf->pdev->dev,
" tx_rings[%i]: desc = %p\n",
i, tx_ring->desc);
@@ -707,8 +707,13 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
{
struct i40e_adminq_ring *ring;
struct i40e_hw *hw = &pf->hw;
+ char hdr[32];
int i;
+ snprintf(hdr, sizeof(hdr), "%s %s: ",
+ dev_driver_string(&pf->pdev->dev),
+ dev_name(&pf->pdev->dev));
+
/* first the send (command) ring, then the receive (event) ring */
dev_info(&pf->pdev->dev, "AdminQ Tx Ring\n");
ring = &(hw->aq.asq);
@@ -718,14 +723,8 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
" at[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
i, d->flags, d->opcode, d->datalen, d->retval,
d->cookie_high, d->cookie_low);
- dev_info(&pf->pdev->dev,
- " %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
- d->params.raw[0], d->params.raw[1], d->params.raw[2],
- d->params.raw[3], d->params.raw[4], d->params.raw[5],
- d->params.raw[6], d->params.raw[7], d->params.raw[8],
- d->params.raw[9], d->params.raw[10], d->params.raw[11],
- d->params.raw[12], d->params.raw[13],
- d->params.raw[14], d->params.raw[15]);
+ print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE,
+ 16, 1, d->params.raw, 16, 0);
}
dev_info(&pf->pdev->dev, "AdminQ Rx Ring\n");
@@ -736,14 +735,8 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
" ar[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
i, d->flags, d->opcode, d->datalen, d->retval,
d->cookie_high, d->cookie_low);
- dev_info(&pf->pdev->dev,
- " %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
- d->params.raw[0], d->params.raw[1], d->params.raw[2],
- d->params.raw[3], d->params.raw[4], d->params.raw[5],
- d->params.raw[6], d->params.raw[7], d->params.raw[8],
- d->params.raw[9], d->params.raw[10], d->params.raw[11],
- d->params.raw[12], d->params.raw[13],
- d->params.raw[14], d->params.raw[15]);
+ print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE,
+ 16, 1, d->params.raw, 16, 0);
}
}
@@ -759,27 +752,25 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
struct i40e_pf *pf, bool is_rx_ring)
{
- union i40e_rx_desc *ds;
+ struct i40e_tx_desc *txd;
+ union i40e_rx_desc *rxd;
struct i40e_ring ring;
struct i40e_vsi *vsi;
int i;
vsi = i40e_dbg_find_vsi(pf, vsi_seid);
if (!vsi) {
- dev_info(&pf->pdev->dev,
- "vsi %d not found\n", vsi_seid);
- if (is_rx_ring)
- dev_info(&pf->pdev->dev, "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
- else
- dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
+ dev_info(&pf->pdev->dev, "vsi %d not found\n", vsi_seid);
return;
}
if (ring_id >= vsi->num_queue_pairs || ring_id < 0) {
dev_info(&pf->pdev->dev, "ring %d not found\n", ring_id);
- if (is_rx_ring)
- dev_info(&pf->pdev->dev, "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
- else
- dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
+ return;
+ }
+ if (!vsi->tx_rings || !vsi->tx_rings[0]->desc) {
+ dev_info(&pf->pdev->dev,
+ "descriptor rings have not been allocated for vsi %d\n",
+ vsi_seid);
return;
}
if (is_rx_ring)
@@ -790,22 +781,27 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n",
vsi_seid, is_rx_ring ? "rx" : "tx", ring_id);
for (i = 0; i < ring.count; i++) {
- if (is_rx_ring)
- ds = I40E_RX_DESC(&ring, i);
- else
- ds = (union i40e_rx_desc *)
- I40E_TX_DESC(&ring, i);
- if ((sizeof(union i40e_rx_desc) ==
- sizeof(union i40e_16byte_rx_desc)) || (!is_rx_ring))
+ if (!is_rx_ring) {
+ txd = I40E_TX_DESC(&ring, i);
dev_info(&pf->pdev->dev,
- " d[%03i] = 0x%016llx 0x%016llx\n", i,
- ds->read.pkt_addr, ds->read.hdr_addr);
- else
+ " d[%03i] = 0x%016llx 0x%016llx\n",
+ i, txd->buffer_addr,
+ txd->cmd_type_offset_bsz);
+ } else if (sizeof(union i40e_rx_desc) ==
+ sizeof(union i40e_16byte_rx_desc)) {
+ rxd = I40E_RX_DESC(&ring, i);
+ dev_info(&pf->pdev->dev,
+ " d[%03i] = 0x%016llx 0x%016llx\n",
+ i, rxd->read.pkt_addr,
+ rxd->read.hdr_addr);
+ } else {
+ rxd = I40E_RX_DESC(&ring, i);
dev_info(&pf->pdev->dev,
" d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
- i, ds->read.pkt_addr,
- ds->read.hdr_addr,
- ds->read.rsvd1, ds->read.rsvd2);
+ i, rxd->read.pkt_addr,
+ rxd->read.hdr_addr,
+ rxd->read.rsvd1, rxd->read.rsvd2);
+ }
}
} else if (cnt == 3) {
if (desc_n >= ring.count || desc_n < 0) {
@@ -813,27 +809,29 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
"descriptor %d not found\n", desc_n);
return;
}
- if (is_rx_ring)
- ds = I40E_RX_DESC(&ring, desc_n);
- else
- ds = (union i40e_rx_desc *)I40E_TX_DESC(&ring, desc_n);
- if ((sizeof(union i40e_rx_desc) ==
- sizeof(union i40e_16byte_rx_desc)) || (!is_rx_ring))
+ if (!is_rx_ring) {
+ txd = I40E_TX_DESC(&ring, desc_n);
dev_info(&pf->pdev->dev,
- "vsi = %02i %s ring = %02i d[%03i] = 0x%016llx 0x%016llx\n",
- vsi_seid, is_rx_ring ? "rx" : "tx", ring_id,
- desc_n, ds->read.pkt_addr, ds->read.hdr_addr);
- else
+ "vsi = %02i tx ring = %02i d[%03i] = 0x%016llx 0x%016llx\n",
+ vsi_seid, ring_id, desc_n,
+ txd->buffer_addr, txd->cmd_type_offset_bsz);
+ } else if (sizeof(union i40e_rx_desc) ==
+ sizeof(union i40e_16byte_rx_desc)) {
+ rxd = I40E_RX_DESC(&ring, desc_n);
+ dev_info(&pf->pdev->dev,
+ "vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx\n",
+ vsi_seid, ring_id, desc_n,
+ rxd->read.pkt_addr, rxd->read.hdr_addr);
+ } else {
+ rxd = I40E_RX_DESC(&ring, desc_n);
dev_info(&pf->pdev->dev,
"vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
- vsi_seid, ring_id,
- desc_n, ds->read.pkt_addr, ds->read.hdr_addr,
- ds->read.rsvd1, ds->read.rsvd2);
+ vsi_seid, ring_id, desc_n,
+ rxd->read.pkt_addr, rxd->read.hdr_addr,
+ rxd->read.rsvd1, rxd->read.rsvd2);
+ }
} else {
- if (is_rx_ring)
- dev_info(&pf->pdev->dev, "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
- else
- dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
+ dev_info(&pf->pdev->dev, "dump desc rx/tx <vsi_seid> <ring_id> [<desc_n>]\n");
}
}
@@ -979,8 +977,7 @@ static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid)
veb = i40e_dbg_find_veb(pf, seid);
if (!veb) {
- dev_info(&pf->pdev->dev,
- "%d: can't find veb\n", seid);
+ dev_info(&pf->pdev->dev, "can't find veb %d\n", seid);
return;
}
dev_info(&pf->pdev->dev,
@@ -1006,6 +1003,22 @@ static void i40e_dbg_dump_veb_all(struct i40e_pf *pf)
}
}
+/**
+ * i40e_dbg_cmd_fd_ctrl - Enable/disable FD sideband/ATR
+ * @pf: the pf that would be altered
+ * @flag: flag that needs enabling or disabling
+ * @enable: Enable/disable FD sideband/ATR
+ **/
+static void i40e_dbg_cmd_fd_ctrl(struct i40e_pf *pf, u64 flag, bool enable)
+{
+ if (enable)
+ pf->flags |= flag;
+ else
+ pf->flags &= ~flag;
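+
+ /* a PF reset is needed for the flag change to take effect */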
+ dev_info(&pf->pdev->dev, "requesting a pf reset\n");
+ i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
+}
+
#define I40E_MAX_DEBUG_OUT_BUFFER (4096*4)
/**
* i40e_dbg_command_write - write into command datum
@@ -1022,8 +1035,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
char *cmd_buf, *cmd_buf_tmp;
int bytes_not_copied;
struct i40e_vsi *vsi;
- u8 *print_buf_start;
- u8 *print_buf;
int vsi_seid;
int veb_seid;
int cnt;
@@ -1048,11 +1059,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
count = cmd_buf_tmp - cmd_buf + 1;
}
- print_buf_start = kzalloc(I40E_MAX_DEBUG_OUT_BUFFER, GFP_KERNEL);
- if (!print_buf_start)
- goto command_write_done;
- print_buf = print_buf_start;
-
if (strncmp(cmd_buf, "add vsi", 7) == 0) {
vsi_seid = -1;
cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
@@ -1104,7 +1110,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
vsi = i40e_dbg_find_vsi(pf, vsi_seid);
if (!vsi) {
dev_info(&pf->pdev->dev,
- "add relay: vsi VSI %d not found\n", vsi_seid);
+ "add relay: VSI %d not found\n", vsi_seid);
goto command_write_done;
}
@@ -1462,20 +1468,24 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
}
} else if (strncmp(cmd_buf, "pfr", 3) == 0) {
dev_info(&pf->pdev->dev, "forcing PFR\n");
- i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+ i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "corer", 5) == 0) {
dev_info(&pf->pdev->dev, "forcing CoreR\n");
- i40e_do_reset(pf, (1 << __I40E_CORE_RESET_REQUESTED));
+ i40e_do_reset_safe(pf, (1 << __I40E_CORE_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "globr", 5) == 0) {
dev_info(&pf->pdev->dev, "forcing GlobR\n");
- i40e_do_reset(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED));
+ i40e_do_reset_safe(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED));
+
+ } else if (strncmp(cmd_buf, "empr", 4) == 0) {
+ dev_info(&pf->pdev->dev, "forcing EMPR\n");
+ i40e_do_reset_safe(pf, (1 << __I40E_EMP_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "read", 4) == 0) {
u32 address;
u32 value;
- cnt = sscanf(&cmd_buf[4], "%x", &address);
+ cnt = sscanf(&cmd_buf[4], "%i", &address);
if (cnt != 1) {
dev_info(&pf->pdev->dev, "read <reg>\n");
goto command_write_done;
@@ -1494,7 +1504,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
} else if (strncmp(cmd_buf, "write", 5) == 0) {
u32 address, value;
- cnt = sscanf(&cmd_buf[5], "%x %x", &address, &value);
+ cnt = sscanf(&cmd_buf[5], "%i %i", &address, &value);
if (cnt != 2) {
dev_info(&pf->pdev->dev, "write <reg> <value>\n");
goto command_write_done;
@@ -1512,7 +1522,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
address, value);
} else if (strncmp(cmd_buf, "clear_stats", 11) == 0) {
if (strncmp(&cmd_buf[12], "vsi", 3) == 0) {
- cnt = sscanf(&cmd_buf[15], "%d", &vsi_seid);
+ cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid);
if (cnt == 0) {
int i;
for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
@@ -1539,6 +1549,118 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
} else {
dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats pf\n");
}
+ } else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) {
+ struct i40e_aq_desc *desc;
+ i40e_status ret;
+
+ desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
+ if (!desc)
+ goto command_write_done;
+ cnt = sscanf(&cmd_buf[11],
+ "%hx %hx %hx %hx %x %x %x %x %x %x",
+ &desc->flags,
+ &desc->opcode, &desc->datalen, &desc->retval,
+ &desc->cookie_high, &desc->cookie_low,
+ &desc->params.internal.param0,
+ &desc->params.internal.param1,
+ &desc->params.internal.param2,
+ &desc->params.internal.param3);
+ if (cnt != 10) {
+ dev_info(&pf->pdev->dev,
+ "send aq_cmd: bad command string, cnt=%d\n",
+ cnt);
+ kfree(desc);
+ desc = NULL;
+ goto command_write_done;
+ }
+ ret = i40e_asq_send_command(&pf->hw, desc, NULL, 0, NULL);
+ if (!ret) {
+ dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n");
+ } else if (ret == I40E_ERR_ADMIN_QUEUE_ERROR) {
+ dev_info(&pf->pdev->dev,
+ "AQ command send failed Opcode %x AQ Error: %d\n",
+ desc->opcode, pf->hw.aq.asq_last_status);
+ } else {
+ dev_info(&pf->pdev->dev,
+ "AQ command send failed Opcode %x Status: %d\n",
+ desc->opcode, ret);
+ }
+ dev_info(&pf->pdev->dev,
+ "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ desc->flags, desc->opcode, desc->datalen, desc->retval,
+ desc->cookie_high, desc->cookie_low,
+ desc->params.internal.param0,
+ desc->params.internal.param1,
+ desc->params.internal.param2,
+ desc->params.internal.param3);
+ kfree(desc);
+ desc = NULL;
+ } else if (strncmp(cmd_buf, "send indirect aq_cmd", 20) == 0) {
+ struct i40e_aq_desc *desc;
+ i40e_status ret;
+ u16 buffer_len;
+ u8 *buff;
+
+ desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
+ if (!desc)
+ goto command_write_done;
+ cnt = sscanf(&cmd_buf[20],
+ "%hx %hx %hx %hx %x %x %x %x %x %x %hd",
+ &desc->flags,
+ &desc->opcode, &desc->datalen, &desc->retval,
+ &desc->cookie_high, &desc->cookie_low,
+ &desc->params.internal.param0,
+ &desc->params.internal.param1,
+ &desc->params.internal.param2,
+ &desc->params.internal.param3,
+ &buffer_len);
+ if (cnt != 11) {
+ dev_info(&pf->pdev->dev,
+ "send indirect aq_cmd: bad command string, cnt=%d\n",
+ cnt);
+ kfree(desc);
+ desc = NULL;
+ goto command_write_done;
+ }
+ /* Just stub a buffer big enough in case user messed up */
+ if (buffer_len == 0)
+ buffer_len = 1280;
+
+ buff = kzalloc(buffer_len, GFP_KERNEL);
+ if (!buff) {
+ kfree(desc);
+ desc = NULL;
+ goto command_write_done;
+ }
+ desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ ret = i40e_asq_send_command(&pf->hw, desc, buff,
+ buffer_len, NULL);
+ if (!ret) {
+ dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n");
+ } else if (ret == I40E_ERR_ADMIN_QUEUE_ERROR) {
+ dev_info(&pf->pdev->dev,
+ "AQ command send failed Opcode %x AQ Error: %d\n",
+ desc->opcode, pf->hw.aq.asq_last_status);
+ } else {
+ dev_info(&pf->pdev->dev,
+ "AQ command send failed Opcode %x Status: %d\n",
+ desc->opcode, ret);
+ }
+ dev_info(&pf->pdev->dev,
+ "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ desc->flags, desc->opcode, desc->datalen, desc->retval,
+ desc->cookie_high, desc->cookie_low,
+ desc->params.internal.param0,
+ desc->params.internal.param1,
+ desc->params.internal.param2,
+ desc->params.internal.param3);
+ print_hex_dump(KERN_INFO, "AQ buffer WB: ",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ buff, buffer_len, true);
+ kfree(buff);
+ buff = NULL;
+ kfree(desc);
+ desc = NULL;
} else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) ||
(strncmp(cmd_buf, "rem fd_filter", 13) == 0)) {
struct i40e_fdir_data fd_data;
@@ -1564,7 +1686,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
if (strncmp(cmd_buf, "add", 3) == 0)
add = true;
cnt = sscanf(&cmd_buf[13],
- "%hx %2hhx %2hhx %hx %2hhx %2hhx %hx %x %hd %512s",
+ "%hx %2hhx %2hhx %hx %2hhx %2hhx %hx %x %hd %511s",
&fd_data.q_index,
&fd_data.flex_off, &fd_data.pctype,
&fd_data.dest_vsi, &fd_data.dest_ctl,
@@ -1588,19 +1710,15 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
packet_len = min_t(u16,
packet_len, I40E_FDIR_MAX_RAW_PACKET_LOOKUP);
- dev_info(&pf->pdev->dev, "FD raw packet:\n");
for (i = 0; i < packet_len; i++) {
sscanf(&asc_packet[j], "%2hhx ",
&fd_data.raw_packet[i]);
j += 3;
- snprintf(print_buf, 3, "%02x ", fd_data.raw_packet[i]);
- print_buf += 3;
- if ((i % 16) == 15) {
- snprintf(print_buf, 1, "\n");
- print_buf++;
- }
}
- dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
+ dev_info(&pf->pdev->dev, "FD raw packet dump\n");
+ print_hex_dump(KERN_INFO, "FD raw packet: ",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ fd_data.raw_packet, packet_len, true);
ret = i40e_program_fdir_filter(&fd_data, pf, add);
if (!ret) {
dev_info(&pf->pdev->dev, "Filter command send Status : Success\n");
@@ -1612,6 +1730,14 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
fd_data.raw_packet = NULL;
kfree(asc_packet);
asc_packet = NULL;
+ } else if (strncmp(cmd_buf, "fd-atr off", 10) == 0) {
+ i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, false);
+ } else if (strncmp(cmd_buf, "fd-atr on", 9) == 0) {
+ i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, true);
+ } else if (strncmp(cmd_buf, "fd-sb off", 9) == 0) {
+ i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_SB_ENABLED, false);
+ } else if (strncmp(cmd_buf, "fd-sb on", 8) == 0) {
+ i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_SB_ENABLED, true);
} else if (strncmp(cmd_buf, "lldp", 4) == 0) {
if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
int ret;
@@ -1622,8 +1748,35 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
pf->hw.aq.asq_last_status);
goto command_write_done;
}
+ ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
+ pf->hw.mac.addr,
+ I40E_ETH_P_LLDP, 0,
+ pf->vsi[pf->lan_vsi]->seid,
+ 0, true, NULL, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "%s: Add Control Packet Filter AQ command failed =0x%x\n",
+ __func__, pf->hw.aq.asq_last_status);
+ goto command_write_done;
+ }
+#ifdef CONFIG_I40E_DCB
+ pf->dcbx_cap = DCB_CAP_DCBX_HOST |
+ DCB_CAP_DCBX_VER_IEEE;
+#endif /* CONFIG_I40E_DCB */
} else if (strncmp(&cmd_buf[5], "start", 5) == 0) {
int ret;
+ ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
+ pf->hw.mac.addr,
+ I40E_ETH_P_LLDP, 0,
+ pf->vsi[pf->lan_vsi]->seid,
+ 0, false, NULL, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "%s: Remove Control Packet Filter AQ command failed =0x%x\n",
+ __func__, pf->hw.aq.asq_last_status);
+ /* Continue and start FW LLDP anyways */
+ }
+
ret = i40e_aq_start_lldp(&pf->hw, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
@@ -1631,10 +1784,14 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
pf->hw.aq.asq_last_status);
goto command_write_done;
}
+#ifdef CONFIG_I40E_DCB
+ pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
+ DCB_CAP_DCBX_VER_IEEE;
+#endif /* CONFIG_I40E_DCB */
} else if (strncmp(&cmd_buf[5],
"get local", 9) == 0) {
u16 llen, rlen;
- int ret, i;
+ int ret;
u8 *buff;
buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
if (!buff)
@@ -1652,22 +1809,15 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
buff = NULL;
goto command_write_done;
}
- dev_info(&pf->pdev->dev,
- "Get LLDP MIB (local) AQ buffer written back:\n");
- for (i = 0; i < I40E_LLDPDU_SIZE; i++) {
- snprintf(print_buf, 3, "%02x ", buff[i]);
- print_buf += 3;
- if ((i % 16) == 15) {
- snprintf(print_buf, 1, "\n");
- print_buf++;
- }
- }
- dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
+ dev_info(&pf->pdev->dev, "LLDP MIB (local)\n");
+ print_hex_dump(KERN_INFO, "LLDP MIB (local): ",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ buff, I40E_LLDPDU_SIZE, true);
kfree(buff);
buff = NULL;
} else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) {
u16 llen, rlen;
- int ret, i;
+ int ret;
u8 *buff;
buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
if (!buff)
@@ -1686,17 +1836,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
buff = NULL;
goto command_write_done;
}
- dev_info(&pf->pdev->dev,
- "Get LLDP MIB (remote) AQ buffer written back:\n");
- for (i = 0; i < I40E_LLDPDU_SIZE; i++) {
- snprintf(print_buf, 3, "%02x ", buff[i]);
- print_buf += 3;
- if ((i % 16) == 15) {
- snprintf(print_buf, 1, "\n");
- print_buf++;
- }
- }
- dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
+ dev_info(&pf->pdev->dev, "LLDP MIB (remote)\n");
+ print_hex_dump(KERN_INFO, "LLDP MIB (remote): ",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ buff, I40E_LLDPDU_SIZE, true);
kfree(buff);
buff = NULL;
} else if (strncmp(&cmd_buf[5], "event on", 8) == 0) {
@@ -1721,7 +1864,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
}
}
} else if (strncmp(cmd_buf, "nvm read", 8) == 0) {
- u16 buffer_len, i, bytes;
+ u16 buffer_len, bytes;
u16 module;
u32 offset;
u16 *buff;
@@ -1775,16 +1918,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev,
"Read NVM module=0x%x offset=0x%x words=%d\n",
module, offset, buffer_len);
- for (i = 0; i < buffer_len; i++) {
- if ((i % 16) == 0) {
- snprintf(print_buf, 11, "\n0x%08x: ",
- offset + i);
- print_buf += 11;
- }
- snprintf(print_buf, 5, "%04x ", buff[i]);
- print_buf += 5;
- }
- dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
+ if (bytes)
+ print_hex_dump(KERN_INFO, "NVM Dump: ",
+ DUMP_PREFIX_OFFSET, 16, 2,
+ buff, bytes, true);
}
kfree(buff);
buff = NULL;
@@ -1814,8 +1951,14 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, " pfr\n");
dev_info(&pf->pdev->dev, " corer\n");
dev_info(&pf->pdev->dev, " globr\n");
+ dev_info(&pf->pdev->dev, " send aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3>\n");
+ dev_info(&pf->pdev->dev, " send indirect aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3> <buffer_len>\n");
dev_info(&pf->pdev->dev, " add fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
dev_info(&pf->pdev->dev, " rem fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
+ dev_info(&pf->pdev->dev, " fd-atr off\n");
+ dev_info(&pf->pdev->dev, " fd-atr on\n");
+ dev_info(&pf->pdev->dev, " fd-sb off\n");
+ dev_info(&pf->pdev->dev, " fd-sb on\n");
dev_info(&pf->pdev->dev, " lldp start\n");
dev_info(&pf->pdev->dev, " lldp stop\n");
dev_info(&pf->pdev->dev, " lldp get local\n");
@@ -1828,9 +1971,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
command_write_done:
kfree(cmd_buf);
cmd_buf = NULL;
- kfree(print_buf_start);
- print_buf = NULL;
- print_buf_start = NULL;
return count;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c
index de255143bde6..b2380daef8c1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -68,16 +67,16 @@ static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw,
struct i40e_diag_reg_test_info i40e_reg_list[] = {
/* offset mask elements stride */
- {I40E_QTX_CTL(0), 0x0000FFBF, 64, I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
+ {I40E_QTX_CTL(0), 0x0000FFBF, 4, I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
{I40E_PFINT_ITR0(0), 0x00000FFF, 3, I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)},
- {I40E_PFINT_ITRN(0, 0), 0x00000FFF, 64, I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)},
- {I40E_PFINT_ITRN(1, 0), 0x00000FFF, 64, I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)},
- {I40E_PFINT_ITRN(2, 0), 0x00000FFF, 64, I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)},
+ {I40E_PFINT_ITRN(0, 0), 0x00000FFF, 8, I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)},
+ {I40E_PFINT_ITRN(1, 0), 0x00000FFF, 8, I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)},
+ {I40E_PFINT_ITRN(2, 0), 0x00000FFF, 8, I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)},
{I40E_PFINT_STAT_CTL0, 0x0000000C, 1, 0},
{I40E_PFINT_LNKLST0, 0x00001FFF, 1, 0},
- {I40E_PFINT_LNKLSTN(0), 0x000007FF, 511, I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)},
- {I40E_QINT_TQCTL(0), 0x000000FF, I40E_QINT_TQCTL_MAX_INDEX + 1, I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)},
- {I40E_QINT_RQCTL(0), 0x000000FF, I40E_QINT_RQCTL_MAX_INDEX + 1, I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)},
+ {I40E_PFINT_LNKLSTN(0), 0x000007FF, 64, I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)},
+ {I40E_QINT_TQCTL(0), 0x000000FF, 64, I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)},
+ {I40E_QINT_RQCTL(0), 0x000000FF, 64, I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)},
{I40E_PFINT_ICR0_ENA, 0xF7F20000, 1, 0},
{ 0 }
};
@@ -119,7 +118,7 @@ i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw)
/* read NVM control word and if NVM valid, validate EEPROM checksum*/
ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, &reg_val);
- if ((!ret_code) &&
+ if (!ret_code &&
((reg_val & I40E_SR_CONTROL_WORD_1_MASK) ==
(0x01 << I40E_SR_CONTROL_WORD_1_SHIFT))) {
ret_code = i40e_validate_nvm_checksum(hw, NULL);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h
index 3d98277f4526..0b5911652084 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -31,10 +30,10 @@
#include "i40e_type.h"
enum i40e_lb_mode {
- I40E_LB_MODE_NONE = 0,
- I40E_LB_MODE_PHY_LOCAL,
- I40E_LB_MODE_PHY_REMOTE,
- I40E_LB_MODE_MAC_LOCAL,
+ I40E_LB_MODE_NONE = 0x0,
+ I40E_LB_MODE_PHY_LOCAL = I40E_AQ_LB_PHY_LOCAL,
+ I40E_LB_MODE_PHY_REMOTE = I40E_AQ_LB_PHY_REMOTE,
+ I40E_LB_MODE_MAC_LOCAL = I40E_AQ_LB_MAC_LOCAL,
};
struct i40e_diag_reg_test_info {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 1b86138fa9e1..b1d7d8c5cb9b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -109,6 +108,8 @@ static struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("rx_oversize", stats.rx_oversize),
I40E_PF_STAT("rx_jabber", stats.rx_jabber),
I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
+ I40E_PF_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
+ I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
};
#define I40E_QUEUE_STATS_LEN(n) \
@@ -193,28 +194,48 @@ static int i40e_get_settings(struct net_device *netdev,
ecmd->supported = SUPPORTED_10000baseKR_Full;
ecmd->advertising = ADVERTISED_10000baseKR_Full;
break;
- case I40E_PHY_TYPE_10GBASE_T:
default:
- ecmd->supported = SUPPORTED_10000baseT_Full;
- ecmd->advertising = ADVERTISED_10000baseT_Full;
+ if (i40e_is_40G_device(hw->device_id)) {
+ ecmd->supported = SUPPORTED_40000baseSR4_Full;
+ ecmd->advertising = ADVERTISED_40000baseSR4_Full;
+ } else {
+ ecmd->supported = SUPPORTED_10000baseT_Full;
+ ecmd->advertising = ADVERTISED_10000baseT_Full;
+ }
break;
}
- /* for now just say autoneg all the time */
ecmd->supported |= SUPPORTED_Autoneg;
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
+ AUTONEG_ENABLE : AUTONEG_DISABLE);
- if (hw->phy.media_type == I40E_MEDIA_TYPE_BACKPLANE) {
+ switch (hw->phy.media_type) {
+ case I40E_MEDIA_TYPE_BACKPLANE:
ecmd->supported |= SUPPORTED_Backplane;
ecmd->advertising |= ADVERTISED_Backplane;
ecmd->port = PORT_NONE;
- } else if (hw->phy.media_type == I40E_MEDIA_TYPE_BASET) {
+ break;
+ case I40E_MEDIA_TYPE_BASET:
ecmd->supported |= SUPPORTED_TP;
ecmd->advertising |= ADVERTISED_TP;
ecmd->port = PORT_TP;
- } else {
+ break;
+ case I40E_MEDIA_TYPE_DA:
+ case I40E_MEDIA_TYPE_CX4:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
+ ecmd->port = PORT_DA;
+ break;
+ case I40E_MEDIA_TYPE_FIBER:
ecmd->supported |= SUPPORTED_FIBRE;
ecmd->advertising |= ADVERTISED_FIBRE;
ecmd->port = PORT_FIBRE;
+ break;
+ case I40E_MEDIA_TYPE_UNKNOWN:
+ default:
+ ecmd->port = PORT_OTHER;
+ break;
}
ecmd->transceiver = XCVR_EXTERNAL;
@@ -256,12 +277,14 @@ static void i40e_get_pauseparam(struct net_device *netdev,
((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
AUTONEG_ENABLE : AUTONEG_DISABLE);
- pause->rx_pause = 0;
- pause->tx_pause = 0;
- if (hw_link_info->an_info & I40E_AQ_LINK_PAUSE_RX)
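+ /* report pause settings from the negotiated flow-control mode */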
+ if (hw->fc.current_mode == I40E_FC_RX_PAUSE) {
+ pause->rx_pause = 1;
+ } else if (hw->fc.current_mode == I40E_FC_TX_PAUSE) {
+ pause->tx_pause = 1;
+ } else if (hw->fc.current_mode == I40E_FC_FULL) {
pause->rx_pause = 1;
- if (hw_link_info->an_info & I40E_AQ_LINK_PAUSE_TX)
pause->tx_pause = 1;
+ }
}
static u32 i40e_get_msglevel(struct net_device *netdev)
@@ -329,38 +352,56 @@ static int i40e_get_eeprom(struct net_device *netdev,
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_hw *hw = &np->vsi->back->hw;
- int first_word, last_word;
- u16 i, eeprom_len;
- u16 *eeprom_buff;
- int ret_val = 0;
-
+ struct i40e_pf *pf = np->vsi->back;
+ int ret_val = 0, len;
+ u8 *eeprom_buff;
+ u16 i, sectors;
+ bool last;
+#define I40E_NVM_SECTOR_SIZE 4096
if (eeprom->len == 0)
return -EINVAL;
eeprom->magic = hw->vendor_id | (hw->device_id << 16);
- first_word = eeprom->offset >> 1;
- last_word = (eeprom->offset + eeprom->len - 1) >> 1;
- eeprom_len = last_word - first_word + 1;
-
- eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
+ eeprom_buff = kzalloc(eeprom->len, GFP_KERNEL);
if (!eeprom_buff)
return -ENOMEM;
- ret_val = i40e_read_nvm_buffer(hw, first_word, &eeprom_len,
- eeprom_buff);
- if (eeprom_len == 0) {
- kfree(eeprom_buff);
- return -EACCES;
+ ret_val = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (ret_val) {
+ dev_info(&pf->pdev->dev,
+ "Failed Acquiring NVM resource for read err=%d status=0x%x\n",
+ ret_val, hw->aq.asq_last_status);
+ goto free_buff;
}
- /* Device's eeprom is always little-endian, word addressable */
- for (i = 0; i < eeprom_len; i++)
- le16_to_cpus(&eeprom_buff[i]);
+ sectors = eeprom->len / I40E_NVM_SECTOR_SIZE;
+ sectors += (eeprom->len % I40E_NVM_SECTOR_SIZE) ? 1 : 0;
+ len = I40E_NVM_SECTOR_SIZE;
+ last = false;
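+ /* read in 4KB sectors; the final (possibly short) read sets the last flag */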
+ for (i = 0; i < sectors; i++) {
+ if (i == (sectors - 1)) {
+ len = eeprom->len - (I40E_NVM_SECTOR_SIZE * i);
+ last = true;
+ }
+ ret_val = i40e_aq_read_nvm(hw, 0x0,
+ eeprom->offset + (I40E_NVM_SECTOR_SIZE * i),
+ len,
+ (u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i),
+ last, NULL);
+ if (ret_val) {
+ dev_info(&pf->pdev->dev,
+ "read NVM failed err=%d status=0x%x\n",
+ ret_val, hw->aq.asq_last_status);
+ goto release_nvm;
+ }
+ }
- memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
+release_nvm:
+ i40e_release_nvm(hw);
+ memcpy(bytes, (u8 *)eeprom_buff, eeprom->len);
+free_buff:
kfree(eeprom_buff);
-
return ret_val;
}
@@ -368,8 +409,14 @@ static int i40e_get_eeprom_len(struct net_device *netdev)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_hw *hw = &np->vsi->back->hw;
-
- return hw->nvm.sr_size * 2;
+ u32 val;
+
+ val = (rd32(hw, I40E_GLPCI_LBARCTRL)
+ & I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
+ >> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
+ /* register returns value in power of 2, 64Kbyte chunks. */
+ val = (64 * 1024) * (1 << val);
+ return val;
}
static void i40e_get_drvinfo(struct net_device *netdev,
@@ -418,15 +465,19 @@ static int i40e_set_ringparam(struct net_device *netdev,
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
- new_tx_count = clamp_t(u32, ring->tx_pending,
- I40E_MIN_NUM_DESCRIPTORS,
- I40E_MAX_NUM_DESCRIPTORS);
- new_tx_count = ALIGN(new_tx_count, I40E_REQ_DESCRIPTOR_MULTIPLE);
+ if (ring->tx_pending > I40E_MAX_NUM_DESCRIPTORS ||
+ ring->tx_pending < I40E_MIN_NUM_DESCRIPTORS ||
+ ring->rx_pending > I40E_MAX_NUM_DESCRIPTORS ||
+ ring->rx_pending < I40E_MIN_NUM_DESCRIPTORS) {
+ netdev_info(netdev,
+ "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n",
+ ring->tx_pending, ring->rx_pending,
+ I40E_MIN_NUM_DESCRIPTORS, I40E_MAX_NUM_DESCRIPTORS);
+ return -EINVAL;
+ }
- new_rx_count = clamp_t(u32, ring->rx_pending,
- I40E_MIN_NUM_DESCRIPTORS,
- I40E_MAX_NUM_DESCRIPTORS);
- new_rx_count = ALIGN(new_rx_count, I40E_REQ_DESCRIPTOR_MULTIPLE);
+ new_tx_count = ALIGN(ring->tx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE);
+ new_rx_count = ALIGN(ring->rx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE);
/* if nothing to do return success */
if ((new_tx_count == vsi->tx_rings[0]->count) &&
@@ -699,11 +750,44 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
static int i40e_get_ts_info(struct net_device *dev,
struct ethtool_ts_info *info)
{
- return ethtool_op_get_ts_info(dev, info);
+ struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (pf->ptp_clock)
+ info->phc_index = ptp_clock_index(pf->ptp_clock);
+ else
+ info->phc_index = -1;
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
+
+ return 0;
}
-static int i40e_link_test(struct i40e_pf *pf, u64 *data)
+static int i40e_link_test(struct net_device *netdev, u64 *data)
{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+
+ netif_info(pf, hw, netdev, "link test\n");
if (i40e_get_link_status(&pf->hw))
*data = 0;
else
@@ -712,36 +796,51 @@ static int i40e_link_test(struct i40e_pf *pf, u64 *data)
return *data;
}
-static int i40e_reg_test(struct i40e_pf *pf, u64 *data)
+static int i40e_reg_test(struct net_device *netdev, u64 *data)
{
- i40e_status ret;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
- ret = i40e_diag_reg_test(&pf->hw);
- *data = ret;
+ netif_info(pf, hw, netdev, "register test\n");
+ *data = i40e_diag_reg_test(&pf->hw);
- return ret;
+ return *data;
}
-static int i40e_eeprom_test(struct i40e_pf *pf, u64 *data)
+static int i40e_eeprom_test(struct net_device *netdev, u64 *data)
{
- i40e_status ret;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
- ret = i40e_diag_eeprom_test(&pf->hw);
- *data = ret;
+ netif_info(pf, hw, netdev, "eeprom test\n");
+ *data = i40e_diag_eeprom_test(&pf->hw);
- return ret;
+ return *data;
}
-static int i40e_intr_test(struct i40e_pf *pf, u64 *data)
+static int i40e_intr_test(struct net_device *netdev, u64 *data)
{
- *data = -ENOSYS;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ u16 swc_old = pf->sw_int_count;
+
+ netif_info(pf, hw, netdev, "interrupt test\n");
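+ /* fire a software interrupt; the test fails if the counter does not advance */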
+ wr32(&pf->hw, I40E_PFINT_DYN_CTL0,
+ (I40E_PFINT_DYN_CTL0_INTENA_MASK |
+ I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK));
+ usleep_range(1000, 2000);
+ *data = (swc_old == pf->sw_int_count);
return *data;
}
-static int i40e_loopback_test(struct i40e_pf *pf, u64 *data)
+static int i40e_loopback_test(struct net_device *netdev, u64 *data)
{
- *data = -ENOSYS;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+
+ netif_info(pf, hw, netdev, "loopback test not implemented\n");
+ *data = 0;
return *data;
}
@@ -752,42 +851,38 @@ static void i40e_diag_test(struct net_device *netdev,
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
- set_bit(__I40E_TESTING, &pf->state);
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
/* Offline tests */
+ netif_info(pf, drv, netdev, "offline testing starting\n");
- netdev_info(netdev, "offline testing starting\n");
+ set_bit(__I40E_TESTING, &pf->state);
/* Link test performed before hardware reset
* so autoneg doesn't interfere with test result
*/
- netdev_info(netdev, "link test starting\n");
- if (i40e_link_test(pf, &data[I40E_ETH_TEST_LINK]))
+ if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
eth_test->flags |= ETH_TEST_FL_FAILED;
- netdev_info(netdev, "register test starting\n");
- if (i40e_reg_test(pf, &data[I40E_ETH_TEST_REG]))
+ if (i40e_eeprom_test(netdev, &data[I40E_ETH_TEST_EEPROM]))
eth_test->flags |= ETH_TEST_FL_FAILED;
- i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
- netdev_info(netdev, "eeprom test starting\n");
- if (i40e_eeprom_test(pf, &data[I40E_ETH_TEST_EEPROM]))
+ if (i40e_intr_test(netdev, &data[I40E_ETH_TEST_INTR]))
eth_test->flags |= ETH_TEST_FL_FAILED;
- i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
- netdev_info(netdev, "interrupt test starting\n");
- if (i40e_intr_test(pf, &data[I40E_ETH_TEST_INTR]))
+ if (i40e_loopback_test(netdev, &data[I40E_ETH_TEST_LOOPBACK]))
eth_test->flags |= ETH_TEST_FL_FAILED;
- i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
- netdev_info(netdev, "loopback test starting\n");
- if (i40e_loopback_test(pf, &data[I40E_ETH_TEST_LOOPBACK]))
+ /* run reg test last, a reset is required after it */
+ if (i40e_reg_test(netdev, &data[I40E_ETH_TEST_REG]))
eth_test->flags |= ETH_TEST_FL_FAILED;
+ clear_bit(__I40E_TESTING, &pf->state);
+ i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
} else {
- netdev_info(netdev, "online test starting\n");
/* Online tests */
- if (i40e_link_test(pf, &data[I40E_ETH_TEST_LINK]))
+ netif_info(pf, drv, netdev, "online testing starting\n");
+
+ if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
eth_test->flags |= ETH_TEST_FL_FAILED;
	/* Offline-only tests, not run online; pass by default */
@@ -795,16 +890,53 @@ static void i40e_diag_test(struct net_device *netdev,
data[I40E_ETH_TEST_EEPROM] = 0;
data[I40E_ETH_TEST_INTR] = 0;
data[I40E_ETH_TEST_LOOPBACK] = 0;
-
- clear_bit(__I40E_TESTING, &pf->state);
}
+
+ netif_info(pf, drv, netdev, "testing finished\n");
}
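
The reordered sequence above runs the register test last because a PF reset is required after it; each sub-test writes 0 (pass) or nonzero (fail) into its data[] slot. A hypothetical userspace sketch for kicking off the offline run (the five result words mirror the I40E_ETH_TEST_* indices used above; socket/ifreq setup as in the previous sketch):

#include <stdlib.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

/* Hypothetical helper: request the offline self-test and report
 * whether any sub-test failed.
 */
static int run_offline_test(int sock, struct ifreq *ifr)
{
	struct ethtool_test *test;
	int failed;

	test = calloc(1, sizeof(*test) + 5 * sizeof(__u64));
	if (!test)
		return -1;
	test->cmd = ETHTOOL_TEST;
	test->flags = ETH_TEST_FL_OFFLINE;
	ifr->ifr_data = (void *)test;
	if (ioctl(sock, SIOCETHTOOL, ifr) < 0) {
		free(test);
		return -1;
	}
	failed = !!(test->flags & ETH_TEST_FL_FAILED);
	free(test);
	return failed;
}
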
static void i40e_get_wol(struct net_device *netdev,
struct ethtool_wolinfo *wol)
{
- wol->supported = 0;
- wol->wolopts = 0;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u16 wol_nvm_bits;
+
+ /* NVM bit on means WoL disabled for the port */
+ i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
+ if ((1 << hw->port) & wol_nvm_bits) {
+ wol->supported = 0;
+ wol->wolopts = 0;
+ } else {
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = (pf->wol_en ? WAKE_MAGIC : 0);
+ }
+}
+
+static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u16 wol_nvm_bits;
+
+ /* NVM bit on means WoL disabled for the port */
+ i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
+ if (((1 << hw->port) & wol_nvm_bits))
+ return -EOPNOTSUPP;
+
+ /* only magic packet is supported */
+ if (wol->wolopts && (wol->wolopts != WAKE_MAGIC))
+ return -EOPNOTSUPP;
+
+ /* is this a new value? */
+ if (pf->wol_en != !!wol->wolopts) {
+ pf->wol_en = !!wol->wolopts;
+ device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
+ }
+
+ return 0;
}
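
With the new hook wired up in the ethtool_ops table below, userspace can toggle magic-packet wake; any other wake option, or a port whose NVM wake bit is set, is rejected with -EOPNOTSUPP. A hypothetical sketch (same socket/ifreq conventions as the sketches above):

#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

/* Hypothetical helper: enable magic-packet Wake-on-LAN. */
static int enable_magic_wol(int sock, struct ifreq *ifr)
{
	struct ethtool_wolinfo wol = {
		.cmd = ETHTOOL_SWOL,
		.wolopts = WAKE_MAGIC,
	};

	ifr->ifr_data = (void *)&wol;
	return ioctl(sock, SIOCETHTOOL, ifr);
}
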
static int i40e_nway_reset(struct net_device *netdev)
@@ -838,13 +970,13 @@ static int i40e_set_phys_id(struct net_device *netdev,
pf->led_status = i40e_led_get(hw);
return blink_freq;
case ETHTOOL_ID_ON:
- i40e_led_set(hw, 0xF);
+ i40e_led_set(hw, 0xF, false);
break;
case ETHTOOL_ID_OFF:
- i40e_led_set(hw, 0x0);
+ i40e_led_set(hw, 0x0, false);
break;
case ETHTOOL_ID_INACTIVE:
- i40e_led_set(hw, pf->led_status);
+ i40e_led_set(hw, pf->led_status, false);
break;
}
@@ -1003,6 +1135,7 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
ret = i40e_get_rss_hash_opts(pf, cmd);
break;
case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = 10;
ret = 0;
break;
case ETHTOOL_GRXCLSRULE:
@@ -1142,6 +1275,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
}
#define IP_HEADER_OFFSET 14
+#define I40E_UDPIP_DUMMY_PACKET_LEN 42
/**
* i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 Flow Director filters for
* a specific flow spec
@@ -1162,6 +1296,12 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
bool err = false;
int ret;
int i;
+ char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
+ 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0};
+
+ memcpy(fd_data->raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
udp = (struct udphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET
@@ -1192,6 +1332,7 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
return err ? -EOPNOTSUPP : 0;
}
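
The 42-byte template above is simply a bare Ethernet + IPv4 + UDP frame; the TCP (54 bytes) and IP-only (34 bytes) templates that follow use the same arithmetic, and IP_HEADER_OFFSET is just the Ethernet header length. A sanity-check sketch, not driver code:

#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/tcp.h>

/* Illustrative only: the dummy-packet lengths are sums of the
 * standard header sizes.
 */
#define DUMMY_ETH_LEN	sizeof(struct ethhdr)			/* 14 */
#define DUMMY_UDP_LEN	(DUMMY_ETH_LEN + sizeof(struct iphdr) + \
			 sizeof(struct udphdr))			/* 42 */
#define DUMMY_TCP_LEN	(DUMMY_ETH_LEN + sizeof(struct iphdr) + \
			 sizeof(struct tcphdr))			/* 54 */
#define DUMMY_IP_LEN	(DUMMY_ETH_LEN + sizeof(struct iphdr))	/* 34 */
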
+#define I40E_TCPIP_DUMMY_PACKET_LEN 54
/**
* i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 Flow Director filters for
* a specific flow spec
@@ -1211,6 +1352,14 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
struct iphdr *ip;
bool err = false;
int ret;
+ /* Dummy packet */
+ char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
+ 0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0x80, 0x11, 0x0, 0x72, 0, 0, 0, 0};
+
+ memcpy(fd_data->raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
tcp = (struct tcphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET
@@ -1218,6 +1367,15 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
ip->daddr = fsp->h_u.tcp_ip4_spec.ip4dst;
tcp->dest = fsp->h_u.tcp_ip4_spec.pdst;
+ ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
+ tcp->source = fsp->h_u.tcp_ip4_spec.psrc;
+
+ if (add) {
+ if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
+ dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
+ pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+ }
+ }
fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN;
ret = i40e_program_fdir_filter(fd_data, pf, add);
@@ -1232,9 +1390,6 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
fd_data->pctype, ret);
}
- ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
- tcp->source = fsp->h_u.tcp_ip4_spec.psrc;
-
fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
ret = i40e_program_fdir_filter(fd_data, pf, add);
@@ -1268,6 +1423,7 @@ static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
return -EOPNOTSUPP;
}
+#define I40E_IP_DUMMY_PACKET_LEN 34
/**
* i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
* a specific flow spec
@@ -1287,7 +1443,11 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
bool err = false;
int ret;
int i;
+ char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
+ 0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ memcpy(fd_data->raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
ip->saddr = fsp->h_u.usr_ip4_spec.ip4src;
@@ -1356,8 +1516,8 @@ static int i40e_add_del_fdir_ethtool(struct i40e_vsi *vsi,
fd_data.flex_off = 0;
fd_data.pctype = 0;
fd_data.dest_vsi = vsi->id;
- fd_data.dest_ctl = 0;
- fd_data.fd_status = 0;
+ fd_data.dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
+ fd_data.fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
fd_data.cnt_index = 0;
fd_data.fd_id = 0;
@@ -1400,6 +1560,7 @@ static int i40e_add_del_fdir_ethtool(struct i40e_vsi *vsi,
return ret;
}
+
/**
* i40e_set_rxnfc - command to set RX flow classification rules
* @netdev: network interface device structure
@@ -1431,6 +1592,94 @@ static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
return ret;
}
+/**
+ * i40e_max_channels - get the maximum number of combined channels supported
+ * @vsi: vsi pointer
+ **/
+static unsigned int i40e_max_channels(struct i40e_vsi *vsi)
+{
+ /* TODO: This code assumes DCB and FD are disabled for now. */
+ return vsi->alloc_queue_pairs;
+}
+
+/**
+ * i40e_get_channels - Get the current and maximum supported channel counts
+ * @dev: network interface device structure
+ * @ch: ethtool channels structure
+ *
+ * We don't support separate tx and rx queues as channels. The other count
+ * represents how many queues are being used for control. max_combined counts
+ * how many queue pairs we can support. They may not be mapped 1 to 1 with
+ * q_vectors since we support a lot more queue pairs than q_vectors.
+ **/
+static void i40e_get_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+
+ /* report maximum channels */
+ ch->max_combined = i40e_max_channels(vsi);
+
+ /* report info for other vector */
+ ch->other_count = (pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0;
+ ch->max_other = ch->other_count;
+
+ /* Note: This code assumes DCB is disabled for now. */
+ ch->combined_count = vsi->num_queue_pairs;
+}
+
+/**
+ * i40e_set_channels - Set the new channel count
+ * @dev: network interface device structure
+ * @ch: ethtool channels structure
+ *
+ * The new channel count may not be the same as requested by the user
+ * since it gets rounded down to a power-of-2 value.
+ **/
+static int i40e_set_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ unsigned int count = ch->combined_count;
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ int new_count;
+
+ /* We do not support setting channels for any other VSI at present */
+ if (vsi->type != I40E_VSI_MAIN)
+ return -EINVAL;
+
+ /* verify they are not requesting separate vectors */
+ if (!count || ch->rx_count || ch->tx_count)
+ return -EINVAL;
+
+ /* verify other_count has not changed */
+ if (ch->other_count != ((pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0))
+ return -EINVAL;
+
+ /* verify the number of channels does not exceed hardware limits */
+ if (count > i40e_max_channels(vsi))
+ return -EINVAL;
+
+ /* update feature limits from largest to smallest supported values */
+ /* TODO: Flow director limit, DCB etc */
+
+ /* cap RSS limit */
+ if (count > pf->rss_size_max)
+ count = pf->rss_size_max;
+
+ /* use rss_reconfig to rebuild with new queue count and update traffic
+ * class queue mapping
+ */
+ new_count = i40e_reconfig_rss_queues(pf, count);
+ if (new_count > 0)
+ return 0;
+ else
+ return -EINVAL;
+}
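
Given the validation above, a caller changes only the combined count and echoes the other fields back unchanged. A hypothetical userspace sketch:

#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

/* Hypothetical helper: read the current channel layout, then resize
 * only the combined count; rx/tx stay zero and other_count is
 * preserved, matching the checks in i40e_set_channels().
 */
static int set_combined_channels(int sock, struct ifreq *ifr,
				 unsigned int want)
{
	struct ethtool_channels ch = { .cmd = ETHTOOL_GCHANNELS };

	ifr->ifr_data = (void *)&ch;
	if (ioctl(sock, SIOCETHTOOL, ifr) < 0)
		return -1;

	ch.cmd = ETHTOOL_SCHANNELS;
	ch.combined_count = want;
	return ioctl(sock, SIOCETHTOOL, ifr);
}
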
+
static const struct ethtool_ops i40e_ethtool_ops = {
.get_settings = i40e_get_settings,
.get_drvinfo = i40e_get_drvinfo,
@@ -1439,6 +1688,7 @@ static const struct ethtool_ops i40e_ethtool_ops = {
.nway_reset = i40e_nway_reset,
.get_link = ethtool_op_get_link,
.get_wol = i40e_get_wol,
+ .set_wol = i40e_set_wol,
.get_eeprom_len = i40e_get_eeprom_len,
.get_eeprom = i40e_get_eeprom,
.get_ringparam = i40e_get_ringparam,
@@ -1455,6 +1705,8 @@ static const struct ethtool_ops i40e_ethtool_ops = {
.get_ethtool_stats = i40e_get_ethtool_stats,
.get_coalesce = i40e_get_coalesce,
.set_coalesce = i40e_set_coalesce,
+ .get_channels = i40e_get_channels,
+ .set_channels = i40e_set_channels,
.get_ts_info = i40e_get_ts_info,
};
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
index 901804af8b0e..bf2d4cc5b569 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -47,10 +46,10 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
u64 direct_mode_sz)
{
enum i40e_memory_type mem_type __attribute__((unused));
- i40e_status ret_code = 0;
struct i40e_hmc_sd_entry *sd_entry;
bool dma_mem_alloc_done = false;
struct i40e_dma_mem mem;
+ i40e_status ret_code;
u64 alloc_len;
if (NULL == hmc_info->sd_table.sd_entry) {
@@ -90,11 +89,9 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
sd_entry->u.pd_table.pd_entry =
(struct i40e_hmc_pd_entry *)
sd_entry->u.pd_table.pd_entry_virt_mem.va;
- memcpy(&sd_entry->u.pd_table.pd_page_addr, &mem,
- sizeof(struct i40e_dma_mem));
+ sd_entry->u.pd_table.pd_page_addr = mem;
} else {
- memcpy(&sd_entry->u.bp.addr, &mem,
- sizeof(struct i40e_dma_mem));
+ sd_entry->u.bp.addr = mem;
sd_entry->u.bp.sd_pd_index = sd_index;
}
/* initialize the sd entry */
@@ -165,7 +162,7 @@ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
if (ret_code)
goto exit;
- memcpy(&pd_entry->bp.addr, &mem, sizeof(struct i40e_dma_mem));
+ pd_entry->bp.addr = mem;
pd_entry->bp.sd_pd_index = pd_index;
pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;
/* Set page address and valid bit */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
index aacd42a261e9..0cd4701234f8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -117,7 +116,6 @@ struct i40e_hmc_info {
* @hw: pointer to our hw struct
* @pa: pointer to physical address
* @sd_index: segment descriptor index
- * @hmc_fn_id: hmc function id
* @type: if sd entry is direct or paged
**/
#define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type) \
@@ -139,7 +137,6 @@ struct i40e_hmc_info {
* I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware
* @hw: pointer to our hw struct
* @sd_index: segment descriptor index
- * @hmc_fn_id: hmc function id
* @type: if sd entry is direct or paged
**/
#define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type) \
@@ -160,7 +157,6 @@ struct i40e_hmc_info {
* @hw: pointer to our hw struct
* @sd_idx: segment descriptor index
* @pd_idx: page descriptor index
- * @hmc_fn_id: hmc function id
**/
#define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \
wr32((hw), I40E_PFHMC_PDINV, \
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
index a695b91c9c79..d5d98fe2691d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -486,8 +485,7 @@ i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
/* Make one big object, a single SD */
info.count = 1;
ret_code = i40e_create_lan_hmc_object(hw, &info);
- if ((ret_code) &&
- (model == I40E_HMC_MODEL_DIRECT_PREFERRED))
+ if (ret_code && (model == I40E_HMC_MODEL_DIRECT_PREFERRED))
goto try_type_paged;
else if (ret_code)
goto configure_lan_hmc_out;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
index 00ff35006077..341de925a298 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -113,8 +112,8 @@ enum i40e_hmc_lan_object_size {
#define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512
#define I40E_HMC_OBJ_SIZE_TXQ 128
#define I40E_HMC_OBJ_SIZE_RXQ 32
-#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 128
-#define I40E_HMC_OBJ_SIZE_FCOE_FILT 32
+#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 64
+#define I40E_HMC_OBJ_SIZE_FCOE_FILT 64
enum i40e_hmc_lan_rsrc_type {
I40E_HMC_LAN_FULL = 0,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 12b0932204ba..b901371ca361 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -27,6 +26,9 @@
/* Local includes */
#include "i40e.h"
+#ifdef CONFIG_I40E_VXLAN
+#include <net/vxlan.h>
+#endif
const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
@@ -36,22 +38,24 @@ static const char i40e_driver_string[] =
#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 11
+#define DRV_VERSION_BUILD 30
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
const char i40e_driver_version_str[] = DRV_VERSION;
-static const char i40e_copyright[] = "Copyright (c) 2013 Intel Corporation.";
+static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";
/* a bit of forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
-static int i40e_setup_pf_switch(struct i40e_pf *pf);
+static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
+static void i40e_fdir_sb_setup(struct i40e_pf *pf);
+static int i40e_veb_get_bw_info(struct i40e_veb *veb);
/* i40e_pci_tbl - PCI Device ID Table
*
@@ -61,16 +65,16 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
* Class, Class Mask, private data (not used) }
*/
static DEFINE_PCI_DEVICE_TABLE(i40e_pci_tbl) = {
- {PCI_VDEVICE(INTEL, I40E_SFP_XL710_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_SFP_X710_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_QEMU_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_KX_A_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_KX_B_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_KX_C_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_KX_D_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_QSFP_A_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_QSFP_B_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_QSFP_C_DEVICE_ID), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X710), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_D), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
/* required last entry */
{0, }
};
@@ -354,6 +358,9 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
int i;
+ if (test_bit(__I40E_DOWN, &vsi->state))
+ return stats;
+
if (!vsi->tx_rings)
return stats;
@@ -416,7 +423,7 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
- if (vsi->rx_rings)
+ if (vsi->rx_rings && vsi->rx_rings[0]) {
for (i = 0; i < vsi->num_queue_pairs; i++) {
memset(&vsi->rx_rings[i]->stats, 0 ,
sizeof(vsi->rx_rings[i]->stats));
@@ -427,6 +434,7 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
memset(&vsi->tx_rings[i]->tx_stats, 0,
sizeof(vsi->tx_rings[i]->tx_stats));
}
+ }
vsi->stat_offsets_loaded = false;
}
@@ -461,7 +469,7 @@ static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
{
u64 new_data;
- if (hw->device_id == I40E_QEMU_DEVICE_ID) {
+ if (hw->device_id == I40E_DEV_ID_QEMU) {
new_data = rd32(hw, loreg);
new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
} else {
@@ -577,10 +585,11 @@ static void i40e_update_veb_stats(struct i40e_veb *veb)
i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
veb->stat_offsets_loaded,
&oes->tx_discards, &es->tx_discards);
- i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
- veb->stat_offsets_loaded,
- &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
-
+ if (hw->revision_id > 0)
+ i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
+ veb->stat_offsets_loaded,
+ &oes->rx_unknown_protocol,
+ &es->rx_unknown_protocol);
i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
veb->stat_offsets_loaded,
&oes->rx_bytes, &es->rx_bytes);
@@ -778,8 +787,8 @@ void i40e_update_stats(struct i40e_vsi *vsi)
} while (u64_stats_fetch_retry_bh(&p->syncp, start));
rx_b += bytes;
rx_p += packets;
- rx_buf += p->rx_stats.alloc_rx_buff_failed;
- rx_page += p->rx_stats.alloc_rx_page_failed;
+ rx_buf += p->rx_stats.alloc_buff_failed;
+ rx_page += p->rx_stats.alloc_page_failed;
}
rcu_read_unlock();
vsi->tx_restart = tx_restart;
@@ -1065,7 +1074,7 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
if (!i40e_find_filter(vsi, macaddr, f->vlan,
is_vf, is_netdev)) {
if (!i40e_add_filter(vsi, macaddr, f->vlan,
- is_vf, is_netdev))
+ is_vf, is_netdev))
return NULL;
}
}
@@ -1207,6 +1216,10 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
return 0;
+ if (test_bit(__I40E_DOWN, &vsi->back->state) ||
+ test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
+ return -EADDRNOTAVAIL;
+
if (vsi->type == I40E_VSI_MAIN) {
i40e_status ret;
ret = i40e_aq_mac_address_write(&vsi->back->hw,
@@ -1260,6 +1273,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
u8 offset;
u16 qmap;
int i;
+ u16 num_tc_qps = 0;
sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
offset = 0;
@@ -1281,6 +1295,9 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
vsi->tc_config.numtc = numtc;
vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
+ /* Number of queues per enabled TC */
+ num_tc_qps = rounddown_pow_of_two(vsi->alloc_queue_pairs/numtc);
+ num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);
/* Setup queue offset/count for all TCs for given VSI */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
@@ -1288,30 +1305,25 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */
int pow, num_qps;
- vsi->tc_config.tc_info[i].qoffset = offset;
switch (vsi->type) {
case I40E_VSI_MAIN:
- if (i == 0)
- qcount = pf->rss_size;
- else
- qcount = pf->num_tc_qps;
- vsi->tc_config.tc_info[i].qcount = qcount;
+ qcount = min_t(int, pf->rss_size, num_tc_qps);
break;
case I40E_VSI_FDIR:
case I40E_VSI_SRIOV:
case I40E_VSI_VMDQ2:
default:
- qcount = vsi->alloc_queue_pairs;
- vsi->tc_config.tc_info[i].qcount = qcount;
+ qcount = num_tc_qps;
WARN_ON(i != 0);
break;
}
+ vsi->tc_config.tc_info[i].qoffset = offset;
+ vsi->tc_config.tc_info[i].qcount = qcount;
/* find the power-of-2 of the number of queue pairs */
- num_qps = vsi->tc_config.tc_info[i].qcount;
+ num_qps = qcount;
pow = 0;
- while (num_qps &&
- ((1 << pow) < vsi->tc_config.tc_info[i].qcount)) {
+ while (num_qps && ((1 << pow) < qcount)) {
pow++;
num_qps >>= 1;
}
@@ -1321,7 +1333,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
(offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
(pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
- offset += vsi->tc_config.tc_info[i].qcount;
+ offset += qcount;
} else {
/* TC is not enabled so set the offset to
* default queue and allocate one queue
@@ -1497,11 +1509,6 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
cpu_to_le16((u16)(f->vlan ==
I40E_VLAN_ANY ? 0 : f->vlan));
- /* vlan0 as wild card to allow packets from all vlans */
- if (f->vlan == I40E_VLAN_ANY ||
- (vsi->netdev && !(vsi->netdev->features &
- NETIF_F_HW_VLAN_CTAG_FILTER)))
- cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
del_list[num_del].flags = cmd_flags;
num_del++;
@@ -1567,12 +1574,6 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
add_list[num_add].queue_number = 0;
cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
-
- /* vlan0 as wild card to allow packets from all vlans */
- if (f->vlan == I40E_VLAN_ANY || (vsi->netdev &&
- !(vsi->netdev->features &
- NETIF_F_HW_VLAN_CTAG_FILTER)))
- cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
add_list[num_add].flags = cpu_to_le16(cmd_flags);
num_add++;
@@ -1638,6 +1639,13 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
dev_info(&pf->pdev->dev,
"set uni promisc failed, err %d, aq_err %d\n",
aq_ret, pf->hw.aq.asq_last_status);
+ aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
+ vsi->seid,
+ cur_promisc, NULL);
+ if (aq_ret)
+ dev_info(&pf->pdev->dev,
+ "set brdcast promisc failed, err %d, aq_err %d\n",
+ aq_ret, pf->hw.aq.asq_last_status);
}
clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
@@ -1690,6 +1698,27 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
}
/**
+ * i40e_ioctl - Access the hwtstamp interface
+ * @netdev: network interface device structure
+ * @ifr: interface request data
+ * @cmd: ioctl command
+ **/
+int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+
+ switch (cmd) {
+ case SIOCGHWTSTAMP:
+ return i40e_ptp_get_ts_config(pf, ifr);
+ case SIOCSHWTSTAMP:
+ return i40e_ptp_set_ts_config(pf, ifr);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
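
Both commands carry a struct hwtstamp_config through ifr_data. A hypothetical userspace sketch that enables transmit timestamps and PTPv2 event filtering (one of the rx_filters advertised by get_ts_info above); the driver may widen rx_filter, so reading the struct back after the ioctl is good practice:

#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

/* Hypothetical helper: turn on hardware timestamping. */
static int enable_hw_timestamps(int sock, struct ifreq *ifr)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};

	ifr->ifr_data = (void *)&cfg;
	return ioctl(sock, SIOCSHWTSTAMP, ifr);
}
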
+
+/**
* i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
* @vsi: the vsi being adjusted
**/
@@ -1771,7 +1800,6 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
{
struct i40e_mac_filter *f, *add_f;
bool is_netdev, is_vf;
- int ret;
is_vf = (vsi->type == I40E_VSI_SRIOV);
is_netdev = !!(vsi->netdev);
@@ -1797,13 +1825,6 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
}
}
- ret = i40e_sync_vsi_filters(vsi);
- if (ret) {
- dev_info(&vsi->back->pdev->dev,
- "Could not sync filters for vid %d\n", vid);
- return ret;
- }
-
/* Now if we add a vlan tag, make sure to check if it is the first
* tag (i.e. a "tag" -1 does exist) and if so replace the -1 "tag"
* with 0, so we now accept untagged and specified tagged traffic
@@ -1824,7 +1845,10 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
return -ENOMEM;
}
}
+ }
+ /* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
+ if (vid > 0 && !vsi->info.pvid) {
list_for_each_entry(f, &vsi->mac_filter_list, list) {
if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
is_vf, is_netdev)) {
@@ -1840,10 +1864,13 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
}
}
}
- ret = i40e_sync_vsi_filters(vsi);
}
- return ret;
+ if (test_bit(__I40E_DOWN, &vsi->back->state) ||
+ test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
+ return 0;
+
+ return i40e_sync_vsi_filters(vsi);
}
/**
@@ -1859,7 +1886,6 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
struct i40e_mac_filter *f, *add_f;
bool is_vf, is_netdev;
int filter_count = 0;
- int ret;
is_vf = (vsi->type == I40E_VSI_SRIOV);
is_netdev = !!(netdev);
@@ -1870,12 +1896,6 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
list_for_each_entry(f, &vsi->mac_filter_list, list)
i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
- ret = i40e_sync_vsi_filters(vsi);
- if (ret) {
- dev_info(&vsi->back->pdev->dev, "Could not sync filters\n");
- return ret;
- }
-
/* go through all the filters for this VSI and if there is only
* vid == 0 it means there are no other filters, so vid 0 must
* be replaced with -1. This signifies that we should from now
@@ -1918,6 +1938,10 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
}
}
+ if (test_bit(__I40E_DOWN, &vsi->back->state) ||
+ test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
+ return 0;
+
return i40e_sync_vsi_filters(vsi);
}
@@ -2008,8 +2032,9 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
vsi->info.pvid = cpu_to_le16(vid);
- vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID;
- vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
+ vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
+ I40E_AQ_VSI_PVLAN_INSERT_PVID |
+ I40E_AQ_VSI_PVLAN_EMOD_STR;
ctxt.seid = vsi->seid;
memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
@@ -2032,8 +2057,9 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
**/
void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
{
+ i40e_vlan_stripping_disable(vsi);
+
vsi->info.pvid = 0;
- i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
}
/**
@@ -2066,8 +2092,11 @@ static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
{
int i;
+ if (!vsi->tx_rings)
+ return;
+
for (i = 0; i < vsi->num_queue_pairs; i++)
- if (vsi->tx_rings[i]->desc)
+ if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
i40e_free_tx_resources(vsi->tx_rings[i]);
}
@@ -2100,8 +2129,11 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
{
int i;
+ if (!vsi->rx_rings)
+ return;
+
for (i = 0; i < vsi->num_queue_pairs; i++)
- if (vsi->rx_rings[i]->desc)
+ if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
i40e_free_rx_resources(vsi->rx_rings[i]);
}
@@ -2121,7 +2153,7 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
u32 qtx_ctl = 0;
/* some ATR related tx ring init */
- if (vsi->back->flags & I40E_FLAG_FDIR_ATR_ENABLED) {
+ if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
ring->atr_sample_rate = vsi->back->atr_sample_rate;
ring->atr_count = 0;
} else {
@@ -2130,6 +2162,7 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
/* initialize XPS */
if (ring->q_vector && ring->netdev &&
+ vsi->tc_config.numtc <= 1 &&
!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
netif_set_xps_queue(ring->netdev,
&ring->q_vector->affinity_mask,
@@ -2141,8 +2174,9 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
tx_ctx.new_context = 1;
tx_ctx.base = (ring->dma / 128);
tx_ctx.qlen = ring->count;
- tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FDIR_ENABLED |
- I40E_FLAG_FDIR_ATR_ENABLED));
+ tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
+ I40E_FLAG_FD_ATR_ENABLED));
+ tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
/* As part of VSI creation/update, FW allocates certain
* Tx arbitration queue sets for each TC enabled for
@@ -2176,7 +2210,10 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
}
/* Now associate this queue with this PCI function */
- qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
+ if (vsi->type == I40E_VSI_VMDQ2)
+ qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
+ else
+ qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
I40E_QTX_CTL_PF_INDX_MASK);
wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
@@ -2243,7 +2280,10 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
rx_ctx.tphwdesc_ena = 1;
rx_ctx.tphdata_ena = 1;
rx_ctx.tphhead_ena = 1;
- rx_ctx.lrxqthresh = 2;
+ if (hw->revision_id == 0)
+ rx_ctx.lrxqthresh = 0;
+ else
+ rx_ctx.lrxqthresh = 2;
rx_ctx.crcstrip = 1;
rx_ctx.l2tsel = 1;
rx_ctx.showiv = 1;
@@ -2477,6 +2517,7 @@ static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
I40E_PFINT_ICR0_ENA_GRST_MASK |
I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
I40E_PFINT_ICR0_ENA_GPIO_MASK |
+ I40E_PFINT_ICR0_ENA_TIMESYNC_MASK |
I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK |
I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
I40E_PFINT_ICR0_ENA_VFLR_MASK |
@@ -2485,8 +2526,8 @@ static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
wr32(hw, I40E_PFINT_ICR0_ENA, val);
/* SW_ITR_IDX = 0, but don't change INTENA */
- wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
- I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
+ wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
+ I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
/* OTHER_ITR_IDX = 0 */
wr32(hw, I40E_PFINT_STAT_CTL0, 0);
@@ -2532,6 +2573,19 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
}
/**
+ * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
+ * @pf: board private structure
+ **/
+void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+
+ wr32(hw, I40E_PFINT_DYN_CTL0,
+ I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+ i40e_flush(hw);
+}
+
+/**
* i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
* @pf: board private structure
**/
@@ -2584,23 +2638,6 @@ static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
}
/**
- * i40e_fdir_clean_rings - Interrupt Handler for FDIR rings
- * @irq: interrupt number
- * @data: pointer to a q_vector
- **/
-static irqreturn_t i40e_fdir_clean_rings(int irq, void *data)
-{
- struct i40e_q_vector *q_vector = data;
-
- if (!q_vector->tx.ring && !q_vector->rx.ring)
- return IRQ_HANDLED;
-
- pr_info("fdir ring cleaning needed\n");
-
- return IRQ_HANDLED;
-}
-
-/**
* i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
* @vsi: the VSI being configured
* @basename: name for the vector
@@ -2740,20 +2777,21 @@ static irqreturn_t i40e_intr(int irq, void *data)
{
struct i40e_pf *pf = (struct i40e_pf *)data;
struct i40e_hw *hw = &pf->hw;
+ irqreturn_t ret = IRQ_NONE;
u32 icr0, icr0_remaining;
u32 val, ena_mask;
icr0 = rd32(hw, I40E_PFINT_ICR0);
-
- val = rd32(hw, I40E_PFINT_DYN_CTL0);
- val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
- wr32(hw, I40E_PFINT_DYN_CTL0, val);
+ ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
/* if sharing a legacy IRQ, we might get called w/o an intr pending */
if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
- return IRQ_NONE;
+ goto enable_intr;
- ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
+ /* if interrupt but no bits showing, must be SWINT */
+ if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
+ (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
+ pf->sw_int_count++;
/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
@@ -2793,14 +2831,31 @@ static irqreturn_t i40e_intr(int irq, void *data)
val = rd32(hw, I40E_GLGEN_RSTAT);
val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
>> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
- if (val & I40E_RESET_CORER)
+ if (val == I40E_RESET_CORER)
pf->corer_count++;
- else if (val & I40E_RESET_GLOBR)
+ else if (val == I40E_RESET_GLOBR)
pf->globr_count++;
- else if (val & I40E_RESET_EMPR)
+ else if (val == I40E_RESET_EMPR)
pf->empr_count++;
}
+ if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
+ icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
+ dev_info(&pf->pdev->dev, "HMC error interrupt\n");
+ }
+
+ if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
+ u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
+
+ if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
+ ena_mask &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
+ i40e_ptp_tx_hwtstamp(pf);
+ prttsyn_stat &= ~I40E_PRTTSYN_STAT_0_TXTIME_MASK;
+ }
+
+ wr32(hw, I40E_PRTTSYN_STAT_0, prttsyn_stat);
+ }
+
/* If a critical error is pending we have no choice but to reset the
* device.
* Report and mask out any remaining unexpected interrupts.
@@ -2809,22 +2864,19 @@ static irqreturn_t i40e_intr(int irq, void *data)
if (icr0_remaining) {
dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
icr0_remaining);
- if ((icr0_remaining & I40E_PFINT_ICR0_HMC_ERR_MASK) ||
- (icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
+ if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
(icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
(icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK) ||
(icr0_remaining & I40E_PFINT_ICR0_MAL_DETECT_MASK)) {
- if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
- dev_info(&pf->pdev->dev, "HMC error interrupt\n");
- } else {
- dev_info(&pf->pdev->dev, "device will be reset\n");
- set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
- i40e_service_event_schedule(pf);
- }
+ dev_info(&pf->pdev->dev, "device will be reset\n");
+ set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ i40e_service_event_schedule(pf);
}
ena_mask &= ~icr0_remaining;
}
+ ret = IRQ_HANDLED;
+enable_intr:
/* re-enable interrupt causes */
wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
if (!test_bit(__I40E_DOWN, &pf->state)) {
@@ -2832,6 +2884,94 @@ static irqreturn_t i40e_intr(int irq, void *data)
i40e_irq_dynamic_enable_icr0(pf);
}
+ return ret;
+}
+
+/**
+ * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
+ * @tx_ring: tx ring to clean
+ * @budget: how many cleans we're allowed
+ *
+ * Returns true if there's any budget left (i.e. the clean is finished)
+ **/
+static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
+{
+ struct i40e_vsi *vsi = tx_ring->vsi;
+ u16 i = tx_ring->next_to_clean;
+ struct i40e_tx_buffer *tx_buf;
+ struct i40e_tx_desc *tx_desc;
+
+ tx_buf = &tx_ring->tx_bi[i];
+ tx_desc = I40E_TX_DESC(tx_ring, i);
+ i -= tx_ring->count;
+
+ do {
+ struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
+
+ /* if next_to_watch is not set then there is no work pending */
+ if (!eop_desc)
+ break;
+
+ /* prevent any other reads prior to eop_desc */
+ read_barrier_depends();
+
+ /* if the descriptor isn't done, no work yet to do */
+ if (!(eop_desc->cmd_type_offset_bsz &
+ cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
+ break;
+
+ /* clear next_to_watch to prevent false hangs */
+ tx_buf->next_to_watch = NULL;
+
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+
+ dma_unmap_len_set(tx_buf, len, 0);
+
+ /* move to the next desc and buffer to clean */
+ tx_buf++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buf = tx_ring->tx_bi;
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ }
+
+ /* update budget accounting */
+ budget--;
+ } while (likely(budget));
+
+ i += tx_ring->count;
+ tx_ring->next_to_clean = i;
+
+ if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
+ i40e_irq_dynamic_enable(vsi,
+ tx_ring->q_vector->v_idx + vsi->base_vector);
+ }
+ return budget > 0;
+}
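
The cleanup loop above biases the ring index by -count so the wrap test collapses to (!i) and no modulo is needed in the hot path. A minimal standalone sketch of the same pattern (names are illustrative, not driver API):

/* Walk 'steps' entries of a 'count'-entry ring starting at
 * 'next_to_clean', using the negative-bias trick from
 * i40e_clean_fdir_tx_irq(), and return the final ring index.
 */
static unsigned int ring_advance(unsigned int next_to_clean,
				 unsigned int count, unsigned int steps)
{
	int i = (int)next_to_clean - (int)count;	/* in [-count, -1] */

	while (steps--) {
		i++;
		if (!i)			/* walked past the last entry */
			i -= count;	/* rewind to the ring start */
	}
	return i + count;		/* real index in [0, count-1] */
}
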
+
+/**
+ * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ **/
+static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
+{
+ struct i40e_q_vector *q_vector = data;
+ struct i40e_vsi *vsi;
+
+ if (!q_vector->tx.ring)
+ return IRQ_HANDLED;
+
+ vsi = q_vector->tx.ring->vsi;
+ i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
+
return IRQ_HANDLED;
}
@@ -2974,28 +3114,20 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
} while (j-- && ((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT)
^ (tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)) & 1);
- if (enable) {
- /* is STAT set ? */
- if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) {
- dev_info(&pf->pdev->dev,
- "Tx %d already enabled\n", i);
- continue;
- }
- } else {
- /* is !STAT set ? */
- if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) {
- dev_info(&pf->pdev->dev,
- "Tx %d already disabled\n", i);
- continue;
- }
- }
+ /* Skip if the queue is already in the requested state */
+ if (enable && (tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ continue;
+ if (!enable && !(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ continue;
/* turn on/off the queue */
- if (enable)
+ if (enable) {
+ wr32(hw, I40E_QTX_HEAD(pf_q), 0);
tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK |
I40E_QTX_ENA_QENA_STAT_MASK;
- else
+ } else {
tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
+ }
wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
@@ -3019,6 +3151,9 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
}
}
+ if (hw->revision_id == 0)
+ mdelay(50);
+
return 0;
}
@@ -3091,9 +3226,9 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
* @vsi: the VSI being configured
* @enable: start or stop the rings
**/
-static int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
+int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
{
- int ret;
+ int ret = 0;
/* do rx first for enable and last for disable */
if (request) {
@@ -3102,10 +3237,9 @@ static int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
return ret;
ret = i40e_vsi_control_tx(vsi, request);
} else {
- ret = i40e_vsi_control_tx(vsi, request);
- if (ret)
- return ret;
- ret = i40e_vsi_control_rx(vsi, request);
+ /* Ignore return value; we need to shut down whatever we can */
+ i40e_vsi_control_tx(vsi, request);
+ i40e_vsi_control_rx(vsi, request);
}
return ret;
@@ -3131,7 +3265,8 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
u16 vector = i + base;
/* free only the irqs that were actually requested */
- if (vsi->q_vectors[i]->num_ringpairs == 0)
+ if (!vsi->q_vectors[i] ||
+ !vsi->q_vectors[i]->num_ringpairs)
continue;
/* clear the affinity_mask in the IRQ descriptor */
@@ -3543,7 +3678,7 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
/* Get the VSI level BW configuration per TC */
aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
- NULL);
+ NULL);
if (aq_ret) {
dev_info(&pf->pdev->dev,
"couldn't get pf vsi ets bw config, err %d, aq_err %d\n",
@@ -3754,6 +3889,149 @@ out:
}
/**
+ * i40e_veb_config_tc - Configure TCs for given VEB
+ * @veb: given VEB
+ * @enabled_tc: TC bitmap
+ *
+ * Configures given TC bitmap for VEB (switching) element
+ **/
+int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
+{
+ struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
+ struct i40e_pf *pf = veb->pf;
+ int ret = 0;
+ int i;
+
+ /* No TCs requested, or TCs already enabled: just return */
+ if (!enabled_tc || veb->enabled_tc == enabled_tc)
+ return ret;
+
+ bw_data.tc_valid_bits = enabled_tc;
+ /* bw_data.absolute_credits is not set (relative) */
+
+ /* Enable ETS TCs with equal BW Share for now */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (enabled_tc & (1 << i))
+ bw_data.tc_bw_share_credits[i] = 1;
+ }
+
+ ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
+ &bw_data, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "veb bw config failed, aq_err=%d\n",
+ pf->hw.aq.asq_last_status);
+ goto out;
+ }
+
+ /* Update the BW information */
+ ret = i40e_veb_get_bw_info(veb);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Failed getting veb bw config, aq_err=%d\n",
+ pf->hw.aq.asq_last_status);
+ }
+
+out:
+ return ret;
+}
+
+#ifdef CONFIG_I40E_DCB
+/**
+ * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
+ * @pf: PF struct
+ *
+ * Reconfigure VEB/VSIs on a given PF; it is assumed that
+ * the caller has quiesced all the VSIs before calling
+ * this function
+ **/
+static void i40e_dcb_reconfigure(struct i40e_pf *pf)
+{
+ u8 tc_map = 0;
+ int ret;
+ u8 v;
+
+ /* Enable the TCs available on PF to all VEBs */
+ tc_map = i40e_pf_get_tc_map(pf);
+ for (v = 0; v < I40E_MAX_VEB; v++) {
+ if (!pf->veb[v])
+ continue;
+ ret = i40e_veb_config_tc(pf->veb[v], tc_map);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Failed configuring TC for VEB seid=%d\n",
+ pf->veb[v]->seid);
+ /* Keep going and try to configure as many components as possible */
+ }
+ }
+
+ /* Update each VSI */
+ for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ if (!pf->vsi[v])
+ continue;
+
+ /* - Enable all TCs for the LAN VSI
+ * - For all others keep them at TC0 for now
+ */
+ if (v == pf->lan_vsi)
+ tc_map = i40e_pf_get_tc_map(pf);
+ else
+ tc_map = i40e_pf_get_default_tc(pf);
+
+ ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Failed configuring TC for VSI seid=%d\n",
+ pf->vsi[v]->seid);
+ /* Keep going and try to configure as many components as possible */
+ } else {
+ if (pf->vsi[v]->netdev)
+ i40e_dcbnl_set_all(pf->vsi[v]);
+ }
+ }
+}
+
+/**
+ * i40e_init_pf_dcb - Initialize DCB configuration
+ * @pf: PF being configured
+ *
+ * Query the current DCB configuration and cache it
+ * in the hardware structure
+ **/
+static int i40e_init_pf_dcb(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ int err = 0;
+
+ if (pf->hw.func_caps.npar_enable)
+ goto out;
+
+ /* Get the initial DCB configuration */
+ err = i40e_init_dcb(hw);
+ if (!err) {
+ /* Device/Function is not DCBX capable */
+ if ((!hw->func_caps.dcb) ||
+ (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
+ dev_info(&pf->pdev->dev,
+ "DCBX offload is not supported or is disabled for this PF.\n");
+
+ if (pf->flags & I40E_FLAG_MFP_ENABLED)
+ goto out;
+
+ } else {
+ /* When status is not DISABLED, DCBX is managed in FW */
+ pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
+ DCB_CAP_DCBX_VER_IEEE;
+ pf->flags |= I40E_FLAG_DCB_ENABLED;
+ }
+ }
+
+out:
+ return err;
+}
+#endif /* CONFIG_I40E_DCB */
+
+/**
* i40e_up_complete - Finish the last steps of bringing up a connection
* @vsi: the VSI being configured
**/
@@ -3957,22 +4235,28 @@ static int i40e_open(struct net_device *netdev)
if (err)
goto err_setup_rx;
+ /* Notify the stack of the actual queue counts. */
+ err = netif_set_real_num_tx_queues(netdev, vsi->num_queue_pairs);
+ if (err)
+ goto err_set_queues;
+
+ err = netif_set_real_num_rx_queues(netdev, vsi->num_queue_pairs);
+ if (err)
+ goto err_set_queues;
+
err = i40e_up_complete(vsi);
if (err)
goto err_up_complete;
- if ((vsi->type == I40E_VSI_MAIN) || (vsi->type == I40E_VSI_VMDQ2)) {
- err = i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, true, NULL);
- if (err)
- netdev_info(netdev,
- "couldn't set broadcast err %d aq_err %d\n",
- err, pf->hw.aq.asq_last_status);
- }
+#ifdef CONFIG_I40E_VXLAN
+ vxlan_get_rx_port(netdev);
+#endif
return 0;
err_up_complete:
i40e_down(vsi);
+err_set_queues:
i40e_vsi_free_irq(vsi);
err_setup_rx:
i40e_vsi_free_rx_resources(vsi);
@@ -4054,6 +4338,24 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
i40e_flush(&pf->hw);
+ } else if (reset_flags & (1 << __I40E_EMP_RESET_REQUESTED)) {
+
+ /* Request a Firmware Reset
+ *
+ * Same as Global reset, plus restarting the
+ * embedded firmware engine.
+ */
+ /* enable EMP Reset */
+ val = rd32(&pf->hw, I40E_GLGEN_RSTENA_EMP);
+ val |= I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK;
+ wr32(&pf->hw, I40E_GLGEN_RSTENA_EMP, val);
+
+ /* force the reset */
+ val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
+ val |= I40E_GLGEN_RTRIG_EMPFWR_MASK;
+ wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
+ i40e_flush(&pf->hw);
+
} else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {
/* Request a PF Reset
@@ -4091,6 +4393,144 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
}
}
+#ifdef CONFIG_I40E_DCB
+/**
+ * i40e_dcb_need_reconfig - Check if DCB needs reconfig
+ * @pf: board private structure
+ * @old_cfg: current DCB config
+ * @new_cfg: new DCB config
+ **/
+bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
+ struct i40e_dcbx_config *old_cfg,
+ struct i40e_dcbx_config *new_cfg)
+{
+ bool need_reconfig = false;
+
+ /* Check if ETS configuration has changed */
+ if (memcmp(&new_cfg->etscfg,
+ &old_cfg->etscfg,
+ sizeof(new_cfg->etscfg))) {
+ /* If Priority Table has changed reconfig is needed */
+ if (memcmp(&new_cfg->etscfg.prioritytable,
+ &old_cfg->etscfg.prioritytable,
+ sizeof(new_cfg->etscfg.prioritytable))) {
+ need_reconfig = true;
+ dev_info(&pf->pdev->dev, "ETS UP2TC changed.\n");
+ }
+
+ if (memcmp(&new_cfg->etscfg.tcbwtable,
+ &old_cfg->etscfg.tcbwtable,
+ sizeof(new_cfg->etscfg.tcbwtable)))
+ dev_info(&pf->pdev->dev, "ETS TC BW Table changed.\n");
+
+ if (memcmp(&new_cfg->etscfg.tsatable,
+ &old_cfg->etscfg.tsatable,
+ sizeof(new_cfg->etscfg.tsatable)))
+ dev_info(&pf->pdev->dev, "ETS TSA Table changed.\n");
+ }
+
+ /* Check if PFC configuration has changed */
+ if (memcmp(&new_cfg->pfc,
+ &old_cfg->pfc,
+ sizeof(new_cfg->pfc))) {
+ need_reconfig = true;
+ dev_info(&pf->pdev->dev, "PFC config change detected.\n");
+ }
+
+ /* Check if APP Table has changed */
+ if (memcmp(&new_cfg->app,
+ &old_cfg->app,
+ sizeof(new_cfg->app))) {
+ need_reconfig = true;
+ dev_info(&pf->pdev->dev, "APP Table change detected.\n");
+ }
+
+ return need_reconfig;
+}
+
+/**
+ * i40e_handle_lldp_event - Handle LLDP Change MIB event
+ * @pf: board private structure
+ * @e: event info posted on ARQ
+ **/
+static int i40e_handle_lldp_event(struct i40e_pf *pf,
+ struct i40e_arq_event_info *e)
+{
+ struct i40e_aqc_lldp_get_mib *mib =
+ (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
+ struct i40e_dcbx_config tmp_dcbx_cfg;
+ bool need_reconfig = false;
+ int ret = 0;
+ u8 type;
+
+ /* Ignore if event is not for Nearest Bridge */
+ type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
+ & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+ if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
+ return ret;
+
+ /* Check MIB Type and return if event for Remote MIB update */
+ type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
+ if (type == I40E_AQ_LLDP_MIB_REMOTE) {
+ /* Update the remote cached instance and return */
+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
+ I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+ &hw->remote_dcbx_config);
+ goto exit;
+ }
+
+ /* Convert/store the DCBX data from LLDPDU temporarily */
+ memset(&tmp_dcbx_cfg, 0, sizeof(tmp_dcbx_cfg));
+ ret = i40e_lldp_to_dcb_config(e->msg_buf, &tmp_dcbx_cfg);
+ if (ret) {
+ /* Error in LLDPDU parsing return */
+ dev_info(&pf->pdev->dev, "Failed parsing LLDPDU from event buffer\n");
+ goto exit;
+ }
+
+ /* No change detected in DCBX configs */
+ if (!memcmp(&tmp_dcbx_cfg, dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
+ dev_info(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
+ goto exit;
+ }
+
+ need_reconfig = i40e_dcb_need_reconfig(pf, dcbx_cfg, &tmp_dcbx_cfg);
+
+ i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg);
+
+ /* Overwrite the new configuration */
+ *dcbx_cfg = tmp_dcbx_cfg;
+
+ if (!need_reconfig)
+ goto exit;
+
+ /* Reconfiguration needed; quiesce all VSIs */
+ i40e_pf_quiesce_all_vsi(pf);
+
+ /* Configuration changed; update the VEBs/VSIs */
+ i40e_dcb_reconfigure(pf);
+
+ i40e_pf_unquiesce_all_vsi(pf);
+exit:
+ return ret;
+}
+#endif /* CONFIG_I40E_DCB */
+
+/**
+ * i40e_do_reset_safe - Protected reset path for userland calls.
+ * @pf: board private structure
+ * @reset_flags: which reset is requested
+ *
+ **/
+void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
+{
+ rtnl_lock();
+ i40e_do_reset(pf, reset_flags);
+ rtnl_unlock();
+}
+
/**
* i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
* @pf: board private structure
@@ -4245,6 +4685,9 @@ static void i40e_link_event(struct i40e_pf *pf)
if (pf->vf)
i40e_vc_notify_link_state(pf);
+
+ if (pf->flags & I40E_FLAG_PTP)
+ i40e_ptp_set_increment(pf);
}
/**
@@ -4326,6 +4769,8 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
for (i = 0; i < I40E_MAX_VEB; i++)
if (pf->veb[i])
i40e_update_veb_stats(pf->veb[i]);
+
+ i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
}
/**
@@ -4336,6 +4781,7 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
{
u32 reset_flags = 0;
+ rtnl_lock();
if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
reset_flags |= (1 << __I40E_REINIT_REQUESTED);
clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
@@ -4358,7 +4804,7 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
*/
if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
i40e_handle_reset_warning(pf);
- return;
+ goto unlock;
}
/* If we're already down or resetting, just bail */
@@ -4366,6 +4812,9 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
!test_bit(__I40E_DOWN, &pf->state) &&
!test_bit(__I40E_CONFIG_BUSY, &pf->state))
i40e_do_reset(pf, reset_flags);
+
+unlock:
+ rtnl_unlock();
}
/**
@@ -4429,6 +4878,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
return;
do {
+ event.msg_size = I40E_MAX_AQ_BUF_SIZE; /* reinit each time */
ret = i40e_clean_arq_element(hw, &event, &pending);
if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
dev_info(&pf->pdev->dev, "No ARQ event found\n");
@@ -4454,15 +4904,23 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
break;
case i40e_aqc_opc_lldp_update_mib:
dev_info(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
+#ifdef CONFIG_I40E_DCB
+ rtnl_lock();
+ ret = i40e_handle_lldp_event(pf, &event);
+ rtnl_unlock();
+#endif /* CONFIG_I40E_DCB */
break;
case i40e_aqc_opc_event_lan_overflow:
dev_info(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
i40e_handle_lan_overflow_event(pf, &event);
break;
+ case i40e_aqc_opc_send_msg_to_peer:
+ dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
+ break;
default:
dev_info(&pf->pdev->dev,
- "ARQ Error: Unknown event %d received\n",
- event.desc.opcode);
+ "ARQ Error: Unknown event 0x%04x received\n",
+ opcode);
break;
}
} while (pending && (i++ < pf->adminq_work_limit));
@@ -4592,6 +5050,9 @@ static int i40e_get_capabilities(struct i40e_pf *pf)
}
} while (err);
+ /* increment MSI-X count because current FW skips one */
+ pf->hw.func_caps.num_msix_vectors++;
+
if (pf->hw.debug_mask & I40E_DEBUG_USER)
dev_info(&pf->pdev->dev,
"pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
@@ -4603,57 +5064,89 @@ static int i40e_get_capabilities(struct i40e_pf *pf)
pf->hw.func_caps.num_tx_qp,
pf->hw.func_caps.num_vsis);
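+ /* one main LAN VSI, plus one per VF, plus one for FCoE when present */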
+#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
+ + pf->hw.func_caps.num_vfs)
+ if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
+ dev_info(&pf->pdev->dev,
+ "got num_vsis %d, setting num_vsis to %d\n",
+ pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
+ pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
+ }
+
return 0;
}
+static int i40e_vsi_clear(struct i40e_vsi *vsi);
+
/**
- * i40e_fdir_setup - initialize the Flow Director resources
+ * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
* @pf: board private structure
**/
-static void i40e_fdir_setup(struct i40e_pf *pf)
+static void i40e_fdir_sb_setup(struct i40e_pf *pf)
{
struct i40e_vsi *vsi;
bool new_vsi = false;
int err, i;
- if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED |
- I40E_FLAG_FDIR_ATR_ENABLED)))
+ if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
return;
- pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
-
- /* find existing or make new FDIR VSI */
+ /* find existing VSI and see if it needs configuring */
vsi = NULL;
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
- if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
+ for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
vsi = pf->vsi[i];
+ break;
+ }
+ }
+
+ /* create a new VSI if none exists */
if (!vsi) {
- vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->mac_seid, 0);
+ vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
+ pf->vsi[pf->lan_vsi]->seid, 0);
if (!vsi) {
dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
- pf->flags &= ~I40E_FLAG_FDIR_ENABLED;
- return;
+ goto err_vsi;
}
new_vsi = true;
}
- WARN_ON(vsi->base_queue != I40E_FDIR_RING);
- i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_rings);
+ i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
err = i40e_vsi_setup_tx_resources(vsi);
- if (!err)
- err = i40e_vsi_setup_rx_resources(vsi);
- if (!err)
- err = i40e_vsi_configure(vsi);
- if (!err && new_vsi) {
+ if (err)
+ goto err_setup_tx;
+ err = i40e_vsi_setup_rx_resources(vsi);
+ if (err)
+ goto err_setup_rx;
+
+ if (new_vsi) {
char int_name[IFNAMSIZ + 9];
+ err = i40e_vsi_configure(vsi);
+ if (err)
+ goto err_setup_rx;
snprintf(int_name, sizeof(int_name) - 1, "%s-fdir",
dev_driver_string(&pf->pdev->dev));
err = i40e_vsi_request_irq(vsi, int_name);
- }
- if (!err)
+ if (err)
+ goto err_setup_rx;
err = i40e_up_complete(vsi);
+ if (err)
+ goto err_up_complete;
+ }
clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
+ return;
+
+err_up_complete:
+ i40e_down(vsi);
+ i40e_vsi_free_irq(vsi);
+err_setup_rx:
+ i40e_vsi_free_rx_resources(vsi);
+err_setup_tx:
+ i40e_vsi_free_tx_resources(vsi);
+err_vsi:
+ pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ i40e_vsi_clear(vsi);
}
/**
@@ -4673,26 +5166,25 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
}
/**
- * i40e_handle_reset_warning - prep for the core to reset
+ * i40e_prep_for_reset - prep for the core to reset
* @pf: board private structure
*
- * Close up the VFs and other things in prep for a Core Reset,
- * then get ready to rebuild the world.
- **/
-static void i40e_handle_reset_warning(struct i40e_pf *pf)
+ * Close up the VFs and other things in prep for a PF reset.
+ **/
+static int i40e_prep_for_reset(struct i40e_pf *pf)
{
- struct i40e_driver_version dv;
struct i40e_hw *hw = &pf->hw;
i40e_status ret;
u32 v;
clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
- return;
+ return 0;
dev_info(&pf->pdev->dev, "Tearing down internal switch for reset\n");
- i40e_vc_notify_reset(pf);
+ if (i40e_check_asq_alive(hw))
+ i40e_vc_notify_reset(pf);
/* quiesce the VSIs and their queues that are not already DOWN */
i40e_pf_quiesce_all_vsi(pf);
@@ -4704,6 +5196,27 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf)
i40e_shutdown_adminq(&pf->hw);
+ /* call shutdown HMC */
+ ret = i40e_shutdown_lan_hmc(hw);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "shutdown_lan_hmc failed: %d\n", ret);
+ clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
+ }
+ return ret;
+}
+
+/**
+ * i40e_reset_and_rebuild - reset and rebuild using a saved config
+ * @pf: board private structure
+ * @reinit: true if the Main VSI needs to be re-initialized.
+ **/
+static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
+{
+ struct i40e_driver_version dv;
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status ret;
+ u32 v;
+
/* Now we wait for GRST to settle out.
* We don't have to delete the VEBs or VSIs from the hw switch
* because the reset will make them disappear.
@@ -4731,13 +5244,6 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf)
goto end_core_reset;
}
- /* call shutdown HMC */
- ret = i40e_shutdown_lan_hmc(hw);
- if (ret) {
- dev_info(&pf->pdev->dev, "shutdown_lan_hmc failed: %d\n", ret);
- goto end_core_reset;
- }
-
ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
hw->func_caps.num_rx_qp,
pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
@@ -4751,8 +5257,16 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf)
goto end_core_reset;
}
+#ifdef CONFIG_I40E_DCB
+ ret = i40e_init_pf_dcb(pf);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "init_pf_dcb failed: %d\n", ret);
+ goto end_core_reset;
+ }
+#endif /* CONFIG_I40E_DCB */
+
/* do basic switch setup */
- ret = i40e_setup_pf_switch(pf);
+ ret = i40e_setup_pf_switch(pf, reinit);
if (ret)
goto end_core_reset;
@@ -4831,6 +5345,22 @@ end_core_reset:
}
/**
+ * i40e_handle_reset_warning - prep for the pf to reset, reset and rebuild
+ * @pf: board private structure
+ *
+ * Close up the VFs and other things in prep for a Core Reset,
+ * then get ready to rebuild the world.
+ **/
+static void i40e_handle_reset_warning(struct i40e_pf *pf)
+{
+ i40e_status ret;
+
+ ret = i40e_prep_for_reset(pf);
+ if (!ret)
+ i40e_reset_and_rebuild(pf, false);
+}
+
+/**
* i40e_handle_mdd_event
* @pf: pointer to the pf structure
*
@@ -4911,6 +5441,52 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
i40e_flush(hw);
}
+#ifdef CONFIG_I40E_VXLAN
+/**
+ * i40e_sync_vxlan_filters_subtask - Sync the VSI filter list with HW
+ * @pf: board private structure
+ **/
+static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
+{
+ const int vxlan_hdr_qwords = 4;
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status ret;
+ u8 filter_index;
+ __be16 port;
+ int i;
+
+ if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC))
+ return;
+
+ pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC;
+
+ for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
+ if (pf->pending_vxlan_bitmap & (1 << i)) {
+ pf->pending_vxlan_bitmap &= ~(1 << i);
+ port = pf->vxlan_ports[i];
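+ /* a port value of 0 marks the slot for deletion from the HW table */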
+ ret = port ?
+ i40e_aq_add_udp_tunnel(hw, ntohs(port),
+ vxlan_hdr_qwords,
+ I40E_AQC_TUNNEL_TYPE_VXLAN,
+ &filter_index, NULL)
+ : i40e_aq_del_udp_tunnel(hw, i, NULL);
+
+ if (ret) {
+ dev_info(&pf->pdev->dev, "Failed to execute AQ command for %s port %d with index %d\n",
+ port ? "adding" : "deleting",
+ ntohs(port), i);
+
+ pf->vxlan_ports[i] = 0;
+ } else {
+ dev_info(&pf->pdev->dev, "%s port %d with AQ command with index %d\n",
+ port ? "Added" : "Deleted",
+ ntohs(port), port ? i : filter_index);
+ }
+ }
+ }
+}
+
+#endif
/**
* i40e_service_task - Run the driver's async subtasks
* @work: pointer to work_struct containing our data
@@ -4929,6 +5505,9 @@ static void i40e_service_task(struct work_struct *work)
i40e_fdir_reinit_subtask(pf);
i40e_check_hang_subtask(pf);
i40e_sync_filters_subtask(pf);
+#ifdef CONFIG_I40E_VXLAN
+ i40e_sync_vxlan_filters_subtask(pf);
+#endif
i40e_clean_adminq_subtask(pf);
i40e_service_event_complete(pf);
@@ -5006,6 +5585,42 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
}
/**
+ * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
+ * @type: VSI pointer
+ * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
+ *
+ * On error: returns error code (negative)
+ * On success: returns 0
+ **/
+static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
+{
+ int size;
+ int ret = 0;
+
+ /* allocate memory for both Tx and Rx ring pointers */
+ size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
+ vsi->tx_rings = kzalloc(size, GFP_KERNEL);
+ if (!vsi->tx_rings)
+ return -ENOMEM;
+ vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
+
+ if (alloc_qvectors) {
+ /* allocate memory for q_vector pointers */
+ size = sizeof(struct i40e_q_vectors *) * vsi->num_q_vectors;
+ vsi->q_vectors = kzalloc(size, GFP_KERNEL);
+ if (!vsi->q_vectors) {
+ ret = -ENOMEM;
+ goto err_vectors;
+ }
+ }
+ return ret;
+
+err_vectors:
+ kfree(vsi->tx_rings);
+ return ret;
+}
+
+/**
* i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
* @pf: board private structure
* @type: type of VSI
@@ -5017,8 +5632,6 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
{
int ret = -ENODEV;
struct i40e_vsi *vsi;
- int sz_vectors;
- int sz_rings;
int vsi_idx;
int i;
@@ -5068,22 +5681,9 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
if (ret)
goto err_rings;
- /* allocate memory for ring pointers */
- sz_rings = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
- vsi->tx_rings = kzalloc(sz_rings, GFP_KERNEL);
- if (!vsi->tx_rings) {
- ret = -ENOMEM;
+ ret = i40e_vsi_alloc_arrays(vsi, true);
+ if (ret)
goto err_rings;
- }
- vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
-
- /* allocate memory for q_vector pointers */
- sz_vectors = sizeof(struct i40e_q_vectors *) * vsi->num_q_vectors;
- vsi->q_vectors = kzalloc(sz_vectors, GFP_KERNEL);
- if (!vsi->q_vectors) {
- ret = -ENOMEM;
- goto err_vectors;
- }
/* Setup default MSIX irq handler for VSI */
i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
@@ -5092,8 +5692,6 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
ret = vsi_idx;
goto unlock_pf;
-err_vectors:
- kfree(vsi->tx_rings);
err_rings:
pf->next_vsi = i - 1;
kfree(vsi);
@@ -5103,6 +5701,26 @@ unlock_pf:
}
/**
+ * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
+ * @vsi: pointer to the VSI
+ * @free_qvectors: a bool to specify if q_vectors need to be freed.
+ **/
+static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
+{
+ /* free the ring and vector containers */
+ if (free_qvectors) {
+ kfree(vsi->q_vectors);
+ vsi->q_vectors = NULL;
+ }
+ kfree(vsi->tx_rings);
+ vsi->tx_rings = NULL;
+ vsi->rx_rings = NULL;
+}
+
+/**
* i40e_vsi_clear - Deallocate the VSI provided
* @vsi: the VSI being un-configured
**/
@@ -5138,9 +5756,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
- /* free the ring and vector containers */
- kfree(vsi->q_vectors);
- kfree(vsi->tx_rings);
+ i40e_vsi_free_arrays(vsi, true);
pf->vsi[vsi->idx] = NULL;
if (vsi->idx < pf->next_vsi)
@@ -5158,18 +5774,17 @@ free_vsi:
* i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
* @vsi: the VSI being cleaned
**/
-static s32 i40e_vsi_clear_rings(struct i40e_vsi *vsi)
+static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
{
int i;
- if (vsi->tx_rings[0])
+ if (vsi->tx_rings && vsi->tx_rings[0]) {
for (i = 0; i < vsi->alloc_queue_pairs; i++) {
kfree_rcu(vsi->tx_rings[i], rcu);
vsi->tx_rings[i] = NULL;
vsi->rx_rings[i] = NULL;
}
-
- return 0;
+ }
}
/**
@@ -5186,6 +5801,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
struct i40e_ring *tx_ring;
struct i40e_ring *rx_ring;
+ /* allocate space for both Tx and Rx in one shot */
tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
if (!tx_ring)
goto err_out;
@@ -5289,19 +5905,22 @@ static int i40e_init_msix(struct i40e_pf *pf)
/* The number of vectors we'll request will be comprised of:
* - Add 1 for "other" cause for Admin Queue events, etc.
* - The number of LAN queue pairs
- * already adjusted for the NUMA node
- * assumes symmetric Tx/Rx pairing
+ * - Queues being used for RSS.
+ * We don't need as many as max_rss_size vectors;
+ * use rss_size instead in the calculation, since that
+ * is governed by the number of CPUs in the system.
+ * - assumes symmetric Tx/Rx pairing
* - The number of VMDq pairs
* Once we count this up, try the request.
*
* If we can't get what we want, we'll simplify to nearly nothing
* and try again. If that still fails, we punt.
*/
- pf->num_lan_msix = pf->num_lan_qps;
+ pf->num_lan_msix = pf->num_lan_qps - (pf->rss_size_max - pf->rss_size);
pf->num_vmdq_msix = pf->num_vmdq_qps;
v_budget = 1 + pf->num_lan_msix;
v_budget += (pf->num_vmdq_vsis * pf->num_vmdq_msix);
- if (pf->flags & I40E_FLAG_FDIR_ENABLED)
+ if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
v_budget++;
/* Scale down if necessary, and the rings will share vectors */
@@ -5437,14 +6056,13 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
err = i40e_init_msix(pf);
if (err) {
- pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
- I40E_FLAG_RSS_ENABLED |
- I40E_FLAG_MQ_ENABLED |
- I40E_FLAG_DCB_ENABLED |
- I40E_FLAG_SRIOV_ENABLED |
- I40E_FLAG_FDIR_ENABLED |
- I40E_FLAG_FDIR_ATR_ENABLED |
- I40E_FLAG_VMDQ_ENABLED);
+ pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
+ I40E_FLAG_RSS_ENABLED |
+ I40E_FLAG_DCB_ENABLED |
+ I40E_FLAG_SRIOV_ENABLED |
+ I40E_FLAG_FD_SB_ENABLED |
+ I40E_FLAG_FD_ATR_ENABLED |
+ I40E_FLAG_VMDQ_ENABLED);
/* rework the queue expectations without MSIX */
i40e_determine_queue_usage(pf);
@@ -5513,15 +6131,15 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
**/
static int i40e_config_rss(struct i40e_pf *pf)
{
- struct i40e_hw *hw = &pf->hw;
- u32 lut = 0;
- int i, j;
- u64 hena;
/* Set of random keys generated using kernel random number generator */
static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687,
0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377,
0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d,
0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be};
+ struct i40e_hw *hw = &pf->hw;
+ u32 lut = 0;
+ int i, j;
+ u64 hena;
/* Fill out hash function seed */
for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
@@ -5530,16 +6148,7 @@ static int i40e_config_rss(struct i40e_pf *pf)
/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
- hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4)|
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+ hena |= I40E_DEFAULT_RSS_HENA;
wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
@@ -5568,6 +6177,34 @@ static int i40e_config_rss(struct i40e_pf *pf)
}
/**
+ * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
+ * @pf: board private structure
+ * @queue_count: the requested queue count for rss.
+ *
+ * Returns 0 if RSS is not enabled; if enabled, returns the final RSS queue
+ * count, which may differ from the requested queue count.
+ **/
+int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
+{
+ if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
+ return 0;
+
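+ /* clamp to the HW maximum and round down to a power of two for the LUT */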
+ queue_count = min_t(int, queue_count, pf->rss_size_max);
+ queue_count = rounddown_pow_of_two(queue_count);
+
+ if (queue_count != pf->rss_size) {
+ i40e_prep_for_reset(pf);
+
+ pf->rss_size = queue_count;
+
+ i40e_reset_and_rebuild(pf, true);
+ i40e_config_rss(pf);
+ }
+ dev_info(&pf->pdev->dev, "RSS count: %d\n", pf->rss_size);
+ return pf->rss_size;
+}
+
+/**
* i40e_sw_init - Initialize general software structures (struct i40e_pf)
* @pf: board private structure to initialize
*
@@ -5582,6 +6219,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
(NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
+ pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG;
if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
if (I40E_DEBUG_USER & debug)
pf->hw.debug_mask = debug;
@@ -5593,39 +6231,47 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
I40E_FLAG_MSI_ENABLED |
I40E_FLAG_MSIX_ENABLED |
- I40E_FLAG_RX_PS_ENABLED |
- I40E_FLAG_MQ_ENABLED |
I40E_FLAG_RX_1BUF_ENABLED;
+ /* Depending on the PF configuration, the RSS maximum might
+ * end up larger than the number of available queues
+ */
pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
+ pf->rss_size_max = min_t(int, pf->rss_size_max,
+ pf->hw.func_caps.num_tx_qp);
if (pf->hw.func_caps.rss) {
pf->flags |= I40E_FLAG_RSS_ENABLED;
- pf->rss_size = min_t(int, pf->rss_size_max,
- nr_cpus_node(numa_node_id()));
+ pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
+ pf->rss_size = rounddown_pow_of_two(pf->rss_size);
} else {
pf->rss_size = 1;
}
- if (pf->hw.func_caps.dcb)
- pf->num_tc_qps = I40E_DEFAULT_QUEUES_PER_TC;
- else
- pf->num_tc_qps = 0;
+ /* MFP mode enabled */
+ if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
+ pf->flags |= I40E_FLAG_MFP_ENABLED;
+ dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
+ }
- if (pf->hw.func_caps.fd) {
- /* FW/NVM is not yet fixed in this regard */
- if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
- (pf->hw.func_caps.fd_filters_best_effort > 0)) {
- pf->flags |= I40E_FLAG_FDIR_ATR_ENABLED;
- dev_info(&pf->pdev->dev,
- "Flow Director ATR mode Enabled\n");
- pf->flags |= I40E_FLAG_FDIR_ENABLED;
+ /* FW/NVM is not yet fixed in this regard */
+ if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
+ (pf->hw.func_caps.fd_filters_best_effort > 0)) {
+ pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
+ pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
+ dev_info(&pf->pdev->dev,
+ "Flow Director ATR mode Enabled\n");
+ if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
+ pf->flags |= I40E_FLAG_FD_SB_ENABLED;
dev_info(&pf->pdev->dev,
"Flow Director Side Band mode Enabled\n");
- pf->fdir_pf_filter_count =
- pf->hw.func_caps.fd_filters_guaranteed;
+ } else {
+ dev_info(&pf->pdev->dev,
+ "Flow Director Side Band mode Disabled in MFP mode\n");
}
- } else {
- pf->fdir_pf_filter_count = 0;
+ pf->fdir_pf_filter_count =
+ pf->hw.func_caps.fd_filters_guaranteed;
+ pf->hw.fdir_shared_filter_count =
+ pf->hw.func_caps.fd_filters_best_effort;
}
if (pf->hw.func_caps.vmdq) {
@@ -5634,12 +6280,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ;
}
- /* MFP mode enabled */
- if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
- pf->flags |= I40E_FLAG_MFP_ENABLED;
- dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
- }
-
#ifdef CONFIG_PCI_IOV
if (pf->hw.func_caps.num_vfs) {
pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
@@ -5647,6 +6287,9 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->num_req_vfs = min_t(int,
pf->hw.func_caps.num_vfs,
I40E_MAX_VF_COUNT);
+ dev_info(&pf->pdev->dev,
+ "Number of VFs being requested for PF[%d] = %d\n",
+ pf->hw.pf_id, pf->num_req_vfs);
}
#endif /* CONFIG_PCI_IOV */
pf->eeprom_version = 0xDEAD;
@@ -5701,6 +6344,104 @@ static int i40e_set_features(struct net_device *netdev,
return 0;
}
+#ifdef CONFIG_I40E_VXLAN
+/**
+ * i40e_get_vxlan_port_idx - Look up a UDP port possibly offloaded for Rx
+ * @pf: board private structure
+ * @port: The UDP port to look up
+ *
+ * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
+ **/
+static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port)
+{
+ u8 i;
+
+ for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
+ if (pf->vxlan_ports[i] == port)
+ return i;
+ }
+
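+ /* not found: i equals I40E_MAX_PF_UDP_OFFLOAD_PORTS here */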
+ return i;
+}
+
+/**
+ * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up
+ * @netdev: This physical port's netdev
+ * @sa_family: Socket Family that VXLAN is notifying us about
+ * @port: New UDP port number that VXLAN started listening to
+ **/
+static void i40e_add_vxlan_port(struct net_device *netdev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ u8 next_idx;
+ u8 idx;
+
+ if (sa_family == AF_INET6)
+ return;
+
+ idx = i40e_get_vxlan_port_idx(pf, port);
+
+ /* Check if port already exists */
+ if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
+ netdev_info(netdev, "Port %d already offloaded\n", ntohs(port));
+ return;
+ }
+
+ /* Now check if there is space to add the new port */
+ next_idx = i40e_get_vxlan_port_idx(pf, 0);
+
+ if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
+ netdev_info(netdev, "Maximum number of UDP ports reached, not adding port %d\n",
+ ntohs(port));
+ return;
+ }
+
+ /* New port: add it and mark its index in the bitmap */
+ pf->vxlan_ports[next_idx] = port;
+ pf->pending_vxlan_bitmap |= (1 << next_idx);
+
+ pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
+}
+
+/**
+ * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away
+ * @netdev: This physical port's netdev
+ * @sa_family: Socket Family that VXLAN is notifying us about
+ * @port: UDP port number that VXLAN stopped listening to
+ **/
+static void i40e_del_vxlan_port(struct net_device *netdev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ u8 idx;
+
+ if (sa_family == AF_INET6)
+ return;
+
+ idx = i40e_get_vxlan_port_idx(pf, port);
+
+ /* Check if port already exists */
+ if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
+ /* if port exists, set it to 0 (mark for deletion)
+ * and make it pending
+ */
+ pf->vxlan_ports[idx] = 0;
+
+ pf->pending_vxlan_bitmap |= (1 << idx);
+
+ pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
+ } else {
+ netdev_warn(netdev, "Port %d was not found, not deleting\n",
+ ntohs(port));
+ }
+}
+
+#endif
static const struct net_device_ops i40e_netdev_ops = {
.ndo_open = i40e_open,
.ndo_stop = i40e_close,
@@ -5710,6 +6451,7 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = i40e_set_mac,
.ndo_change_mtu = i40e_change_mtu,
+ .ndo_do_ioctl = i40e_ioctl,
.ndo_tx_timeout = i40e_tx_timeout,
.ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
@@ -5722,6 +6464,10 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
.ndo_set_vf_tx_rate = i40e_ndo_set_vf_bw,
.ndo_get_vf_config = i40e_ndo_get_vf_config,
+#ifdef CONFIG_I40E_VXLAN
+ .ndo_add_vxlan_port = i40e_add_vxlan_port,
+ .ndo_del_vxlan_port = i40e_del_vxlan_port,
+#endif
};
/**
@@ -5732,6 +6478,7 @@ static const struct net_device_ops i40e_netdev_ops = {
**/
static int i40e_config_netdev(struct i40e_vsi *vsi)
{
+ u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
struct i40e_netdev_priv *np;
@@ -5781,6 +6528,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
random_ether_addr(mac_addr);
i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
}
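+ /* always accept broadcast frames on this VSI */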
+ i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);
memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
memcpy(netdev->perm_addr, mac_addr, ETH_ALEN);
@@ -5814,10 +6562,6 @@ static void i40e_vsi_delete(struct i40e_vsi *vsi)
if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
return;
- /* there is no HW VSI for FDIR */
- if (vsi->type == I40E_VSI_FDIR)
- return;
-
i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
return;
}
@@ -5901,12 +6645,12 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
break;
case I40E_VSI_FDIR:
- /* no queue mapping or actual HW VSI needed */
- vsi->info.valid_sections = 0;
- vsi->seid = 0;
- vsi->id = 0;
+ ctxt.pf_num = hw->pf_id;
+ ctxt.vf_num = 0;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ ctxt.connection_type = 0x1; /* regular data port */
+ ctxt.flags = I40E_AQ_VSI_TYPE_PF;
i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
- return 0;
break;
case I40E_VSI_VMDQ2:
@@ -6133,6 +6877,69 @@ vector_setup_out:
}
/**
+ * i40e_vsi_reinit_setup - release and reallocate resources for a VSI
+ * @vsi: pointer to the vsi.
+ *
+ * This re-allocates a vsi's queue resources.
+ *
+ * Returns pointer to the successfully allocated and configured VSI sw struct
+ * on success, or NULL on failure.
+ **/
+static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ u8 enabled_tc;
+ int ret;
+
+ i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
+ i40e_vsi_clear_rings(vsi);
+
+ i40e_vsi_free_arrays(vsi, false);
+ i40e_set_num_rings_in_vsi(vsi);
+ ret = i40e_vsi_alloc_arrays(vsi, false);
+ if (ret)
+ goto err_vsi;
+
+ ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
+ if (ret < 0) {
+ dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",
+ vsi->seid, ret);
+ goto err_vsi;
+ }
+ vsi->base_queue = ret;
+
+ /* Update the FW view of the VSI. Force a reset of TC and queue
+ * layout configurations.
+ */
+ enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
+ pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
+ pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
+ i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
+
+ /* assign it some queues */
+ ret = i40e_alloc_rings(vsi);
+ if (ret)
+ goto err_rings;
+
+ /* map all of the rings to the q_vectors */
+ i40e_vsi_map_rings_to_vectors(vsi);
+ return vsi;
+
+err_rings:
+ i40e_vsi_free_q_vectors(vsi);
+ if (vsi->netdev_registered) {
+ vsi->netdev_registered = false;
+ unregister_netdev(vsi->netdev);
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
+ }
+ i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
+err_vsi:
+ i40e_vsi_clear(vsi);
+ return NULL;
+}
+
+/**
* i40e_vsi_setup - Set up a VSI by a given type
* @pf: board private structure
* @type: VSI type
@@ -6212,6 +7019,8 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
if (v_idx < 0)
goto err_alloc;
vsi = pf->vsi[v_idx];
+ if (!vsi)
+ goto err_alloc;
vsi->type = type;
vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
@@ -6220,7 +7029,8 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
else if (type == I40E_VSI_SRIOV)
vsi->vf_id = param1;
/* assign it some queues */
- ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
+ ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
+ vsi->idx);
if (ret < 0) {
dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",
vsi->seid, ret);
@@ -6246,6 +7056,10 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
goto err_netdev;
vsi->netdev_registered = true;
netif_carrier_off(vsi->netdev);
+#ifdef CONFIG_I40E_DCB
+ /* Setup DCB netlink interface */
+ i40e_dcbnl_setup(vsi);
+#endif /* CONFIG_I40E_DCB */
/* fall through */
case I40E_VSI_FDIR:
@@ -6503,12 +7317,14 @@ void i40e_veb_release(struct i40e_veb *veb)
**/
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
{
- bool is_default = (vsi->idx == vsi->back->lan_vsi);
+ bool is_default = false;
+ bool is_cloud = false;
int ret;
/* get a VEB from the hardware */
ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid,
- veb->enabled_tc, is_default, &veb->seid, NULL);
+ veb->enabled_tc, is_default,
+ is_cloud, &veb->seid, NULL);
if (ret) {
dev_info(&veb->pf->pdev->dev,
"couldn't add VEB, err %d, aq_err %d\n",
@@ -6773,11 +7589,13 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
/**
* i40e_setup_pf_switch - Setup the HW switch on startup or after reset
* @pf: board private structure
+ * @reinit: true if the Main VSI needs to be re-initialized.
*
* Returns 0 on success, negative value on failure
**/
-static int i40e_setup_pf_switch(struct i40e_pf *pf)
+static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
{
+ u32 rxfc = 0, txfc = 0, rxfc_reg;
int ret;
/* find out what's out there already */
@@ -6790,14 +7608,8 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf)
}
i40e_pf_reset_stats(pf);
- /* fdir VSI must happen first to be sure it gets queue 0, but only
- * if there is enough room for the fdir VSI
- */
- if (pf->num_lan_qps > 1)
- i40e_fdir_setup(pf);
-
/* first time setup */
- if (pf->lan_vsi == I40E_NO_VSI) {
+ if (pf->lan_vsi == I40E_NO_VSI || reinit) {
struct i40e_vsi *vsi = NULL;
u16 uplink_seid;
@@ -6808,19 +7620,15 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf)
uplink_seid = pf->veb[pf->lan_veb]->seid;
else
uplink_seid = pf->mac_seid;
-
- vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
+ if (pf->lan_vsi == I40E_NO_VSI)
+ vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
+ else if (reinit)
+ vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
if (!vsi) {
dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
i40e_fdir_teardown(pf);
return -EAGAIN;
}
- /* accommodate kcompat by copying the main VSI queue count
- * into the pf, since this newer code pushes the pf queue
- * info down a level into a VSI
- */
- pf->num_rx_queues = vsi->alloc_queue_pairs;
- pf->num_tx_queues = vsi->alloc_queue_pairs;
} else {
/* force a reset of TC and queue layout configurations */
u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
@@ -6830,6 +7638,8 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf)
}
i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
+ i40e_fdir_sb_setup(pf);
+
/* Setup static PF queue filter control settings */
ret = i40e_setup_pf_filter_control(pf);
if (ret) {
@@ -6848,37 +7658,68 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf)
i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
i40e_link_event(pf);
- /* Initialize user-specifics link properties */
+ /* Initialize user-specific link properties */
pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
I40E_AQ_AN_COMPLETED) ? true : false);
- pf->hw.fc.requested_mode = I40E_FC_DEFAULT;
- if (pf->hw.phy.link_info.an_info &
- (I40E_AQ_LINK_PAUSE_TX | I40E_AQ_LINK_PAUSE_RX))
+ /* requested_mode is set in probe or by ethtool */
+ if (!pf->fc_autoneg_status)
+ goto no_autoneg;
+
+ if ((pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) &&
+ (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX))
pf->hw.fc.current_mode = I40E_FC_FULL;
else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
pf->hw.fc.current_mode = I40E_FC_TX_PAUSE;
else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
pf->hw.fc.current_mode = I40E_FC_RX_PAUSE;
else
- pf->hw.fc.current_mode = I40E_FC_DEFAULT;
+ pf->hw.fc.current_mode = I40E_FC_NONE;
- return ret;
-}
+ /* sync the flow control settings with the auto-neg values */
+ switch (pf->hw.fc.current_mode) {
+ case I40E_FC_FULL:
+ txfc = 1;
+ rxfc = 1;
+ break;
+ case I40E_FC_TX_PAUSE:
+ txfc = 1;
+ rxfc = 0;
+ break;
+ case I40E_FC_RX_PAUSE:
+ txfc = 0;
+ rxfc = 1;
+ break;
+ case I40E_FC_NONE:
+ case I40E_FC_DEFAULT:
+ txfc = 0;
+ rxfc = 0;
+ break;
+ case I40E_FC_PFC:
+ /* TBD */
+ break;
+ /* no default case, we have to handle all possibilities here */
+ }
-/**
- * i40e_set_rss_size - helper to set rss_size
- * @pf: board private structure
- * @queues_left: how many queues
- */
-static u16 i40e_set_rss_size(struct i40e_pf *pf, int queues_left)
-{
- int num_tc0;
+ wr32(&pf->hw, I40E_PRTDCB_FCCFG, txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
+
+ rxfc_reg = rd32(&pf->hw, I40E_PRTDCB_MFLCN) &
+ ~I40E_PRTDCB_MFLCN_RFCE_MASK;
+ rxfc_reg |= (rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT);
+
+ wr32(&pf->hw, I40E_PRTDCB_MFLCN, rxfc_reg);
- num_tc0 = min_t(int, queues_left, pf->rss_size_max);
- num_tc0 = min_t(int, num_tc0, nr_cpus_node(numa_node_id()));
- num_tc0 = rounddown_pow_of_two(num_tc0);
+ goto fc_complete;
- return num_tc0;
+no_autoneg:
+ /* disable L2 flow control, user can turn it on if they wish */
+ wr32(&pf->hw, I40E_PRTDCB_FCCFG, 0);
+ wr32(&pf->hw, I40E_PRTDCB_MFLCN, rd32(&pf->hw, I40E_PRTDCB_MFLCN) &
+ ~I40E_PRTDCB_MFLCN_RFCE_MASK);
+
+fc_complete:
+ i40e_ptp_init(pf);
+
+ return ret;
}
/**
@@ -6887,12 +7728,9 @@ static u16 i40e_set_rss_size(struct i40e_pf *pf, int queues_left)
**/
static void i40e_determine_queue_usage(struct i40e_pf *pf)
{
- int accum_tc_size;
int queues_left;
pf->num_lan_qps = 0;
- pf->num_tc_qps = rounddown_pow_of_two(pf->num_tc_qps);
- accum_tc_size = (I40E_MAX_TRAFFIC_CLASS - 1) * pf->num_tc_qps;
/* Find the max queues to be put into basic use. We'll always be
* using TC0, whether or not DCB is running, and TC0 will get the
@@ -6900,99 +7738,45 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
*/
queues_left = pf->hw.func_caps.num_tx_qp;
- if (!((pf->flags & I40E_FLAG_MSIX_ENABLED) &&
- (pf->flags & I40E_FLAG_MQ_ENABLED)) ||
- !(pf->flags & (I40E_FLAG_RSS_ENABLED |
- I40E_FLAG_FDIR_ENABLED | I40E_FLAG_DCB_ENABLED)) ||
- (queues_left == 1)) {
-
+ if ((queues_left == 1) ||
+ !(pf->flags & I40E_FLAG_MSIX_ENABLED) ||
+ !(pf->flags & (I40E_FLAG_RSS_ENABLED | I40E_FLAG_FD_SB_ENABLED |
+ I40E_FLAG_DCB_ENABLED))) {
/* one qp for PF, no queues for anything else */
queues_left = 0;
pf->rss_size = pf->num_lan_qps = 1;
/* make sure all the fancies are disabled */
- pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
- I40E_FLAG_MQ_ENABLED |
- I40E_FLAG_FDIR_ENABLED |
- I40E_FLAG_FDIR_ATR_ENABLED |
- I40E_FLAG_DCB_ENABLED |
- I40E_FLAG_SRIOV_ENABLED |
- I40E_FLAG_VMDQ_ENABLED);
-
- } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
- !(pf->flags & I40E_FLAG_FDIR_ENABLED) &&
- !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
-
- pf->rss_size = i40e_set_rss_size(pf, queues_left);
-
- queues_left -= pf->rss_size;
- pf->num_lan_qps = pf->rss_size;
-
- } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
- !(pf->flags & I40E_FLAG_FDIR_ENABLED) &&
- (pf->flags & I40E_FLAG_DCB_ENABLED)) {
-
- /* save num_tc_qps queues for TCs 1 thru 7 and the rest
- * are set up for RSS in TC0
- */
- queues_left -= accum_tc_size;
-
- pf->rss_size = i40e_set_rss_size(pf, queues_left);
-
- queues_left -= pf->rss_size;
- if (queues_left < 0) {
- dev_info(&pf->pdev->dev, "not enough queues for DCB\n");
- return;
- }
-
- pf->num_lan_qps = pf->rss_size + accum_tc_size;
-
- } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
- (pf->flags & I40E_FLAG_FDIR_ENABLED) &&
- !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
-
- queues_left -= 1; /* save 1 queue for FD */
-
- pf->rss_size = i40e_set_rss_size(pf, queues_left);
-
- queues_left -= pf->rss_size;
- if (queues_left < 0) {
- dev_info(&pf->pdev->dev, "not enough queues for Flow Director\n");
- return;
+ pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
+ I40E_FLAG_FD_SB_ENABLED |
+ I40E_FLAG_FD_ATR_ENABLED |
+ I40E_FLAG_DCB_ENABLED |
+ I40E_FLAG_SRIOV_ENABLED |
+ I40E_FLAG_VMDQ_ENABLED);
+ } else {
+ /* Not enough queues for all TCs */
+ if ((pf->flags & I40E_FLAG_DCB_ENABLED) &&
+ (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
+ pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
}
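+ /* the LAN VSI gets up to rss_size_max pairs; FD, VFs and VMDq share the rest */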
+ pf->num_lan_qps = pf->rss_size_max;
+ queues_left -= pf->num_lan_qps;
+ }
- pf->num_lan_qps = pf->rss_size;
-
- } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
- (pf->flags & I40E_FLAG_FDIR_ENABLED) &&
- (pf->flags & I40E_FLAG_DCB_ENABLED)) {
-
- /* save 1 queue for TCs 1 thru 7,
- * 1 queue for flow director,
- * and the rest are set up for RSS in TC0
- */
- queues_left -= 1;
- queues_left -= accum_tc_size;
-
- pf->rss_size = i40e_set_rss_size(pf, queues_left);
- queues_left -= pf->rss_size;
- if (queues_left < 0) {
- dev_info(&pf->pdev->dev, "not enough queues for DCB and Flow Director\n");
- return;
+ if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+ if (queues_left > 1) {
+ queues_left -= 1; /* save 1 queue for FD */
+ } else {
+ pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
}
-
- pf->num_lan_qps = pf->rss_size + accum_tc_size;
-
- } else {
- dev_info(&pf->pdev->dev,
- "Invalid configuration, flags=0x%08llx\n", pf->flags);
- return;
}
if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
pf->num_vf_qps && pf->num_req_vfs && queues_left) {
- pf->num_req_vfs = min_t(int, pf->num_req_vfs, (queues_left /
- pf->num_vf_qps));
+ pf->num_req_vfs = min_t(int, pf->num_req_vfs,
+ (queues_left / pf->num_vf_qps));
queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
}
@@ -7003,6 +7787,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
}
+ pf->queues_left = queues_left;
return;
}
@@ -7024,7 +7809,7 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
/* Flow Director is enabled */
- if (pf->flags & (I40E_FLAG_FDIR_ENABLED | I40E_FLAG_FDIR_ATR_ENABLED))
+ if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
settings->enable_fdir = true;
/* Ethtype and MACVLAN filters enabled for PF */
@@ -7053,6 +7838,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct i40e_driver_version dv;
struct i40e_pf *pf;
struct i40e_hw *hw;
+ static u16 pfs_found;
+ u16 link_status;
int err = 0;
u32 len;
@@ -7118,6 +7905,18 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->subsystem_device_id = pdev->subsystem_device;
hw->bus.device = PCI_SLOT(pdev->devfn);
hw->bus.func = PCI_FUNC(pdev->devfn);
+ pf->instance = pfs_found;
+
+ /* do a special CORER for clearing PXE mode once at init */
+ if (hw->revision_id == 0 &&
+ (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
+ wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
+ i40e_flush(hw);
+ msleep(200);
+ pf->corer_count++;
+
+ i40e_clear_pxe_mode(hw);
+ }
/* Reset here to make sure all is clean and to define PF 'n' */
err = i40e_pf_reset(hw);
@@ -7142,8 +7941,18 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_pf_reset;
}
+ /* set up a default setting for link flow control */
+ pf->hw.fc.requested_mode = I40E_FC_NONE;
+
err = i40e_init_adminq(hw);
dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
+ if (((hw->nvm.version & I40E_NVM_VERSION_HI_MASK)
+ >> I40E_NVM_VERSION_HI_SHIFT) != I40E_CURRENT_NVM_VERSION_HI) {
+ dev_info(&pdev->dev,
+ "warning: NVM version not supported, supported version: %02x.%02x\n",
+ I40E_CURRENT_NVM_VERSION_HI,
+ I40E_CURRENT_NVM_VERSION_LO);
+ }
if (err) {
dev_info(&pdev->dev,
"init_adminq failed: %d expecting API %02x.%02x\n",
@@ -7152,6 +7961,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_pf_reset;
}
+ i40e_clear_pxe_mode(hw);
err = i40e_get_capabilities(pf);
if (err)
goto err_adminq_setup;
@@ -7178,7 +7988,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
i40e_get_mac_addr(hw, hw->mac.addr);
- if (i40e_validate_mac_addr(hw->mac.addr)) {
+ if (!is_valid_ether_addr(hw->mac.addr)) {
dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
err = -EIO;
goto err_mac_addr;
@@ -7188,6 +7998,14 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, pf);
pci_save_state(pdev);
+#ifdef CONFIG_I40E_DCB
+ err = i40e_init_pf_dcb(pf);
+ if (err) {
+ dev_info(&pdev->dev, "init_pf_dcb failed: %d\n", err);
+ pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ goto err_init_dcb;
+ }
+#endif /* CONFIG_I40E_DCB */
/* set up periodic task facility */
setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
@@ -7198,6 +8016,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
pf->link_check_timeout = jiffies;
+ /* WoL defaults to disabled */
+ pf->wol_en = false;
+ device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
+
/* set up the main switch operations */
i40e_determine_queue_usage(pf);
i40e_init_interrupt_scheme(pf);
@@ -7212,7 +8034,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_switch_setup;
}
- err = i40e_setup_pf_switch(pf);
+ err = i40e_setup_pf_switch(pf, false);
if (err) {
dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
goto err_vsis;
@@ -7250,6 +8072,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i40e_flush(hw);
}
+ pfs_found++;
+
i40e_dbg_pf_init(pf);
/* tell the firmware that we're starting */
@@ -7263,15 +8087,41 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
mod_timer(&pf->service_timer,
round_jiffies(jiffies + pf->service_timer_period));
+ /* Get the negotiated link width and speed from PCI config space */
+ pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status);
+
+ i40e_set_pci_config_data(hw, link_status);
+
+ dev_info(&pdev->dev, "PCI Express: %s %s\n",
+ (hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" :
+ hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" :
+ hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" :
+ "Unknown"),
+ (hw->bus.width == i40e_bus_width_pcie_x8 ? "Width x8" :
+ hw->bus.width == i40e_bus_width_pcie_x4 ? "Width x4" :
+ hw->bus.width == i40e_bus_width_pcie_x2 ? "Width x2" :
+ hw->bus.width == i40e_bus_width_pcie_x1 ? "Width x1" :
+ "Unknown"));
+
+ if (hw->bus.width < i40e_bus_width_pcie_x8 ||
+ hw->bus.speed < i40e_bus_speed_8000) {
+ dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
+ dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
+ }
+
return 0;
/* Unwind what we've done if something failed in the setup */
err_vsis:
set_bit(__I40E_DOWN, &pf->state);
-err_switch_setup:
i40e_clear_interrupt_scheme(pf);
kfree(pf->vsi);
+err_switch_setup:
+ i40e_reset_interrupt_capability(pf);
del_timer_sync(&pf->service_timer);
+#ifdef CONFIG_I40E_DCB
+err_init_dcb:
+#endif /* CONFIG_I40E_DCB */
err_mac_addr:
err_configure_lan_hmc:
(void)i40e_shutdown_lan_hmc(hw);
@@ -7313,6 +8163,8 @@ static void i40e_remove(struct pci_dev *pdev)
i40e_dbg_pf_exit(pf);
+ i40e_ptp_stop(pf);
+
if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
i40e_free_vfs(pf);
pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
@@ -7356,7 +8208,6 @@ static void i40e_remove(struct pci_dev *pdev)
"Failed to destroy the HMC resources: %d\n", ret_code);
/* shutdown the adminq */
- i40e_aq_queue_shutdown(&pf->hw, true);
ret_code = i40e_shutdown_adminq(&pf->hw);
if (ret_code)
dev_warn(&pdev->dev,
@@ -7413,7 +8264,11 @@ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
/* shutdown all operations */
- i40e_pf_quiesce_all_vsi(pf);
+ if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
+ rtnl_lock();
+ i40e_prep_for_reset(pf);
+ rtnl_unlock();
+ }
/* Request a slot reset */
return PCI_ERS_RESULT_NEED_RESET;
@@ -7476,9 +8331,103 @@ static void i40e_pci_error_resume(struct pci_dev *pdev)
struct i40e_pf *pf = pci_get_drvdata(pdev);
dev_info(&pdev->dev, "%s\n", __func__);
+ if (test_bit(__I40E_SUSPENDED, &pf->state))
+ return;
+
+ rtnl_lock();
i40e_handle_reset_warning(pf);
+ rtnl_unlock();
}
+/**
+ * i40e_shutdown - PCI callback for shutting down
+ * @pdev: PCI device information struct
+ **/
+static void i40e_shutdown(struct pci_dev *pdev)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+ struct i40e_hw *hw = &pf->hw;
+
+ set_bit(__I40E_SUSPENDED, &pf->state);
+ set_bit(__I40E_DOWN, &pf->state);
+ rtnl_lock();
+ i40e_prep_for_reset(pf);
+ rtnl_unlock();
+
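+ /* arm the APM and magic-packet wake filters according to the WoL setting */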
+ wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
+ wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+
+ if (system_state == SYSTEM_POWER_OFF) {
+ pci_wake_from_d3(pdev, pf->wol_en);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+}
+
+#ifdef CONFIG_PM
+/**
+ * i40e_suspend - PCI callback for moving to D3
+ * @pdev: PCI device information struct
+ **/
+static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+ struct i40e_hw *hw = &pf->hw;
+
+ set_bit(__I40E_SUSPENDED, &pf->state);
+ set_bit(__I40E_DOWN, &pf->state);
+ rtnl_lock();
+ i40e_prep_for_reset(pf);
+ rtnl_unlock();
+
+ wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
+ wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+
+ pci_wake_from_d3(pdev, pf->wol_en);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
+}
+
+/**
+ * i40e_resume - PCI callback for waking up from D3
+ * @pdev: PCI device information struct
+ **/
+static int i40e_resume(struct pci_dev *pdev)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+ int err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ /* pci_restore_state() clears dev->state_saved, so
+ * call pci_save_state() again to restore it.
+ */
+ pci_save_state(pdev);
+
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "%s: Cannot enable PCI device from suspend\n",
+ __func__);
+ return err;
+ }
+ pci_set_master(pdev);
+
+ /* no wakeup events while running */
+ pci_wake_from_d3(pdev, false);
+
+ /* handling the reset will rebuild the device state */
+ if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
+ clear_bit(__I40E_DOWN, &pf->state);
+ rtnl_lock();
+ i40e_reset_and_rebuild(pf, false);
+ rtnl_unlock();
+ }
+
+ return 0;
+}
+
+#endif
static const struct pci_error_handlers i40e_err_handler = {
.error_detected = i40e_pci_error_detected,
.slot_reset = i40e_pci_error_slot_reset,
@@ -7490,6 +8439,11 @@ static struct pci_driver i40e_driver = {
.id_table = i40e_pci_tbl,
.probe = i40e_probe,
.remove = i40e_remove,
+#ifdef CONFIG_PM
+ .suspend = i40e_suspend,
+ .resume = i40e_resume,
+#endif
+ .shutdown = i40e_shutdown,
.err_handler = &i40e_err_handler,
.sriov_configure = i40e_pci_sriov_configure,
};
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 97e1bb30ef8a..73f95b081927 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -166,15 +165,15 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
}
/**
- * i40e_read_nvm_srctl - Reads Shadow RAM.
+ * i40e_read_nvm_word - Reads Shadow RAM
* @hw: pointer to the HW structure.
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
* @data: word read from the Shadow RAM.
*
* Reads 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
**/
-static i40e_status i40e_read_nvm_srctl(struct i40e_hw *hw, u16 offset,
- u16 *data)
+i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+ u16 *data)
{
i40e_status ret_code = I40E_ERR_TIMEOUT;
u32 sr_reg;
@@ -211,29 +210,6 @@ read_nvm_exit:
}
/**
- * i40e_read_nvm_word - Reads Shadow RAM word.
- * @hw: pointer to the HW structure.
- * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
- * @data: word read from the Shadow RAM.
- *
- * Reads 16 bit word from the Shadow RAM. Each read is preceded
- * with the NVM ownership taking and followed by the release.
- **/
-i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
- u16 *data)
-{
- i40e_status ret_code = 0;
-
- ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
- if (!ret_code) {
- ret_code = i40e_read_nvm_srctl(hw, offset, data);
- i40e_release_nvm(hw);
- }
-
- return ret_code;
-}
-
-/**
* i40e_read_nvm_buffer - Reads Shadow RAM buffer.
* @hw: pointer to the HW structure.
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
@@ -250,36 +226,25 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
{
i40e_status ret_code = 0;
u16 index, word;
- u32 time;
- ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
- if (!ret_code) {
- /* Loop thru the selected region. */
- for (word = 0; word < *words; word++) {
- index = offset + word;
- ret_code = i40e_read_nvm_srctl(hw, index, &data[word]);
- if (ret_code)
- break;
- /* Check if we didn't exceeded the semaphore timeout. */
- time = rd32(hw, I40E_GLVFGEN_TIMER);
- if (time >= hw->nvm.hw_semaphore_timeout) {
- ret_code = I40E_ERR_TIMEOUT;
- hw_dbg(hw, "NVM read error: timeout.\n");
- break;
- }
- }
- /* Update the number of words read from the Shadow RAM. */
- *words = word;
- /* Release the NVM ownership. */
- i40e_release_nvm(hw);
+ /* Loop through the selected region. */
+ for (word = 0; word < *words; word++) {
+ index = offset + word;
+ ret_code = i40e_read_nvm_word(hw, index, &data[word]);
+ if (ret_code)
+ break;
}
+ /* Update the number of words read from the Shadow RAM. */
+ *words = word;
+
return ret_code;
}
/**
* i40e_calc_nvm_checksum - Calculates and returns the checksum
* @hw: pointer to hardware structure
+ * @checksum: pointer to the checksum
*
* This function calculates the SW checksum that covers the whole 64kB shadow RAM
* except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
@@ -297,14 +262,14 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
u32 i = 0;
/* read pointer to VPD area */
- ret_code = i40e_read_nvm_srctl(hw, I40E_SR_VPD_PTR, &vpd_module);
+ ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
if (ret_code) {
ret_code = I40E_ERR_NVM_CHECKSUM;
goto i40e_calc_nvm_checksum_exit;
}
/* read pointer to PCIe Alt Auto-load module */
- ret_code = i40e_read_nvm_srctl(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
+ ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
&pcie_alt_module);
if (ret_code) {
ret_code = I40E_ERR_NVM_CHECKSUM;
@@ -331,7 +296,7 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
break;
}
- ret_code = i40e_read_nvm_srctl(hw, (u16)i, &word);
+ ret_code = i40e_read_nvm_word(hw, (u16)i, &word);
if (ret_code) {
ret_code = I40E_ERR_NVM_CHECKSUM;
goto i40e_calc_nvm_checksum_exit;
@@ -358,7 +323,7 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
{
i40e_status ret_code = 0;
u16 checksum_sr = 0;
- u16 checksum_local;
+ u16 checksum_local = 0;
ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (ret_code)
@@ -371,7 +336,7 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
/* Do not use i40e_read_nvm_word() because we do not want to take
* the synchronization semaphores twice here.
*/
- i40e_read_nvm_srctl(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
+ i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
/* Verify read checksum from EEPROM is the same as
* calculated checksum
diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
index 702c81ba86e3..ecd0f0b663c9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index f75bb9ccc900..ed91f93ede2b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -51,7 +50,6 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
void *buff, /* can be NULL */
u16 buff_size,
struct i40e_asq_cmd_details *cmd_details);
-bool i40e_asq_done(struct i40e_hw *hw);
/* debug function for adminq */
void i40e_debug_aq(struct i40e_hw *hw,
@@ -60,10 +58,11 @@ void i40e_debug_aq(struct i40e_hw *hw,
void *buffer);
void i40e_idle_aq(struct i40e_hw *hw);
-void i40e_resume_aq(struct i40e_hw *hw);
+bool i40e_check_asq_alive(struct i40e_hw *hw);
+i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
u32 i40e_led_get(struct i40e_hw *hw);
-void i40e_led_set(struct i40e_hw *hw, u32 mode);
+void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
/* admin send queue commands */
@@ -71,8 +70,6 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
u16 *fw_major_version, u16 *fw_minor_version,
u16 *api_major_version, u16 *api_minor_version,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
- bool unloading);
i40e_status i40e_aq_set_phy_reset(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
@@ -95,9 +92,9 @@ i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
u16 vsi_id, bool set_filter,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
- u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+ u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
- u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+ u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
struct i40e_vsi_context *vsi_ctx,
struct i40e_asq_cmd_details *cmd_details);
@@ -106,7 +103,8 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
u16 downlink_seid, u8 enabled_tc,
- bool default_port, u16 *pveb_seid,
+ bool default_port, bool enable_l2_filtering,
+ u16 *pveb_seid,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
u16 veb_seid, u16 *switch_id, bool *floating,
@@ -119,12 +117,6 @@ i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id,
i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
struct i40e_aqc_remove_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_add_vlan(struct i40e_hw *hw, u16 vsi_id,
- struct i40e_aqc_add_remove_vlan_element_data *v_list,
- u8 count, struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_remove_vlan(struct i40e_hw *hw, u16 vsi_id,
- struct i40e_aqc_add_remove_vlan_element_data *v_list,
- u8 count, struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details);
@@ -164,11 +156,19 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
+ u16 udp_port, u8 header_len,
+ u8 protocol_index, u8 *filter_index,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
u16 flags, u8 *mac_addr,
struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
enum i40e_aq_hmc_profile profile,
u8 pe_vf_enabled_count,
@@ -179,6 +179,15 @@ i40e_status i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
+ enum i40e_admin_queue_opc opcode,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
u16 seid,
struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
@@ -207,8 +216,6 @@ bool i40e_get_link_status(struct i40e_hw *hw);
i40e_status i40e_get_mac_addr(struct i40e_hw *hw,
u8 *mac_addr);
i40e_status i40e_validate_mac_addr(u8 *mac_addr);
-i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw,
- struct i40e_lldp_variables *lldp_cfg);
/* prototype for functions used for NVM access */
i40e_status i40e_init_nvm(struct i40e_hw *hw);
i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
@@ -222,6 +229,7 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
u16 *words, u16 *data);
i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
u16 *checksum);
+void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
/* prototype for functions used for SW locks */
@@ -236,4 +244,9 @@ i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_set_filter_control(struct i40e_hw *hw,
struct i40e_filter_control_settings *settings);
+i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+ u8 *mac_addr, u16 ethtype, u16 flags,
+ u16 vsi_seid, u16 queue, bool is_add,
+ struct i40e_control_filter_stats *stats,
+ struct i40e_asq_cmd_details *cmd_details);
#endif /* _I40E_PROTOTYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
new file mode 100644
index 000000000000..e33ec6c842b7
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -0,0 +1,662 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e.h"
+#include <linux/export.h>
+#include <linux/ptp_classify.h>
+
+/* The XL710 timesync implementation is very much like Intel's 82599 in its
+ * fundamental clock design. However, the clock operations are much simpler
+ * in the XL710 because the device supports a full 64 bits of nanoseconds.
+ * Because the field is so wide, we can forgo the cycle counter and just
+ * operate with the nanosecond field directly without fear of overflow.
+ *
+ * Much like the 82599, the update period is dependent upon the link speed:
+ * At 40Gb link or no link, the period is 1.6ns.
+ * At 10Gb link, the period is multiplied by 2. (3.2ns)
+ * At 1Gb link, the period is multiplied by 20. (32ns)
+ * 1588 functionality is not supported at 100Mbps.
+ */
+#define I40E_PTP_40GB_INCVAL 0x0199999999ULL
+#define I40E_PTP_10GB_INCVAL 0x0333333333ULL
+#define I40E_PTP_1GB_INCVAL 0x2000000000ULL
+
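A note on the INCVAL constants above: they read naturally as 32.32 fixed-point nanoseconds-per-tick, which reproduces the periods quoted in the block comment (1.6 ns, 3.2 ns, 32 ns). A stand-alone sanity check, assuming that interpretation (the decoding is an editorial assumption, not stated in the patch):

```c
#include <stdint.h>
#include <stdio.h>

/* Interpret an INCVAL constant as 32.32 fixed-point ns-per-tick. */
static double incval_to_ns_per_tick(uint64_t incval)
{
	return (double)incval / (double)(1ULL << 32);
}

int main(void)
{
	printf("40G: %.1f ns\n", incval_to_ns_per_tick(0x0199999999ULL)); /* 1.6 */
	printf("10G: %.1f ns\n", incval_to_ns_per_tick(0x0333333333ULL)); /* 3.2 */
	printf(" 1G: %.1f ns\n", incval_to_ns_per_tick(0x2000000000ULL)); /* 32.0 */
	return 0;
}
```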
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1 (0x1 << \
+ I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (0x2 << \
+ I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_PTP_TX_TIMEOUT (HZ * 15)
+
+/**
+ * i40e_ptp_read - Read the PHC time from the device
+ * @pf: Board private structure
+ * @ts: timespec structure to hold the current time value
+ *
+ * This function reads the PRTTSYN_TIME registers and stores them in a
+ * timespec. However, since the registers are 64 bits of nanoseconds, we must
+ * convert the result to a timespec before we can return.
+ **/
+static void i40e_ptp_read(struct i40e_pf *pf, struct timespec *ts)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 hi, lo;
+ u64 ns;
+
+ /* The timer latches on the lowest register read. */
+ lo = rd32(hw, I40E_PRTTSYN_TIME_L);
+ hi = rd32(hw, I40E_PRTTSYN_TIME_H);
+
+ ns = (((u64)hi) << 32) | lo;
+
+ *ts = ns_to_timespec(ns);
+}
+
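The latch on the low-word read is what keeps the two 32-bit halves coherent; if the high word were read first and the counter carried in between, the assembled value would be off by 2^32 ns (about 4.3 s). A toy model of that hazard, with made-up values:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t before = 0x00000001FFFFFFFFULL;	/* counter before the tick */
	uint64_t after  = before + 1;			/* carry into the high word */

	/* Wrong order: high word sampled before the carry, low word after. */
	uint32_t hi = (uint32_t)(before >> 32);
	uint32_t lo = (uint32_t)(after & 0xFFFFFFFFULL);

	printf("torn read: 0x%016llx\n",
	       (unsigned long long)(((uint64_t)hi << 32) | lo)); /* 0x100000000 */
	return 0;
}
```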
+/**
+ * i40e_ptp_write - Write the PHC time to the device
+ * @pf: Board private structure
+ * @ts: timespec structure that holds the new time value
+ *
+ * This function writes the PRTTSYN_TIME registers with the user value. Since
+ * we receive a timespec from the stack, we must convert that timespec into
+ * nanoseconds before programming the registers.
+ **/
+static void i40e_ptp_write(struct i40e_pf *pf, const struct timespec *ts)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u64 ns = timespec_to_ns(ts);
+
+ /* The timer will not update until the high register is written, so
+ * write the low register first.
+ */
+ wr32(hw, I40E_PRTTSYN_TIME_L, ns & 0xFFFFFFFF);
+ wr32(hw, I40E_PRTTSYN_TIME_H, ns >> 32);
+}
+
+/**
+ * i40e_ptp_convert_to_hwtstamp - Convert device clock to system time
+ * @hwtstamps: Timestamp structure to update
+ * @timestamp: Timestamp from the hardware
+ *
+ * We need to convert the NIC clock value into a hwtstamp which can be used by
+ * the upper level timestamping functions. Since the timestamp is simply a 64-
+ * bit nanosecond value, we can call ns_to_ktime directly to handle this.
+ **/
+static void i40e_ptp_convert_to_hwtstamp(struct skb_shared_hwtstamps *hwtstamps,
+ u64 timestamp)
+{
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+
+ hwtstamps->hwtstamp = ns_to_ktime(timestamp);
+}
+
+/**
+ * i40e_ptp_adjfreq - Adjust the PHC frequency
+ * @ptp: The PTP clock structure
+ * @ppb: Parts per billion adjustment from the base
+ *
+ * Adjust the frequency of the PHC by the indicated parts per billion from the
+ * base frequency.
+ **/
+static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
+ struct i40e_hw *hw = &pf->hw;
+ u64 adj, freq, diff;
+ int neg_adj = 0;
+
+ if (ppb < 0) {
+ neg_adj = 1;
+ ppb = -ppb;
+ }
+
+ smp_mb(); /* Force any pending update before accessing. */
+ adj = ACCESS_ONCE(pf->ptp_base_adj);
+
+ freq = adj;
+ freq *= ppb;
+ diff = div_u64(freq, 1000000000ULL);
+
+ if (neg_adj)
+ adj -= diff;
+ else
+ adj += diff;
+
+ wr32(hw, I40E_PRTTSYN_INC_L, adj & 0xFFFFFFFF);
+ wr32(hw, I40E_PRTTSYN_INC_H, adj >> 32);
+
+ return 0;
+}
+
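The adjustment arithmetic above scales the base increment by |ppb| parts per billion and then adds or subtracts the difference. The same computation, stand-alone, for a hypothetical +100 ppb request against the 40G base value:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t adj = 0x0199999999ULL;		/* 40G base increment */
	int32_t ppb = 100;			/* hypothetical request */
	uint64_t diff = (adj * (uint64_t)ppb) / 1000000000ULL;

	printf("diff = %llu, new incval = 0x%llx\n",
	       (unsigned long long)diff,
	       (unsigned long long)(adj + diff));
	return 0;
}
```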
+/**
+ * i40e_ptp_adjtime - Adjust the PHC time
+ * @ptp: The PTP clock structure
+ * @delta: Offset in nanoseconds to adjust the PHC time by
+ *
+ * Adjust the PHC time by the indicated delta, in nanoseconds, by reading the
+ * current time, adding the offset, and writing the result back.
+ **/
+static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
+ struct timespec now, then = ns_to_timespec(delta);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pf->tmreg_lock, flags);
+
+ i40e_ptp_read(pf, &now);
+ now = timespec_add(now, then);
+ i40e_ptp_write(pf, (const struct timespec *)&now);
+
+ spin_unlock_irqrestore(&pf->tmreg_lock, flags);
+
+ return 0;
+}
+
+/**
+ * i40e_ptp_gettime - Get the time of the PHC
+ * @ptp: The PTP clock structure
+ * @ts: timespec structure to hold the current time value
+ *
+ * Read the device clock and return the current value in nanoseconds, after
+ * converting it into a timespec struct.
+ **/
+static int i40e_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+ struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pf->tmreg_lock, flags);
+ i40e_ptp_read(pf, ts);
+ spin_unlock_irqrestore(&pf->tmreg_lock, flags);
+
+ return 0;
+}
+
+/**
+ * i40e_ptp_settime - Set the time of the PHC
+ * @ptp: The PTP clock structure
+ * @ts: timespec structure that holds the new time value
+ *
+ * Set the device clock to the user input value. The conversion from timespec
+ * to ns happens in the write function.
+ **/
+static int i40e_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec *ts)
+{
+ struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pf->tmreg_lock, flags);
+ i40e_ptp_write(pf, ts);
+ spin_unlock_irqrestore(&pf->tmreg_lock, flags);
+
+ return 0;
+}
+
+/**
+ * i40e_ptp_tx_work - Poll for the Tx timestamp event
+ * @work: pointer to work struct
+ *
+ * This work function polls the PRTTSYN_STAT_0.TXTIME bit to determine when a
+ * Tx timestamp event has occurred, in order to pass the Tx timestamp value up
+ * the stack in the skb.
+ */
+static void i40e_ptp_tx_work(struct work_struct *work)
+{
+ struct i40e_pf *pf = container_of(work, struct i40e_pf,
+ ptp_tx_work);
+ struct i40e_hw *hw = &pf->hw;
+ u32 prttsyn_stat_0;
+
+ if (!pf->ptp_tx_skb)
+ return;
+
+ if (time_is_before_jiffies(pf->ptp_tx_start +
+ I40E_PTP_TX_TIMEOUT)) {
+ dev_kfree_skb_any(pf->ptp_tx_skb);
+ pf->ptp_tx_skb = NULL;
+ pf->tx_hwtstamp_timeouts++;
+ dev_warn(&pf->pdev->dev, "clearing Tx timestamp hang\n");
+ return;
+ }
+
+ prttsyn_stat_0 = rd32(hw, I40E_PRTTSYN_STAT_0);
+ if (prttsyn_stat_0 & I40E_PRTTSYN_STAT_0_TXTIME_MASK)
+ i40e_ptp_tx_hwtstamp(pf);
+ else
+ schedule_work(&pf->ptp_tx_work);
+}
+
+/**
+ * i40e_ptp_enable - Enable/disable ancillary features of the PHC subsystem
+ * @ptp: The PTP clock structure
+ * @rq: The requested feature to change
+ * @on: Enable/disable flag
+ *
+ * The XL710 does not support any of the ancillary features of the PHC
+ * subsystem, so this function always returns -EOPNOTSUPP.
+ **/
+static int i40e_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * i40e_ptp_rx_hang - Detect error case when Rx timestamp registers are hung
+ * @vsi: The VSI with the rings relevant to 1588
+ *
+ * This watchdog task is scheduled to detect the error case where the hardware
+ * has dropped an Rx packet that was to be timestamped because the ring was
+ * full. The error is rare, but it leaves the device unable to timestamp any
+ * future packets.
+ **/
+void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_ring *rx_ring;
+ unsigned long rx_event;
+ u32 prttsyn_stat;
+ int n;
+
+ if (!(pf->flags & I40E_FLAG_PTP))
+ return;
+
+ prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
+
+ /* Unless all four receive timestamp registers are latched, we are not
+ * concerned about a possible PTP Rx hang, so just record the check time
+ * and exit.
+ */
+ if (!(prttsyn_stat & ((I40E_PRTTSYN_STAT_1_RXT0_MASK <<
+ I40E_PRTTSYN_STAT_1_RXT0_SHIFT) |
+ (I40E_PRTTSYN_STAT_1_RXT1_MASK <<
+ I40E_PRTTSYN_STAT_1_RXT1_SHIFT) |
+ (I40E_PRTTSYN_STAT_1_RXT2_MASK <<
+ I40E_PRTTSYN_STAT_1_RXT2_SHIFT) |
+ (I40E_PRTTSYN_STAT_1_RXT3_MASK <<
+ I40E_PRTTSYN_STAT_1_RXT3_SHIFT)))) {
+ pf->last_rx_ptp_check = jiffies;
+ return;
+ }
+
+ /* Determine the most recent watchdog or rx_timestamp event. */
+ rx_event = pf->last_rx_ptp_check;
+ for (n = 0; n < vsi->num_queue_pairs; n++) {
+ rx_ring = vsi->rx_rings[n];
+ if (time_after(rx_ring->last_rx_timestamp, rx_event))
+ rx_event = rx_ring->last_rx_timestamp;
+ }
+
+ /* Only the high RXTIME register needs to be read to clear the latch */
+ if (time_is_before_jiffies(rx_event + 5 * HZ)) {
+ rd32(hw, I40E_PRTTSYN_RXTIME_H(0));
+ rd32(hw, I40E_PRTTSYN_RXTIME_H(1));
+ rd32(hw, I40E_PRTTSYN_RXTIME_H(2));
+ rd32(hw, I40E_PRTTSYN_RXTIME_H(3));
+ pf->last_rx_ptp_check = jiffies;
+ pf->rx_hwtstamp_cleared++;
+ dev_warn(&vsi->back->pdev->dev,
+ "%s: clearing Rx timestamp hang",
+ __func__);
+ }
+}
+
+/**
+ * i40e_ptp_tx_hwtstamp - Utility function which returns the Tx timestamp
+ * @pf: Board private structure
+ *
+ * Read the value of the Tx timestamp from the registers, convert it into a
+ * value consumable by the stack, and store that result into the shhwtstamps
+ * struct before returning it up the stack.
+ **/
+void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf)
+{
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct i40e_hw *hw = &pf->hw;
+ u32 hi, lo;
+ u64 ns;
+
+ lo = rd32(hw, I40E_PRTTSYN_TXTIME_L);
+ hi = rd32(hw, I40E_PRTTSYN_TXTIME_H);
+
+ ns = (((u64)hi) << 32) | lo;
+
+ i40e_ptp_convert_to_hwtstamp(&shhwtstamps, ns);
+ skb_tstamp_tx(pf->ptp_tx_skb, &shhwtstamps);
+ dev_kfree_skb_any(pf->ptp_tx_skb);
+ pf->ptp_tx_skb = NULL;
+}
+
+/**
+ * i40e_ptp_rx_hwtstamp - Utility function which checks for an Rx timestamp
+ * @pf: Board private structure
+ * @skb: Particular skb to send timestamp with
+ * @index: Index into the receive timestamp registers for the timestamp
+ *
+ * The XL710 receives a notification in the receive descriptor with an offset
+ * into the set of RXTIME registers where the timestamp is for that skb. This
+ * function goes and fetches the receive timestamp from that offset, if a valid
+ * one exists. The RXTIME registers hold nanoseconds, so the value is
+ * converted into a hwtstamp before being attached to the skb.
+ **/
+void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index)
+{
+ u32 prttsyn_stat, hi, lo;
+ struct i40e_hw *hw;
+ u64 ns;
+
+ /* Since we cannot turn off the Rx timestamp logic if the device is
+ * doing Tx timestamping, check if Rx timestamping is configured.
+ */
+ if (!pf->ptp_rx)
+ return;
+
+ hw = &pf->hw;
+
+ prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
+
+ if (!(prttsyn_stat & (1 << index)))
+ return;
+
+ lo = rd32(hw, I40E_PRTTSYN_RXTIME_L(index));
+ hi = rd32(hw, I40E_PRTTSYN_RXTIME_H(index));
+
+ ns = (((u64)hi) << 32) | lo;
+
+ i40e_ptp_convert_to_hwtstamp(skb_hwtstamps(skb), ns);
+}
+
+/**
+ * i40e_ptp_set_increment - Utility function to update clock increment rate
+ * @pf: Board private structure
+ *
+ * During a link change, the DMA frequency that drives the 1588 logic will
+ * change. In order to keep the PRTTSYN_TIME registers in units of nanoseconds,
+ * we must update the increment value per clock tick.
+ **/
+void i40e_ptp_set_increment(struct i40e_pf *pf)
+{
+ struct i40e_link_status *hw_link_info;
+ struct i40e_hw *hw = &pf->hw;
+ u64 incval;
+
+ hw_link_info = &hw->phy.link_info;
+
+ i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
+
+ switch (hw_link_info->link_speed) {
+ case I40E_LINK_SPEED_10GB:
+ incval = I40E_PTP_10GB_INCVAL;
+ break;
+ case I40E_LINK_SPEED_1GB:
+ incval = I40E_PTP_1GB_INCVAL;
+ break;
+ case I40E_LINK_SPEED_100MB:
+ dev_warn(&pf->pdev->dev,
+ "%s: 1588 functionality is not supported at 100 Mbps. Stopping the PHC.\n",
+ __func__);
+ incval = 0;
+ break;
+ case I40E_LINK_SPEED_40GB:
+ default:
+ incval = I40E_PTP_40GB_INCVAL;
+ break;
+ }
+
+ /* Write the new increment value into the increment register. The
+ * hardware will not update the clock until both registers have been
+ * written.
+ */
+ wr32(hw, I40E_PRTTSYN_INC_L, incval & 0xFFFFFFFF);
+ wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
+
+ /* Update the base adjustment value. */
+ ACCESS_ONCE(pf->ptp_base_adj) = incval;
+ smp_mb(); /* Force the above update. */
+}
+
+/**
+ * i40e_ptp_get_ts_config - ioctl interface to read the HW timestamping
+ * @pf: Board private structure
+ * @ifr: ioctl data
+ *
+ * Obtain the current hardware timestamping settings as requested. To do this,
+ * the driver keeps a shadow copy of the timestamp settings rather than
+ * attempting to deconstruct them from the registers.
+ **/
+int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
+{
+ struct hwtstamp_config *config = &pf->tstamp_config;
+
+ return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
+ -EFAULT : 0;
+}
+
+/**
+ * i40e_ptp_set_ts_config - ioctl interface to control the HW timestamping
+ * @pf: Board private structure
+ * @ifr: ioctl data
+ *
+ * Respond to the user filter requests and make the appropriate hardware
+ * changes here. The XL710 cannot support splitting of the Tx/Rx timestamping
+ * logic, so keep track in software of whether to indicate these timestamps
+ * or not.
+ *
+ * It is permissible to "upgrade" the user request to a broader filter, as long
+ * as the user receives the timestamps they care about and the user is notified
+ * the filter has been broadened.
+ **/
+int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct hwtstamp_config *config = &pf->tstamp_config;
+ u32 pf_id, tsyntype, regval;
+
+ if (copy_from_user(config, ifr->ifr_data, sizeof(*config)))
+ return -EFAULT;
+
+ /* Reserved for future extensions. */
+ if (config->flags)
+ return -EINVAL;
+
+ /* Confirm that 1588 is supported on this PF. */
+ pf_id = (rd32(hw, I40E_PRTTSYN_CTL0) & I40E_PRTTSYN_CTL0_PF_ID_MASK) >>
+ I40E_PRTTSYN_CTL0_PF_ID_SHIFT;
+ if (hw->pf_id != pf_id)
+ return -EINVAL;
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ pf->ptp_tx = false;
+ break;
+ case HWTSTAMP_TX_ON:
+ pf->ptp_tx = true;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ pf->ptp_rx = false;
+ tsyntype = 0;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ pf->ptp_rx = true;
+ tsyntype = I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK |
+ I40E_PRTTSYN_CTL1_TSYNTYPE_V1 |
+ I40E_PRTTSYN_CTL1_UDP_ENA_MASK;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ pf->ptp_rx = true;
+ tsyntype = I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK |
+ I40E_PRTTSYN_CTL1_TSYNTYPE_V2 |
+ I40E_PRTTSYN_CTL1_UDP_ENA_MASK;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ default:
+ return -ERANGE;
+ }
+
+ /* Clear out all 1588-related registers to clear and unlatch them. */
+ rd32(hw, I40E_PRTTSYN_STAT_0);
+ rd32(hw, I40E_PRTTSYN_TXTIME_H);
+ rd32(hw, I40E_PRTTSYN_RXTIME_H(0));
+ rd32(hw, I40E_PRTTSYN_RXTIME_H(1));
+ rd32(hw, I40E_PRTTSYN_RXTIME_H(2));
+ rd32(hw, I40E_PRTTSYN_RXTIME_H(3));
+
+ /* Enable/disable the Tx timestamp interrupt based on user input. */
+ regval = rd32(hw, I40E_PRTTSYN_CTL0);
+ if (pf->ptp_tx)
+ regval |= I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK;
+ else
+ regval &= ~I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK;
+ wr32(hw, I40E_PRTTSYN_CTL0, regval);
+
+ regval = rd32(hw, I40E_PFINT_ICR0_ENA);
+ if (pf->ptp_tx)
+ regval |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
+ else
+ regval &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
+ wr32(hw, I40E_PFINT_ICR0_ENA, regval);
+
+ /* There is no simple on/off switch for Rx. To "disable" Rx support, the
+ * driver ignores received timestamps rather than turning off the clock.
+ */
+ if (pf->ptp_rx) {
+ regval = rd32(hw, I40E_PRTTSYN_CTL1);
+ /* clear everything but the enable bit */
+ regval &= I40E_PRTTSYN_CTL1_TSYNENA_MASK;
+ /* now enable bits for desired Rx timestamps */
+ regval |= tsyntype;
+ wr32(hw, I40E_PRTTSYN_CTL1, regval);
+ }
+
+ return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
+ -EFAULT : 0;
+}
+
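As the kernel-doc above notes, a narrow Rx filter request may come back broadened. A minimal user-space sketch of the standard SIOCSHWTSTAMP handshake (the interface name is illustrative, and error handling is trimmed):

```c
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <unistd.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC,	/* narrow request */
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* illustrative name */
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) == 0)
		/* on i40e this comes back as HWTSTAMP_FILTER_PTP_V2_EVENT */
		printf("granted rx_filter: %d\n", cfg.rx_filter);

	close(fd);
	return 0;
}
```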
+/**
+ * i40e_ptp_init - Initialize the 1588 support and register the PHC
+ * @pf: Board private structure
+ *
+ * This function registers the device clock as a PHC. If it is successful, it
+ * starts the clock in the hardware.
+ **/
+void i40e_ptp_init(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev;
+
+ strncpy(pf->ptp_caps.name, "i40e", sizeof(pf->ptp_caps.name));
+ pf->ptp_caps.owner = THIS_MODULE;
+ pf->ptp_caps.max_adj = 999999999;
+ pf->ptp_caps.n_ext_ts = 0;
+ pf->ptp_caps.pps = 0;
+ pf->ptp_caps.adjfreq = i40e_ptp_adjfreq;
+ pf->ptp_caps.adjtime = i40e_ptp_adjtime;
+ pf->ptp_caps.gettime = i40e_ptp_gettime;
+ pf->ptp_caps.settime = i40e_ptp_settime;
+ pf->ptp_caps.enable = i40e_ptp_enable;
+
+ /* Attempt to register the clock before enabling the hardware. */
+ pf->ptp_clock = ptp_clock_register(&pf->ptp_caps, &pf->pdev->dev);
+ if (IS_ERR(pf->ptp_clock)) {
+ pf->ptp_clock = NULL;
+ dev_err(&pf->pdev->dev, "%s: ptp_clock_register failed\n",
+ __func__);
+ } else {
+ struct timespec ts;
+ u32 regval;
+
+ spin_lock_init(&pf->tmreg_lock);
+ INIT_WORK(&pf->ptp_tx_work, i40e_ptp_tx_work);
+
+ dev_info(&pf->pdev->dev, "%s: added PHC on %s\n", __func__,
+ netdev->name);
+ pf->flags |= I40E_FLAG_PTP;
+
+ /* Ensure the clocks are running. */
+ regval = rd32(hw, I40E_PRTTSYN_CTL0);
+ regval |= I40E_PRTTSYN_CTL0_TSYNENA_MASK;
+ wr32(hw, I40E_PRTTSYN_CTL0, regval);
+ regval = rd32(hw, I40E_PRTTSYN_CTL1);
+ regval |= I40E_PRTTSYN_CTL1_TSYNENA_MASK;
+ wr32(hw, I40E_PRTTSYN_CTL1, regval);
+
+ /* Set the increment value per clock tick. */
+ i40e_ptp_set_increment(pf);
+
+ /* reset the tstamp_config */
+ memset(&pf->tstamp_config, 0, sizeof(pf->tstamp_config));
+
+ /* Set the clock value. */
+ ts = ktime_to_timespec(ktime_get_real());
+ i40e_ptp_settime(&pf->ptp_caps, &ts);
+ }
+}
+
+/**
+ * i40e_ptp_stop - Disable the driver/hardware support and unregister the PHC
+ * @pf: Board private structure
+ *
+ * This function handles the cleanup work required after initialization by
+ * clearing out the PTP state and unregistering the PHC.
+ **/
+void i40e_ptp_stop(struct i40e_pf *pf)
+{
+ pf->flags &= ~I40E_FLAG_PTP;
+ pf->ptp_tx = false;
+ pf->ptp_rx = false;
+
+ cancel_work_sync(&pf->ptp_tx_work);
+ if (pf->ptp_tx_skb) {
+ dev_kfree_skb_any(pf->ptp_tx_skb);
+ pf->ptp_tx_skb = NULL;
+ }
+
+ if (pf->ptp_clock) {
+ ptp_clock_unregister(pf->ptp_clock);
+ pf->ptp_clock = NULL;
+ dev_info(&pf->pdev->dev, "%s: removed PHC on %s\n", __func__,
+ pf->vsi[pf->lan_vsi]->netdev->name);
+ }
+}
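Once i40e_ptp_init succeeds, the registered clock is visible from user space as a /dev/ptpN character device and can be read with clock_gettime() through the usual dynamic-clockid mapping. A minimal sketch, assuming the clock landed at index 0:

```c
#include <fcntl.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define CLOCKFD 3
#define FD_TO_CLOCKID(fd)	((~(clockid_t)(fd) << 3) | CLOCKFD)

int main(void)
{
	int fd = open("/dev/ptp0", O_RDONLY);	/* device index varies */
	struct timespec ts;

	if (fd < 0)
		return 1;
	if (clock_gettime(FD_TO_CLOCKID(fd), &ts) == 0)
		printf("PHC time: %lld.%09ld\n",
		       (long long)ts.tv_sec, ts.tv_nsec);
	close(fd);
	return 0;
}
```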
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index 6bd333cde28b..1d40f425acf1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -28,6 +27,10 @@
#ifndef _I40E_REGISTER_H_
#define _I40E_REGISTER_H_
+#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */
+#define I40E_GL_GP_FUSE_MAX_INDEX 28
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK (0xFFFFFFFF << I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT)
#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4
#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0
#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT)
@@ -38,6 +41,11 @@
#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK (0x1F << I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT)
#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16
#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK (0xFF << I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK (0xFF << I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT)
#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8
#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0
#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK (0xFFFFFFFF << I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT)
@@ -50,9 +58,14 @@
#define I40E_PFPCI_VF_FLUSH_DONE 0x0009C600
#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT)
#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880
#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0
#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT)
+
#define I40E_PF_ARQBAH 0x00080180
#define I40E_PF_ARQBAH_ARQBAH_SHIFT 0
#define I40E_PF_ARQBAH_ARQBAH_MASK (0xFFFFFFFF << I40E_PF_ARQBAH_ARQBAH_SHIFT)
@@ -837,7 +850,7 @@
#define I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_SHIFT)
#define I40E_GLHMC_PEQ1FLMAX 0x000C2058
#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0
-#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK (0x3FFFFF << I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK (0x3FFFFFF << I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)
#define I40E_GLHMC_PEQ1MAX 0x000C2054
#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0
#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK (0x3FFFFFF << I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT)
@@ -903,7 +916,7 @@
#define I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_SHIFT)
#define I40E_GLHMC_PEXFFLMAX 0x000C204c
#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0
-#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK (0x3FFFFF << I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK (0x1FFFFFF << I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
#define I40E_GLHMC_PEXFMAX 0x000C2048
#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0
#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK (0x3FFFFFF << I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT)
@@ -1636,7 +1649,7 @@
#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11
#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK (0x1 << I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT)
#define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4))
-#define I40E_VSILAN_QTABLE_MAX_INDEX 15
+#define I40E_VSILAN_QTABLE_MAX_INDEX 7
#define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0
#define I40E_VSILAN_QTABLE_QINDEX_0_MASK (0x7FF << I40E_VSILAN_QTABLE_QINDEX_0_SHIFT)
#define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16
@@ -1773,16 +1786,20 @@
#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14
#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT)
#define I40E_GL_MNG_FWSM 0x000B6134
-#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 0
-#define I40E_GL_MNG_FWSM_FW_MODES_MASK (0x3FF << I40E_GL_MNG_FWSM_FW_MODES_SHIFT)
-#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 10
+#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 1
+#define I40E_GL_MNG_FWSM_FW_MODES_MASK (0x7 << I40E_GL_MNG_FWSM_FW_MODES_SHIFT)
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 6
#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK (0x1 << I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT)
#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11
#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK (0xF << I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT)
#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15
#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK (0x1 << I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT)
+#define I40E_GL_MNG_FWSM_RESET_CNT_SHIFT 16
+#define I40E_GL_MNG_FWSM_RESET_CNT_MASK (0x7 << I40E_GL_MNG_FWSM_RESET_CNT_SHIFT)
#define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19
#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK (0x3F << I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT)
+#define I40E_GL_MNG_FWSM_RSVD_SHIFT 25
+#define I40E_GL_MNG_FWSM_RSVD_MASK (0x1 << I40E_GL_MNG_FWSM_RSVD_SHIFT)
#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26
#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT)
#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27
@@ -2035,6 +2052,28 @@
#define I40E_GLNVM_SRDATA_WRDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_WRDATA_SHIFT)
#define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16
#define I40E_GLNVM_SRDATA_RDDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_RDDATA_SHIFT)
+#define I40E_GLNVM_ULD 0x000B6008
+#define I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT 0
+#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT 1
+#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT 2
+#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT 3
+#define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT 4
+#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT 5
+#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT 6
+#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT 7
+#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT 8
+#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT 9
+#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT)
+
#define I40E_GLPCI_BYTCTH 0x0009C484
#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0
#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK (0xFFFFFFFF << I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT)
@@ -2170,6 +2209,12 @@
#define I40E_GLPCI_PCIERR 0x000BE4FC
#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0
#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK (0xFFFFFFFF << I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT)
+#define I40E_GLPCI_PCITEST2 0x000BE4BC
+#define I40E_GLPCI_PCITEST2_IOV_TEST_MODE_SHIFT 0
+#define I40E_GLPCI_PCITEST2_IOV_TEST_MODE_MASK (0x1 << I40E_GLPCI_PCITEST2_IOV_TEST_MODE_SHIFT)
+#define I40E_GLPCI_PCITEST2_TAG_ALLOC_SHIFT 1
+#define I40E_GLPCI_PCITEST2_TAG_ALLOC_MASK (0x1 << I40E_GLPCI_PCITEST2_TAG_ALLOC_SHIFT)
+
#define I40E_GLPCI_PKTCT 0x0009C4BC
#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0
#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK (0xFFFFFFFF << I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT)
@@ -2380,8 +2425,7 @@
#define I40E_PFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
-#define I40E_PFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT 17
-#define I40E_PFPE_IPCONFIG0_USEUPPERIDRANGE_MASK (0x1 << I40E_PFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT)
+
#define I40E_PFPE_MRTEIDXMASK 0x00008600
#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
@@ -2460,8 +2504,6 @@
#define I40E_VFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
-#define I40E_VFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT 17
-#define I40E_VFPE_IPCONFIG0_USEUPPERIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT)
#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */
#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127
#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
@@ -3141,30 +3183,6 @@
#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31
#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
-#define I40E_GLPM_DMACR 0x000881F4
-#define I40E_GLPM_DMACR_DMACWT_SHIFT 0
-#define I40E_GLPM_DMACR_DMACWT_MASK (0xFFFF << I40E_GLPM_DMACR_DMACWT_SHIFT)
-#define I40E_GLPM_DMACR_EXIT_DC_SHIFT 29
-#define I40E_GLPM_DMACR_EXIT_DC_MASK (0x1 << I40E_GLPM_DMACR_EXIT_DC_SHIFT)
-#define I40E_GLPM_DMACR_LX_COALESCING_INDICATION_SHIFT 30
-#define I40E_GLPM_DMACR_LX_COALESCING_INDICATION_MASK (0x1 << I40E_GLPM_DMACR_LX_COALESCING_INDICATION_SHIFT)
-#define I40E_GLPM_DMACR_DMAC_EN_SHIFT 31
-#define I40E_GLPM_DMACR_DMAC_EN_MASK (0x1 << I40E_GLPM_DMACR_DMAC_EN_SHIFT)
-#define I40E_GLPM_LTRC 0x000BE500
-#define I40E_GLPM_LTRC_SLTRV_SHIFT 0
-#define I40E_GLPM_LTRC_SLTRV_MASK (0x3FF << I40E_GLPM_LTRC_SLTRV_SHIFT)
-#define I40E_GLPM_LTRC_SSCALE_SHIFT 10
-#define I40E_GLPM_LTRC_SSCALE_MASK (0x7 << I40E_GLPM_LTRC_SSCALE_SHIFT)
-#define I40E_GLPM_LTRC_LTRS_REQUIREMENT_SHIFT 15
-#define I40E_GLPM_LTRC_LTRS_REQUIREMENT_MASK (0x1 << I40E_GLPM_LTRC_LTRS_REQUIREMENT_SHIFT)
-#define I40E_GLPM_LTRC_NSLTRV_SHIFT 16
-#define I40E_GLPM_LTRC_NSLTRV_MASK (0x3FF << I40E_GLPM_LTRC_NSLTRV_SHIFT)
-#define I40E_GLPM_LTRC_NSSCALE_SHIFT 26
-#define I40E_GLPM_LTRC_NSSCALE_MASK (0x7 << I40E_GLPM_LTRC_NSSCALE_SHIFT)
-#define I40E_GLPM_LTRC_LTR_SEND_SHIFT 30
-#define I40E_GLPM_LTRC_LTR_SEND_MASK (0x1 << I40E_GLPM_LTRC_LTR_SEND_SHIFT)
-#define I40E_GLPM_LTRC_LTRNS_REQUIREMENT_SHIFT 31
-#define I40E_GLPM_LTRC_LTRNS_REQUIREMENT_MASK (0x1 << I40E_GLPM_LTRC_LTRNS_REQUIREMENT_SHIFT)
#define I40E_PRTPM_EEE_STAT 0x001E4320
#define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29
#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK (0x1 << I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT)
@@ -3201,9 +3219,6 @@
#define I40E_PRTPM_GC_LCDMP_MASK (0x1 << I40E_PRTPM_GC_LCDMP_SHIFT)
#define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31
#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK (0x1 << I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT)
-#define I40E_PRTPM_HPTC 0x000AC800
-#define I40E_PRTPM_HPTC_HIGH_PRI_TC_SHIFT 0
-#define I40E_PRTPM_HPTC_HIGH_PRI_TC_MASK (0xFF << I40E_PRTPM_HPTC_HIGH_PRI_TC_SHIFT)
#define I40E_PRTPM_RLPIC 0x001E43A0
#define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0
#define I40E_PRTPM_RLPIC_ERLPIC_MASK (0xFFFFFFFF << I40E_PRTPM_RLPIC_ERLPIC_SHIFT)
@@ -3265,8 +3280,8 @@
#define I40E_GLQF_CTL_HTOEP_FCOE_MASK (0x1 << I40E_GLQF_CTL_HTOEP_FCOE_SHIFT)
#define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3
#define I40E_GLQF_CTL_PCNT_ALLOC_MASK (0x7 << I40E_GLQF_CTL_PCNT_ALLOC_SHIFT)
-#define I40E_GLQF_CTL_DDPLPEN_SHIFT 7
-#define I40E_GLQF_CTL_DDPLPEN_MASK (0x1 << I40E_GLQF_CTL_DDPLPEN_SHIFT)
+#define I40E_GLQF_CTL_RSVD_SHIFT 7
+#define I40E_GLQF_CTL_RSVD_MASK (0x1 << I40E_GLQF_CTL_RSVD_SHIFT)
#define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8
#define I40E_GLQF_CTL_MAXPEBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXPEBLEN_SHIFT)
#define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11
@@ -3416,9 +3431,9 @@
#define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */
#define I40E_PRTQF_FLX_PIT_MAX_INDEX 8
#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0
-#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK (0x3F << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
-#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 6
-#define I40E_PRTQF_FLX_PIT_FSIZE_MASK (0xF << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK (0x1F << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
+#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 5
+#define I40E_PRTQF_FLX_PIT_FSIZE_MASK (0x1F << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
#define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10
#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK (0x3F << I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
#define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4))
@@ -3504,7 +3519,7 @@
#define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5
#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT)
#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4))
-#define I40E_VSIQF_TCREGION_MAX_INDEX 7
+#define I40E_VSIQF_TCREGION_MAX_INDEX 3
#define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0
#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK (0x1FF << I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT)
#define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9
@@ -3521,10 +3536,7 @@
#define I40E_GL_FCOEDDPC_MAX_INDEX 143
#define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0
#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK (0xFFFFFFFF << I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT)
-#define I40E_GL_FCOEDDPEC(_i) (0x00314900 + ((_i) * 8)) /* _i=0...143 */
-#define I40E_GL_FCOEDDPEC_MAX_INDEX 143
-#define I40E_GL_FCOEDDPEC_CFOEDDPEC_SHIFT 0
-#define I40E_GL_FCOEDDPEC_CFOEDDPEC_MASK (0xFFFFFFFF << I40E_GL_FCOEDDPEC_CFOEDDPEC_SHIFT)
+/* _i=0...143 */
#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */
#define I40E_GL_FCOEDIFEC_MAX_INDEX 143
#define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0
@@ -4276,46 +4288,10 @@
#define I40E_PFPM_APM 0x000B8080
#define I40E_PFPM_APM_APME_SHIFT 0
#define I40E_PFPM_APM_APME_MASK (0x1 << I40E_PFPM_APM_APME_SHIFT)
-#define I40E_PFPM_FHFT_DATA(_i, _j) (0x00060000 + ((_i) * 4096 + (_j) * 128))
-#define I40E_PFPM_FHFT_DATA_MAX_INDEX 7
-#define I40E_PFPM_FHFT_DATA_DWORD_SHIFT 0
-#define I40E_PFPM_FHFT_DATA_DWORD_MASK (0xFFFFFFFF << I40E_PFPM_FHFT_DATA_DWORD_SHIFT)
#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */
#define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7
#define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0
#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK (0xFF << I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT)
-#define I40E_PFPM_FHFT_MASK(_i, _j) (0x00068000 + ((_i) * 1024 + (_j) * 128))
-#define I40E_PFPM_FHFT_MASK_MAX_INDEX 7
-#define I40E_PFPM_FHFT_MASK_MASK_SHIFT 0
-#define I40E_PFPM_FHFT_MASK_MASK_MASK (0xFFFF << I40E_PFPM_FHFT_MASK_MASK_SHIFT)
-#define I40E_PFPM_PROXYFC 0x00245A80
-#define I40E_PFPM_PROXYFC_PPROXYE_SHIFT 0
-#define I40E_PFPM_PROXYFC_PPROXYE_MASK (0x1 << I40E_PFPM_PROXYFC_PPROXYE_SHIFT)
-#define I40E_PFPM_PROXYFC_EX_SHIFT 1
-#define I40E_PFPM_PROXYFC_EX_MASK (0x1 << I40E_PFPM_PROXYFC_EX_SHIFT)
-#define I40E_PFPM_PROXYFC_ARP_SHIFT 4
-#define I40E_PFPM_PROXYFC_ARP_MASK (0x1 << I40E_PFPM_PROXYFC_ARP_SHIFT)
-#define I40E_PFPM_PROXYFC_ARP_DIRECTED_SHIFT 5
-#define I40E_PFPM_PROXYFC_ARP_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYFC_ARP_DIRECTED_SHIFT)
-#define I40E_PFPM_PROXYFC_NS_SHIFT 9
-#define I40E_PFPM_PROXYFC_NS_MASK (0x1 << I40E_PFPM_PROXYFC_NS_SHIFT)
-#define I40E_PFPM_PROXYFC_NS_DIRECTED_SHIFT 10
-#define I40E_PFPM_PROXYFC_NS_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYFC_NS_DIRECTED_SHIFT)
-#define I40E_PFPM_PROXYFC_MLD_SHIFT 12
-#define I40E_PFPM_PROXYFC_MLD_MASK (0x1 << I40E_PFPM_PROXYFC_MLD_SHIFT)
-#define I40E_PFPM_PROXYS 0x00245B80
-#define I40E_PFPM_PROXYS_EX_SHIFT 1
-#define I40E_PFPM_PROXYS_EX_MASK (0x1 << I40E_PFPM_PROXYS_EX_SHIFT)
-#define I40E_PFPM_PROXYS_ARP_SHIFT 4
-#define I40E_PFPM_PROXYS_ARP_MASK (0x1 << I40E_PFPM_PROXYS_ARP_SHIFT)
-#define I40E_PFPM_PROXYS_ARP_DIRECTED_SHIFT 5
-#define I40E_PFPM_PROXYS_ARP_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYS_ARP_DIRECTED_SHIFT)
-#define I40E_PFPM_PROXYS_NS_SHIFT 9
-#define I40E_PFPM_PROXYS_NS_MASK (0x1 << I40E_PFPM_PROXYS_NS_SHIFT)
-#define I40E_PFPM_PROXYS_NS_DIRECTED_SHIFT 10
-#define I40E_PFPM_PROXYS_NS_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYS_NS_DIRECTED_SHIFT)
-#define I40E_PFPM_PROXYS_MLD_SHIFT 12
-#define I40E_PFPM_PROXYS_MLD_MASK (0x1 << I40E_PFPM_PROXYS_MLD_SHIFT)
#define I40E_PFPM_WUC 0x0006B200
#define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5
#define I40E_PFPM_WUC_EN_APM_D0_MASK (0x1 << I40E_PFPM_WUC_EN_APM_D0_SHIFT)
@@ -4536,21 +4512,21 @@
#define I40E_VFMSIX_PBA 0x00002000
#define I40E_VFMSIX_PBA_PENBIT_SHIFT 0
#define I40E_VFMSIX_PBA_PENBIT_MASK (0xFFFFFFFF << I40E_VFMSIX_PBA_PENBIT_SHIFT)
-#define I40E_VFMSIX_TADD(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */
#define I40E_VFMSIX_TADD_MAX_INDEX 16
#define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0
#define I40E_VFMSIX_TADD_MSIXTADD10_MASK (0x3 << I40E_VFMSIX_TADD_MSIXTADD10_SHIFT)
#define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2
#define I40E_VFMSIX_TADD_MSIXTADD_MASK (0x3FFFFFFF << I40E_VFMSIX_TADD_MSIXTADD_SHIFT)
-#define I40E_VFMSIX_TMSG(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */
#define I40E_VFMSIX_TMSG_MAX_INDEX 16
#define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0
#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK (0xFFFFFFFF << I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT)
-#define I40E_VFMSIX_TUADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */
#define I40E_VFMSIX_TUADD_MAX_INDEX 16
#define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0
#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK (0xFFFFFFFF << I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT)
-#define I40E_VFMSIX_TVCTRL(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */
#define I40E_VFMSIX_TVCTRL_MAX_INDEX 16
#define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0
#define I40E_VFMSIX_TVCTRL_MASK_MASK (0x1 << I40E_VFMSIX_TVCTRL_MASK_SHIFT)
@@ -4610,8 +4586,6 @@
#define I40E_VFPE_IPCONFIG01_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
-#define I40E_VFPE_IPCONFIG01_USEUPPERIDRANGE_SHIFT 17
-#define I40E_VFPE_IPCONFIG01_USEUPPERIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG01_USEUPPERIDRANGE_SHIFT)
#define I40E_VFPE_MRTEIDXMASK1 0x00009000
#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
@@ -4684,5 +4658,13 @@
#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
#define I40E_VFQF_HREGION_REGION_7_SHIFT 29
#define I40E_VFQF_HREGION_REGION_7_MASK (0x7 << I40E_VFQF_HREGION_REGION_7_SHIFT)
-
+#define I40E_RCU_PST_FOC_ACCESS_STATUS 0x00270110
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_SHIFT 0
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_SHIFT 8
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_SHIFT 16
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_SHIFT 24
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_MASK (0x7 << I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_SHIFT)
#endif
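The new defines follow the header's usual (value & MASK) >> SHIFT decode idiom. A tiny stand-alone example, using the ERR_CNT field layout copied from the I40E_RCU_PST_FOC_ACCESS_STATUS entries above and a made-up raw register value:

```c
#include <stdint.h>
#include <stdio.h>

#define ERR_CNT_SHIFT	16
#define ERR_CNT_MASK	(0xFF << ERR_CNT_SHIFT)

int main(void)
{
	uint32_t regval = 0x01020304;	/* hypothetical register read */
	uint32_t err_cnt = (regval & ERR_CNT_MASK) >> ERR_CNT_SHIFT;

	printf("err_cnt = %u\n", err_cnt);	/* prints 2 */
	return 0;
}
```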
diff --git a/drivers/net/ethernet/intel/i40e/i40e_status.h b/drivers/net/ethernet/intel/i40e/i40e_status.h
index 5e5bcddac573..5f9cac55aa55 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_status.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_status.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index f1f03bc5c729..d4bb482b1a7f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -77,7 +76,6 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
/* grab the next descriptor */
i = tx_ring->next_to_use;
fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
- tx_buf = &tx_ring->tx_bi[i];
tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
@@ -129,15 +127,23 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
/* Now program a dummy descriptor */
i = tx_ring->next_to_use;
tx_desc = I40E_TX_DESC(tx_ring, i);
+ tx_buf = &tx_ring->tx_bi[i];
tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_LOOKUP);
+ dma_unmap_addr_set(tx_buf, dma, dma);
+
tx_desc->buffer_addr = cpu_to_le64(dma);
td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 0);
+ /* set the timestamp */
+ tx_buf->time_stamp = jiffies;
+
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
@@ -768,7 +774,7 @@ void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
rx_ring->rx_buf_len);
if (!skb) {
- rx_ring->rx_stats.alloc_rx_buff_failed++;
+ rx_ring->rx_stats.alloc_buff_failed++;
goto no_buffers;
}
/* initialize queue mapping */
@@ -782,7 +788,7 @@ void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
if (dma_mapping_error(rx_ring->dev, bi->dma)) {
- rx_ring->rx_stats.alloc_rx_buff_failed++;
+ rx_ring->rx_stats.alloc_buff_failed++;
bi->dma = 0;
goto no_buffers;
}
@@ -792,7 +798,7 @@ void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
if (!bi->page) {
bi->page = alloc_page(GFP_ATOMIC);
if (!bi->page) {
- rx_ring->rx_stats.alloc_rx_page_failed++;
+ rx_ring->rx_stats.alloc_page_failed++;
goto no_buffers;
}
}
@@ -807,7 +813,7 @@ void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
DMA_FROM_DEVICE);
if (dma_mapping_error(rx_ring->dev,
bi->page_dma)) {
- rx_ring->rx_stats.alloc_rx_page_failed++;
+ rx_ring->rx_stats.alloc_page_failed++;
bi->page_dma = 0;
goto no_buffers;
}
@@ -860,12 +866,25 @@ static void i40e_receive_skb(struct i40e_ring *rx_ring,
* @skb: skb currently being received and modified
* @rx_status: status value of last descriptor in packet
* @rx_error: error value of last descriptor in packet
+ * @rx_ptype: ptype value of last descriptor in packet
**/
static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
struct sk_buff *skb,
u32 rx_status,
- u32 rx_error)
+ u32 rx_error,
+ u16 rx_ptype)
{
+ bool ipv4_tunnel, ipv6_tunnel;
+ __wsum rx_udp_csum;
+ __sum16 csum;
+ struct iphdr *iph;
+
+ ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
+ (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
+ ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
+ (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
+
+ skb->encapsulation = ipv4_tunnel || ipv6_tunnel;
skb->ip_summed = CHECKSUM_NONE;
/* Rx csum enabled and ip headers found? */
@@ -873,13 +892,47 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
return;
- /* IP or L4 checksum error */
+ /* likely incorrect csum if alternate IP extension headers found */
+ if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+ return;
+
+ /* IP or L4 or outermost IP checksum error */
if (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
- (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))) {
+ (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) |
+ (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))) {
vsi->back->hw_csum_rx_error++;
return;
}
+ if (ipv4_tunnel &&
+ !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
+ /* If VXLAN traffic has an outer UDPv4 checksum we need to check
+ * it in the driver, hardware does not do it for us.
+ * Since L3L4P bit was set we assume a valid IHL value (>=5)
+ * so the total length of IPv4 header is IHL*4 bytes
+ */
+ skb->transport_header = skb->mac_header +
+ sizeof(struct ethhdr) +
+ (ip_hdr(skb)->ihl * 4);
+
+ /* Add 4 bytes for VLAN tagged packets */
+ skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) ||
+ skb->protocol == htons(ETH_P_8021AD))
+ ? VLAN_HLEN : 0;
+
+ rx_udp_csum = udp_csum(skb);
+ iph = ip_hdr(skb);
+ csum = csum_tcpudp_magic(
+ iph->saddr, iph->daddr,
+ (skb->len - skb_transport_offset(skb)),
+ IPPROTO_UDP, rx_udp_csum);
+
+ if (udp_hdr(skb)->check != csum) {
+ vsi->back->hw_csum_rx_error++;
+ return;
+ }
+ }
+
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
@@ -891,13 +944,15 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
static inline u32 i40e_rx_hash(struct i40e_ring *ring,
union i40e_rx_desc *rx_desc)
{
- if (ring->netdev->features & NETIF_F_RXHASH) {
- if ((le64_to_cpu(rx_desc->wb.qword1.status_error_len) >>
- I40E_RX_DESC_STATUS_FLTSTAT_SHIFT) &
- I40E_RX_DESC_FLTSTAT_RSS_HASH)
- return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
- }
- return 0;
+ const __le64 rss_mask =
+ cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
+ I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
+
+ if ((ring->netdev->features & NETIF_F_RXHASH) &&
+ (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
+ return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
+ else
+ return 0;
}
/**
@@ -918,11 +973,12 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
union i40e_rx_desc *rx_desc;
u32 rx_error, rx_status;
u64 qword;
+ u16 rx_ptype;
rx_desc = I40E_RX_DESC(rx_ring, i);
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- rx_status = (qword & I40E_RXD_QW1_STATUS_MASK)
- >> I40E_RXD_QW1_STATUS_SHIFT;
+ rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
union i40e_rx_desc *next_rxd;
@@ -938,18 +994,20 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
skb = rx_bi->skb;
prefetch(skb->data);
- rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK)
- >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
- rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK)
- >> I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
- rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK)
- >> I40E_RXD_QW1_LENGTH_SPH_SHIFT;
+ rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+ rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
+ rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
+ I40E_RXD_QW1_LENGTH_SPH_SHIFT;
- rx_error = (qword & I40E_RXD_QW1_ERROR_MASK)
- >> I40E_RXD_QW1_ERROR_SHIFT;
+ rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
+ I40E_RXD_QW1_ERROR_SHIFT;
rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+ rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT;
rx_bi->skb = NULL;
/* This memory barrier is needed to keep us from reading
@@ -1030,13 +1088,21 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
}
skb->rxhash = i40e_rx_hash(rx_ring, rx_desc);
- i40e_rx_checksum(vsi, skb, rx_status, rx_error);
+ if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
+ i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
+ I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
+ I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
+ rx_ring->last_rx_timestamp = jiffies;
+ }
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
total_rx_packets++;
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+
+ i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
+
vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
: 0;
@@ -1059,8 +1125,8 @@ next_desc:
/* use prefetched values */
rx_desc = next_rxd;
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- rx_status = (qword & I40E_RXD_QW1_STATUS_MASK)
- >> I40E_RXD_QW1_STATUS_SHIFT;
+ rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
}
rx_ring->next_to_clean = i;
@@ -1173,7 +1239,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
u16 i;
/* make sure ATR is enabled */
- if (!(pf->flags & I40E_FLAG_FDIR_ATR_ENABLED))
+ if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
return;
/* if sampling is disabled do nothing */
@@ -1268,7 +1334,7 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
tx_flags |= I40E_TX_FLAGS_HW_VLAN;
/* else if it is a SW VLAN, check the next protocol and store the tag */
- } else if (protocol == __constant_htons(ETH_P_8021Q)) {
+ } else if (protocol == htons(ETH_P_8021Q)) {
struct vlan_hdr *vhdr, _vhdr;
vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
if (!vhdr)
@@ -1333,7 +1399,7 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
return err;
}
- if (protocol == __constant_htons(ETH_P_IP)) {
+ if (protocol == htons(ETH_P_IP)) {
iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
iph->tot_len = 0;
@@ -1359,10 +1425,50 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
cd_cmd = I40E_TX_CTX_DESC_TSO;
cd_tso_len = skb->len - *hdr_len;
cd_mss = skb_shinfo(skb)->gso_size;
- *cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT)
- | ((u64)cd_tso_len
- << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT)
- | ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
+ *cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
+ ((u64)cd_tso_len <<
+ I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
+ ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
+ return 1;
+}
+
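For readers tracing the TSO path: cd_cmd, cd_tso_len and cd_mss are all packed into a single 64-bit context-descriptor qword, each shifted into its own field before being OR'd together. A standalone sketch of that packing (the shift values here are assumptions for illustration; the driver's real ones are the I40E_TXD_CTX_QW1_* defines):

#include <stdint.h>
#include <stdio.h>

/* Assumed field offsets, for illustration only. */
#define CTX_CMD_SHIFT		4
#define CTX_TSO_LEN_SHIFT	30
#define CTX_MSS_SHIFT		50

int main(void)
{
	uint64_t cd_cmd = 0x01, cd_tso_len = 60000, cd_mss = 1460;

	/* Non-overlapping fields OR'd into one qword. */
	uint64_t qw1 = (cd_cmd << CTX_CMD_SHIFT) |
		       (cd_tso_len << CTX_TSO_LEN_SHIFT) |
		       (cd_mss << CTX_MSS_SHIFT);

	printf("context qword = 0x%016llx\n", (unsigned long long)qw1);
	return 0;
}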
+/**
+ * i40e_tsyn - set up the tsyn context descriptor
+ * @tx_ring: ptr to the ring to send
+ * @skb: ptr to the skb we're sending
+ * @tx_flags: the collected send information
+ *
+ * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
+ **/
+static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
+ u32 tx_flags, u64 *cd_type_cmd_tso_mss)
+{
+ struct i40e_pf *pf;
+
+ if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
+ return 0;
+
+ /* Tx timestamps cannot be sampled when doing TSO */
+ if (tx_flags & I40E_TX_FLAGS_TSO)
+ return 0;
+
+ /* only timestamp the outbound packet if the user has requested it and
+ * we are not already transmitting a packet to be timestamped
+ */
+ pf = i40e_netdev_to_pf(tx_ring->netdev);
+ if (pf->ptp_tx && !pf->ptp_tx_skb) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ pf->ptp_tx_skb = skb_get(skb);
+ } else {
+ return 0;
+ }
+
+ *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
+ I40E_TXD_CTX_QW1_CMD_SHIFT;
+
+ pf->ptp_tx_start = jiffies;
+ schedule_work(&pf->ptp_tx_work);
+
return 1;
}
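i40e_tsyn above latches at most one packet for hardware Tx timestamping: if pf->ptp_tx_skb is already occupied, a new request is dropped (the function returns 0) rather than queued, since the hardware can latch only one outbound timestamp at a time. A hedged userspace sketch of that single-slot idea, with invented names:

#include <stdbool.h>
#include <stdio.h>

struct tstamp_slot {
	bool enabled;		/* stands in for pf->ptp_tx */
	const char *in_flight;	/* stands in for pf->ptp_tx_skb */
};

/* Returns true if this packet will be timestamped. */
static bool tstamp_try_latch(struct tstamp_slot *s, const char *pkt)
{
	if (!s->enabled || s->in_flight)
		return false;	/* feature off or slot busy: skip, don't queue */
	s->in_flight = pkt;	/* the driver takes an skb reference here */
	return true;
}

int main(void)
{
	struct tstamp_slot slot = { .enabled = true };

	printf("first:  %d\n", tstamp_try_latch(&slot, "pkt A")); /* 1 */
	printf("second: %d\n", tstamp_try_latch(&slot, "pkt B")); /* 0: busy */
	return 0;
}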
@@ -1660,6 +1766,7 @@ dma_error:
static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ /* Memory barrier before checking head and tail */
smp_mb();
/* Check again in case another CPU has just made room available. */
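The smp_mb() documented here closes a classic producer/consumer race: the transmit path stops the queue and must then re-read the free-descriptor count, because the cleanup path may have freed descriptors (and declined to wake a queue that was not yet stopped) in between; without the barrier the stop and the re-read could be reordered. A sketch of the shape of that pattern, using C11 atomics in place of the kernel barrier:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct txq {
	atomic_int free_descs;
	atomic_bool stopped;
};

static bool maybe_stop(struct txq *q, int needed)
{
	if (atomic_load(&q->free_descs) >= needed)
		return false;			/* enough room, carry on */

	atomic_store(&q->stopped, true);	/* stop the queue first */
	atomic_thread_fence(memory_order_seq_cst);	/* kernel: smp_mb() */

	/* Re-check: the clean path may have freed descriptors between our
	 * first check and the stop; if so, undo the stop. */
	if (atomic_load(&q->free_descs) < needed)
		return true;			/* genuinely full */

	atomic_store(&q->stopped, false);
	return false;
}

int main(void)
{
	struct txq q = { .free_descs = 2 };

	printf("stopped: %d\n", maybe_stop(&q, 8)); /* 1: ring too full */
	return 0;
}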
@@ -1741,6 +1848,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
__be16 protocol;
u32 td_cmd = 0;
u8 hdr_len = 0;
+ int tsyn;
int tso;
if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
return NETDEV_TX_BUSY;
@@ -1756,9 +1864,9 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
first = &tx_ring->tx_bi[tx_ring->next_to_use];
/* setup IPv4/IPv6 offloads */
- if (protocol == __constant_htons(ETH_P_IP))
+ if (protocol == htons(ETH_P_IP))
tx_flags |= I40E_TX_FLAGS_IPV4;
- else if (protocol == __constant_htons(ETH_P_IPV6))
+ else if (protocol == htons(ETH_P_IPV6))
tx_flags |= I40E_TX_FLAGS_IPV6;
tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
@@ -1771,6 +1879,11 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
skb_tx_timestamp(skb);
+ tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
+
+ if (tsyn)
+ tx_flags |= I40E_TX_FLAGS_TSYN;
+
/* always enable CRC insertion offload */
td_cmd |= I40E_TX_DESC_CMD_ICRC;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index db55d9947f15..d5349698e513 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -25,11 +24,13 @@
*
******************************************************************************/
+#ifndef _I40E_TXRX_H_
+#define _I40E_TXRX_H_
+
/* Interrupt Throttling and Rate Limiting (storm control) Goodies */
-#define I40E_MAX_ITR 0x07FF
-#define I40E_MIN_ITR 0x0001
-#define I40E_ITR_USEC_RESOLUTION 2
+#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */
+#define I40E_MIN_ITR 0x0004 /* reg uses 2 usec resolution */
#define I40E_MAX_IRATE 0x03F
#define I40E_MIN_IRATE 0x001
#define I40E_IRATE_USEC_RESOLUTION 4
@@ -49,10 +50,43 @@
#define I40E_QUEUE_END_OF_LIST 0x7FF
-#define I40E_ITR_NONE 3
-#define I40E_RX_ITR 0
-#define I40E_TX_ITR 1
-#define I40E_PE_ITR 2
+/* this enum matches hardware bits and is meant to be used by DYN_CTLN
+ * registers and QINT registers, or more generally anywhere in the manual
+ * that mentions ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
+ * register; instead it is a special value meaning "don't update" ITR0/1/2.
+ */
+enum i40e_dyn_idx_t {
+ I40E_IDX_ITR0 = 0,
+ I40E_IDX_ITR1 = 1,
+ I40E_IDX_ITR2 = 2,
+ I40E_ITR_NONE = 3 /* ITR_NONE must not be used as an index */
+};
+
+/* these are indexes into ITRN registers */
+#define I40E_RX_ITR I40E_IDX_ITR0
+#define I40E_TX_ITR I40E_IDX_ITR1
+#define I40E_PE_ITR I40E_IDX_ITR2
+
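ITR_NONE shows up wherever the driver wants to fire or re-enable an interrupt without touching any throttle timer: the value 3 is written into the ITR_INDX field of a DYN_CTL register (the old open-coded form of exactly that write is visible further down in this patch, in the removed lines of i40e_free_vfs). A sketch of composing such a register value, with illustrative bit positions (the real ones are the I40E_PFINT_DYN_CTL0_* defines in i40e_register.h):

#include <stdint.h>
#include <stdio.h>

/* Assumed bit positions, for illustration only. */
#define DYN_CTL_INTENA		(1u << 0)
#define DYN_CTL_CLEARPBA	(1u << 1)
#define DYN_CTL_ITR_INDX_SHIFT	3
#define ITR_NONE		3u	/* "don't update any ITR" */

int main(void)
{
	uint32_t val = DYN_CTL_INTENA | DYN_CTL_CLEARPBA |
		       (ITR_NONE << DYN_CTL_ITR_INDX_SHIFT);

	printf("DYN_CTL0 write: 0x%x\n", val);
	return 0;
}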
+/* Supported RSS offloads */
+#define I40E_DEFAULT_RSS_HENA ( \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD))
+
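The HENA value is a plain 64-bit bitmap: each packet-classifier type (PCTYPE) the hardware can hash on owns the bit whose position equals its PCTYPE number, so enabling RSS for a flow type is one shifted OR. A standalone sketch using two PCTYPE numbers taken from the enum later in this patch:

#include <stdint.h>
#include <stdio.h>

/* Values match the i40e_filter_pctype enum in this patch. */
enum { PCTYPE_NONF_IPV4_UDP = 31, PCTYPE_L2_PAYLOAD = 63 };

int main(void)
{
	uint64_t hena = 0;

	hena |= (uint64_t)1 << PCTYPE_NONF_IPV4_UDP;
	hena |= (uint64_t)1 << PCTYPE_L2_PAYLOAD;

	printf("IPv4/UDP hashed? %s\n",
	       (hena >> PCTYPE_NONF_IPV4_UDP) & 1 ? "yes" : "no");
	return 0;
}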
/* Supported Rx Buffer Sizes */
#define I40E_RXBUFFER_512 512 /* Used for packet split */
#define I40E_RXBUFFER_2048 2048
@@ -102,6 +136,7 @@
#define I40E_TX_FLAGS_IPV6 (u32)(1 << 5)
#define I40E_TX_FLAGS_FCCRC (u32)(1 << 6)
#define I40E_TX_FLAGS_FSO (u32)(1 << 7)
+#define I40E_TX_FLAGS_TSYN (u32)(1 << 8)
#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
@@ -139,8 +174,8 @@ struct i40e_tx_queue_stats {
struct i40e_rx_queue_stats {
u64 non_eop_descs;
- u64 alloc_rx_page_failed;
- u64 alloc_rx_buff_failed;
+ u64 alloc_page_failed;
+ u64 alloc_buff_failed;
};
enum i40e_ring_state_t {
@@ -214,6 +249,8 @@ struct i40e_ring {
u8 atr_sample_rate;
u8 atr_count;
+ unsigned long last_rx_timestamp;
+
bool ring_active; /* is ring online or not */
/* stats structs */
@@ -262,3 +299,4 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring);
void i40e_free_tx_resources(struct i40e_ring *tx_ring);
void i40e_free_rx_resources(struct i40e_ring *rx_ring);
int i40e_napi_poll(struct napi_struct *napi, int budget);
+#endif /* _I40E_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index f3f22b20f02f..181a825d3160 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -36,38 +35,31 @@
#include "i40e_lan_hmc.h"
/* Device IDs */
-#define I40E_SFP_XL710_DEVICE_ID 0x1572
-#define I40E_SFP_X710_DEVICE_ID 0x1573
-#define I40E_QEMU_DEVICE_ID 0x1574
-#define I40E_KX_A_DEVICE_ID 0x157F
-#define I40E_KX_B_DEVICE_ID 0x1580
-#define I40E_KX_C_DEVICE_ID 0x1581
-#define I40E_KX_D_DEVICE_ID 0x1582
-#define I40E_QSFP_A_DEVICE_ID 0x1583
-#define I40E_QSFP_B_DEVICE_ID 0x1584
-#define I40E_QSFP_C_DEVICE_ID 0x1585
-#define I40E_VF_DEVICE_ID 0x154C
-#define I40E_VF_HV_DEVICE_ID 0x1571
-
-#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR 0x0000
+#define I40E_DEV_ID_SFP_XL710 0x1572
+#define I40E_DEV_ID_SFP_X710 0x1573
+#define I40E_DEV_ID_QEMU 0x1574
+#define I40E_DEV_ID_KX_A 0x157F
+#define I40E_DEV_ID_KX_B 0x1580
+#define I40E_DEV_ID_KX_C 0x1581
+#define I40E_DEV_ID_KX_D 0x1582
+#define I40E_DEV_ID_QSFP_A 0x1583
+#define I40E_DEV_ID_QSFP_B 0x1584
+#define I40E_DEV_ID_QSFP_C 0x1585
+#define I40E_DEV_ID_VF 0x154C
+#define I40E_DEV_ID_VF_HV 0x1571
+
+#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
+ (d) == I40E_DEV_ID_QSFP_B || \
+ (d) == I40E_DEV_ID_QSFP_C)
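Beyond the mechanical rename to the kernel-conventional I40E_DEV_ID_* spelling, the new i40e_is_40G_device() predicate groups the three QSFP parts so callers can ask "is this a 40G part" in one place. A trivial standalone check using the values above:

#include <stdio.h>

#define I40E_DEV_ID_QSFP_A 0x1583
#define I40E_DEV_ID_QSFP_B 0x1584
#define I40E_DEV_ID_QSFP_C 0x1585

#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
			       (d) == I40E_DEV_ID_QSFP_B || \
			       (d) == I40E_DEV_ID_QSFP_C)

int main(void)
{
	printf("0x1584 is 40G? %s\n",
	       i40e_is_40G_device(0x1584) ? "yes" : "no");
	return 0;
}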
#define I40E_MAX_VSI_QP 16
#define I40E_MAX_VF_VSI 3
#define I40E_MAX_CHAINED_RX_BUFFERS 5
+#define I40E_MAX_PF_UDP_OFFLOAD_PORTS 16
/* Max default timeout in ms */
#define I40E_MAX_NVM_TIMEOUT 18000
-/* Check whether address is multicast. This is little-endian specific check.*/
-#define I40E_IS_MULTICAST(address) \
- (bool)(((u8 *)(address))[0] & ((u8)0x01))
-
-/* Check whether an address is broadcast. */
-#define I40E_IS_BROADCAST(address) \
- ((((u8 *)(address))[0] == ((u8)0xff)) && \
- (((u8 *)(address))[1] == ((u8)0xff)))
-
/* Switch from mc to the 2usec global time (this is the GTIME resolution) */
#define I40E_MS_TO_GTIME(time) (((time) * 1000) / 2)
@@ -75,8 +67,6 @@
struct i40e_hw;
typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
-#define I40E_ETH_LENGTH_OF_ADDRESS 6
-
/* Data type manipulation macros. */
#define I40E_DESC_UNUSED(R) \
@@ -85,9 +75,10 @@ typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
/* bitfields for Tx queue mapping in QTX_CTL */
#define I40E_QTX_CTL_VF_QUEUE 0x0
+#define I40E_QTX_CTL_VM_QUEUE 0x1
#define I40E_QTX_CTL_PF_QUEUE 0x2
-/* debug masks */
+/* debug masks - set these bits in hw->debug_mask to control output */
enum i40e_debug_mask {
I40E_DEBUG_INIT = 0x00000001,
I40E_DEBUG_RELEASE = 0x00000002,
@@ -101,10 +92,10 @@ enum i40e_debug_mask {
I40E_DEBUG_DCB = 0x00000400,
I40E_DEBUG_DIAG = 0x00000800,
- I40E_DEBUG_AQ_MESSAGE = 0x01000000, /* for i40e_debug() */
+ I40E_DEBUG_AQ_MESSAGE = 0x01000000,
I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000,
- I40E_DEBUG_AQ_COMMAND = 0x06000000, /* for i40e_debug_aq() */
+ I40E_DEBUG_AQ_COMMAND = 0x06000000,
I40E_DEBUG_AQ = 0x0F000000,
I40E_DEBUG_USER = 0xF0000000,
@@ -134,6 +125,7 @@ enum i40e_media_type {
I40E_MEDIA_TYPE_BASET,
I40E_MEDIA_TYPE_BACKPLANE,
I40E_MEDIA_TYPE_CX4,
+ I40E_MEDIA_TYPE_DA,
I40E_MEDIA_TYPE_VIRTUAL
};
@@ -171,6 +163,7 @@ struct i40e_link_status {
u8 link_info;
u8 an_info;
u8 ext_info;
+ u8 loopback;
/* is Link Status Event notification to SW enabled */
bool lse_enable;
};
@@ -236,9 +229,9 @@ struct i40e_hw_capabilities {
struct i40e_mac_info {
enum i40e_mac_type type;
- u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
- u8 perm_addr[I40E_ETH_LENGTH_OF_ADDRESS];
- u8 san_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 addr[ETH_ALEN];
+ u8 perm_addr[ETH_ALEN];
+ u8 san_addr[ETH_ALEN];
u16 max_fcoeq;
};
@@ -500,18 +493,26 @@ enum i40e_rx_desc_status_bits {
I40E_RX_DESC_STATUS_L2TAG1P_SHIFT = 2,
I40E_RX_DESC_STATUS_L3L4P_SHIFT = 3,
I40E_RX_DESC_STATUS_CRCP_SHIFT = 4,
- I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 3 BITS */
+ I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
+ I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
I40E_RX_DESC_STATUS_PIF_SHIFT = 8,
I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
I40E_RX_DESC_STATUS_FLM_SHIFT = 11,
I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */
- I40E_RX_DESC_STATUS_LPBK_SHIFT = 14
+ I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
+ I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
+ I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
+ I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18
};
#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
-#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x7UL << \
+#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
+#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
+#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK (0x1UL << \
+ I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+
enum i40e_rx_desc_fltstat_values {
I40E_RX_DESC_FLTSTAT_NO_DATA = 0,
I40E_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? FD_ID : RSV */
@@ -547,28 +548,32 @@ enum i40e_rx_desc_error_l3l4e_fcoe_masks {
/* Packet type non-ip values */
enum i40e_rx_l2_ptype {
- I40E_RX_PTYPE_L2_RESERVED = 0,
- I40E_RX_PTYPE_L2_MAC_PAY2 = 1,
- I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
- I40E_RX_PTYPE_L2_FIP_PAY2 = 3,
- I40E_RX_PTYPE_L2_OUI_PAY2 = 4,
- I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
- I40E_RX_PTYPE_L2_LLDP_PAY2 = 6,
- I40E_RX_PTYPE_L2_ECP_PAY2 = 7,
- I40E_RX_PTYPE_L2_EVB_PAY2 = 8,
- I40E_RX_PTYPE_L2_QCN_PAY2 = 9,
- I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10,
- I40E_RX_PTYPE_L2_ARP = 11,
- I40E_RX_PTYPE_L2_FCOE_PAY3 = 12,
- I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
- I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
- I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
- I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
- I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
- I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
- I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
- I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
- I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21
+ I40E_RX_PTYPE_L2_RESERVED = 0,
+ I40E_RX_PTYPE_L2_MAC_PAY2 = 1,
+ I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
+ I40E_RX_PTYPE_L2_FIP_PAY2 = 3,
+ I40E_RX_PTYPE_L2_OUI_PAY2 = 4,
+ I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
+ I40E_RX_PTYPE_L2_LLDP_PAY2 = 6,
+ I40E_RX_PTYPE_L2_ECP_PAY2 = 7,
+ I40E_RX_PTYPE_L2_EVB_PAY2 = 8,
+ I40E_RX_PTYPE_L2_QCN_PAY2 = 9,
+ I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10,
+ I40E_RX_PTYPE_L2_ARP = 11,
+ I40E_RX_PTYPE_L2_FCOE_PAY3 = 12,
+ I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
+ I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
+ I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
+ I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
+ I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21,
+ I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58,
+ I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87,
+ I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124,
+ I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153
};
struct i40e_rx_ptype_decoded {
@@ -852,10 +857,7 @@ struct i40e_filter_program_desc {
/* Packet Classifier Types for filters */
enum i40e_filter_pctype {
- /* Note: Value 0-25 are reserved for future use */
- I40E_FILTER_PCTYPE_IPV4_TEREDO_UDP = 26,
- I40E_FILTER_PCTYPE_IPV6_TEREDO_UDP = 27,
- I40E_FILTER_PCTYPE_NONF_IPV4_1588_UDP = 28,
+ /* Note: Values 0-28 are reserved for future use */
I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
@@ -864,8 +866,7 @@ enum i40e_filter_pctype {
I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
- /* Note: Value 37 is reserved for future use */
- I40E_FILTER_PCTYPE_NONF_IPV6_1588_UDP = 38,
+ /* Note: Values 37-38 are reserved for future use */
I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
@@ -877,7 +878,8 @@ enum i40e_filter_pctype {
/* Note: Value 47 is reserved for future use */
I40E_FILTER_PCTYPE_FCOE_OX = 48,
I40E_FILTER_PCTYPE_FCOE_RX = 49,
- /* Note: Value 50-62 are reserved for future use */
+ I40E_FILTER_PCTYPE_FCOE_OTHER = 50,
+ /* Note: Values 51-62 are reserved for future use */
I40E_FILTER_PCTYPE_L2_PAYLOAD = 63,
};
@@ -1014,6 +1016,7 @@ struct i40e_hw_port_stats {
#define I40E_SR_NVM_CONTROL_WORD 0x00
#define I40E_SR_EMP_MODULE_PTR 0x0F
#define I40E_SR_NVM_IMAGE_VERSION 0x18
+#define I40E_SR_NVM_WAKE_ON_LAN 0x19
#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27
#define I40E_SR_NVM_EETRACK_LO 0x2D
#define I40E_SR_NVM_EETRACK_HI 0x2E
@@ -1138,17 +1141,4 @@ enum i40e_reset_type {
I40E_RESET_GLOBR = 2,
I40E_RESET_EMPR = 3,
};
-
-/* IEEE 802.1AB LLDP Agent Variables from NVM */
-#define I40E_NVM_LLDP_CFG_PTR 0xF
-struct i40e_lldp_variables {
- u16 length;
- u16 adminstatus;
- u16 msgfasttx;
- u16 msgtxinterval;
- u16 txparams;
- u16 timers;
- u16 crc8;
-};
-
#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
index cc6654f1dac7..22a1b69cd646 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -142,7 +141,7 @@ struct i40e_virtchnl_vsi_resource {
u16 num_queue_pairs;
enum i40e_vsi_type vsi_type;
u16 qset_handle;
- u8 default_mac_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 default_mac_addr[ETH_ALEN];
};
/* VF offload flags */
#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
@@ -265,7 +264,7 @@ struct i40e_virtchnl_queue_select {
*/
struct i40e_virtchnl_ether_addr {
- u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 addr[ETH_ALEN];
u8 pad[2];
};
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 07596982a477..b9d1c1c8ca5a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -70,7 +69,7 @@ static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
{
struct i40e_pf *pf = vf->pf;
- return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
+ return vector_id <= pf->hw.func_caps.num_msix_vectors_vf;
}
/***********************vf resource mgmt routines*****************/
@@ -102,130 +101,6 @@ static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx,
}
/**
- * i40e_ctrl_vsi_tx_queue
- * @vf: pointer to the vf info
- * @vsi_idx: index of VSI in PF struct
- * @vsi_queue_id: vsi relative queue index
- * @ctrl: control flags
- *
- * enable/disable/enable check/disable check
- **/
-static int i40e_ctrl_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
- u16 vsi_queue_id,
- enum i40e_queue_ctrl ctrl)
-{
- struct i40e_pf *pf = vf->pf;
- struct i40e_hw *hw = &pf->hw;
- bool writeback = false;
- u16 pf_queue_id;
- int ret = 0;
- u32 reg;
-
- pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
- reg = rd32(hw, I40E_QTX_ENA(pf_queue_id));
-
- switch (ctrl) {
- case I40E_QUEUE_CTRL_ENABLE:
- reg |= I40E_QTX_ENA_QENA_REQ_MASK;
- writeback = true;
- break;
- case I40E_QUEUE_CTRL_ENABLECHECK:
- ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? 0 : -EPERM;
- break;
- case I40E_QUEUE_CTRL_DISABLE:
- reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
- writeback = true;
- break;
- case I40E_QUEUE_CTRL_DISABLECHECK:
- ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
- break;
- case I40E_QUEUE_CTRL_FASTDISABLE:
- reg |= I40E_QTX_ENA_FAST_QDIS_MASK;
- writeback = true;
- break;
- case I40E_QUEUE_CTRL_FASTDISABLECHECK:
- ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
- if (!ret) {
- reg &= ~I40E_QTX_ENA_FAST_QDIS_MASK;
- writeback = true;
- }
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- if (writeback) {
- wr32(hw, I40E_QTX_ENA(pf_queue_id), reg);
- i40e_flush(hw);
- }
-
- return ret;
-}
-
-/**
- * i40e_ctrl_vsi_rx_queue
- * @vf: pointer to the vf info
- * @vsi_idx: index of VSI in PF struct
- * @vsi_queue_id: vsi relative queue index
- * @ctrl: control flags
- *
- * enable/disable/enable check/disable check
- **/
-static int i40e_ctrl_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
- u16 vsi_queue_id,
- enum i40e_queue_ctrl ctrl)
-{
- struct i40e_pf *pf = vf->pf;
- struct i40e_hw *hw = &pf->hw;
- bool writeback = false;
- u16 pf_queue_id;
- int ret = 0;
- u32 reg;
-
- pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
- reg = rd32(hw, I40E_QRX_ENA(pf_queue_id));
-
- switch (ctrl) {
- case I40E_QUEUE_CTRL_ENABLE:
- reg |= I40E_QRX_ENA_QENA_REQ_MASK;
- writeback = true;
- break;
- case I40E_QUEUE_CTRL_ENABLECHECK:
- ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? 0 : -EPERM;
- break;
- case I40E_QUEUE_CTRL_DISABLE:
- reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
- writeback = true;
- break;
- case I40E_QUEUE_CTRL_DISABLECHECK:
- ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
- break;
- case I40E_QUEUE_CTRL_FASTDISABLE:
- reg |= I40E_QRX_ENA_FAST_QDIS_MASK;
- writeback = true;
- break;
- case I40E_QUEUE_CTRL_FASTDISABLECHECK:
- ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
- if (!ret) {
- reg &= ~I40E_QRX_ENA_FAST_QDIS_MASK;
- writeback = true;
- }
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- if (writeback) {
- wr32(hw, I40E_QRX_ENA(pf_queue_id), reg);
- i40e_flush(hw);
- }
-
- return ret;
-}
-
-/**
* i40e_config_irq_link_list
* @vf: pointer to the vf info
* @vsi_idx: index of VSI in PF struct
@@ -260,23 +135,17 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
goto irq_list_done;
}
tempmap = vecmap->rxq_map;
- vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (vsi_queue_id < I40E_MAX_VSI_QP) {
+ for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
linklistmap |= (1 <<
(I40E_VIRTCHNL_SUPPORTED_QTYPES *
vsi_queue_id));
- vsi_queue_id =
- find_next_bit(&tempmap, I40E_MAX_VSI_QP, vsi_queue_id + 1);
}
tempmap = vecmap->txq_map;
- vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (vsi_queue_id < I40E_MAX_VSI_QP) {
+ for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
linklistmap |= (1 <<
(I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id
+ 1));
- vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- vsi_queue_id + 1);
}
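The conversions above replace open-coded find_first_bit()/find_next_bit() loops with the kernel's for_each_set_bit() helper; behaviour is unchanged, the iteration over set bits is simply expressed once. A userspace sketch of the equivalent loop over a single word:

#include <stdio.h>

int main(void)
{
	unsigned long rxq_map = 0x15;	/* bits 0, 2 and 4 set */
	unsigned int q;

	/* Equivalent, in spirit, to: for_each_set_bit(q, &rxq_map, 16) */
	for (q = 0; q < 16; q++) {
		if (!(rxq_map & (1UL << q)))
			continue;
		printf("queue %u mapped\n", q);
	}
	return 0;
}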
next_q = find_first_bit(&linklistmap,
@@ -307,7 +176,8 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
(I40E_MAX_VSI_QP *
I40E_VIRTCHNL_SUPPORTED_QTYPES),
next_q + 1);
- if (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
+ if (next_q <
+ (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx,
@@ -499,7 +369,6 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
{
struct i40e_mac_filter *f = NULL;
struct i40e_pf *pf = vf->pf;
- struct i40e_hw *hw = &pf->hw;
struct i40e_vsi *vsi;
int ret = 0;
@@ -513,14 +382,32 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
goto error_alloc_vsi_res;
}
if (type == I40E_VSI_SRIOV) {
+ u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
vf->lan_vsi_index = vsi->idx;
vf->lan_vsi_id = vsi->id;
dev_info(&pf->pdev->dev,
- "LAN VSI index %d, VSI id %d\n",
- vsi->idx, vsi->id);
+ "VF %d assigned LAN VSI index %d, VSI id %d\n",
+ vf->vf_id, vsi->idx, vsi->id);
+ /* If the port VLAN has been configured and then the
+ * VF driver was removed then the VSI port VLAN
+ * configuration was destroyed. Check if there is
+ * a port VLAN and restore the VSI configuration if
+ * needed.
+ */
+ if (vf->port_vlan_id)
+ i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
- 0, true, false);
+ vf->port_vlan_id, true, false);
+ if (!f)
+ dev_info(&pf->pdev->dev,
+ "Could not allocate VF MAC addr\n");
+ f = i40e_add_filter(vsi, brdcast, vf->port_vlan_id,
+ true, false);
+ if (!f)
+ dev_info(&pf->pdev->dev,
+ "Could not allocate VF broadcast filter\n");
}
+
if (!f) {
dev_err(&pf->pdev->dev, "Unable to add ucast filter\n");
ret = -ENOMEM;
@@ -534,150 +421,11 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
goto error_alloc_vsi_res;
}
- /* accept bcast pkts. by default */
- ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
- if (ret) {
- dev_err(&pf->pdev->dev,
- "set vsi bcast failed for vf %d, vsi %d, aq_err %d\n",
- vf->vf_id, vsi->idx, pf->hw.aq.asq_last_status);
- ret = -EINVAL;
- }
-
error_alloc_vsi_res:
return ret;
}
/**
- * i40e_reset_vf
- * @vf: pointer to the vf structure
- * @flr: VFLR was issued or not
- *
- * reset the vf
- **/
-int i40e_reset_vf(struct i40e_vf *vf, bool flr)
-{
- int ret = -ENOENT;
- struct i40e_pf *pf = vf->pf;
- struct i40e_hw *hw = &pf->hw;
- u32 reg, reg_idx, msix_vf;
- bool rsd = false;
- u16 pf_queue_id;
- int i, j;
-
- /* warn the VF */
- wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_INPROGRESS);
-
- clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
-
- /* PF triggers VFR only when VF requests, in case of
- * VFLR, HW triggers VFR
- */
- if (!flr) {
- /* reset vf using VPGEN_VFRTRIG reg */
- reg = I40E_VPGEN_VFRTRIG_VFSWR_MASK;
- wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
- i40e_flush(hw);
- }
-
- /* poll VPGEN_VFRSTAT reg to make sure
- * that reset is complete
- */
- for (i = 0; i < 4; i++) {
- /* vf reset requires driver to first reset the
- * vf & than poll the status register to make sure
- * that the requested op was completed
- * successfully
- */
- udelay(10);
- reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
- if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
- rsd = true;
- break;
- }
- }
-
- if (!rsd)
- dev_err(&pf->pdev->dev, "VF reset check timeout %d\n",
- vf->vf_id);
-
- /* fast disable qps */
- for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
- ret = i40e_ctrl_vsi_tx_queue(vf, vf->lan_vsi_index, j,
- I40E_QUEUE_CTRL_FASTDISABLE);
- ret = i40e_ctrl_vsi_rx_queue(vf, vf->lan_vsi_index, j,
- I40E_QUEUE_CTRL_FASTDISABLE);
- }
-
- /* Queue enable/disable requires driver to
- * first reset the vf & than poll the status register
- * to make sure that the requested op was completed
- * successfully
- */
- udelay(10);
- for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
- ret = i40e_ctrl_vsi_tx_queue(vf, vf->lan_vsi_index, j,
- I40E_QUEUE_CTRL_FASTDISABLECHECK);
- if (ret)
- dev_info(&pf->pdev->dev,
- "Queue control check failed on Tx queue %d of VSI %d VF %d\n",
- vf->lan_vsi_index, j, vf->vf_id);
- ret = i40e_ctrl_vsi_rx_queue(vf, vf->lan_vsi_index, j,
- I40E_QUEUE_CTRL_FASTDISABLECHECK);
- if (ret)
- dev_info(&pf->pdev->dev,
- "Queue control check failed on Rx queue %d of VSI %d VF %d\n",
- vf->lan_vsi_index, j, vf->vf_id);
- }
-
- /* clear the irq settings */
- msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
- for (i = 0; i < msix_vf; i++) {
- /* format is same for both registers */
- if (0 == i)
- reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
- else
- reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
- (vf->vf_id))
- + (i - 1));
- reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
- I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
- wr32(hw, reg_idx, reg);
- i40e_flush(hw);
- }
- /* disable interrupts so the VF starts in a known state */
- for (i = 0; i < msix_vf; i++) {
- /* format is same for both registers */
- if (0 == i)
- reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
- else
- reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
- (vf->vf_id))
- + (i - 1));
- wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
- i40e_flush(hw);
- }
-
- /* set the defaults for the rqctl & tqctl registers */
- reg = (I40E_QINT_RQCTL_NEXTQ_INDX_MASK | I40E_QINT_RQCTL_ITR_INDX_MASK |
- I40E_QINT_RQCTL_NEXTQ_TYPE_MASK);
- for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
- pf_queue_id = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);
- wr32(hw, I40E_QINT_RQCTL(pf_queue_id), reg);
- wr32(hw, I40E_QINT_TQCTL(pf_queue_id), reg);
- }
-
- /* clear the reset bit in the VPGEN_VFRTRIG reg */
- reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
- reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
- wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
- /* tell the VF the reset is done */
- wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED);
- i40e_flush(hw);
-
- return ret;
-}
-
-/**
* i40e_enable_vf_mappings
* @vf: pointer to the vf info
*
@@ -756,6 +504,9 @@ static void i40e_disable_vf_mappings(struct i40e_vf *vf)
static void i40e_free_vf_res(struct i40e_vf *vf)
{
struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg_idx, reg;
+ int i, msix_vf;
/* free vsi & disconnect it from the parent uplink */
if (vf->lan_vsi_index) {
@@ -763,6 +514,34 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
vf->lan_vsi_index = 0;
vf->lan_vsi_id = 0;
}
+ msix_vf = pf->hw.func_caps.num_msix_vectors_vf + 1;
+ /* disable interrupts so the VF starts in a known state */
+ for (i = 0; i < msix_vf; i++) {
+ /* format is same for both registers */
+ if (0 == i)
+ reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
+ else
+ reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
+ (vf->vf_id))
+ + (i - 1));
+ wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
+ i40e_flush(hw);
+ }
+
+ /* clear the irq settings */
+ for (i = 0; i < msix_vf; i++) {
+ /* format is same for both registers */
+ if (0 == i)
+ reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
+ else
+ reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
+ (vf->vf_id))
+ + (i - 1));
+ reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
+ I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
+ wr32(hw, reg_idx, reg);
+ i40e_flush(hw);
+ }
/* reset some of the state variables keeping
* track of the resources
*/
@@ -804,6 +583,111 @@ error_alloc:
return ret;
}
+#define VF_DEVICE_STATUS 0xAA
+#define VF_TRANS_PENDING_MASK 0x20
+/**
+ * i40e_quiesce_vf_pci
+ * @vf: pointer to the vf structure
+ *
+ * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
+ * if the transactions never clear.
+ **/
+static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ int vf_abs_id, i;
+ u32 reg;
+
+ vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
+
+ wr32(hw, I40E_PF_PCI_CIAA,
+ VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
+ for (i = 0; i < 100; i++) {
+ reg = rd32(hw, I40E_PF_PCI_CIAD);
+ if ((reg & VF_TRANS_PENDING_MASK) == 0)
+ return 0;
+ udelay(1);
+ }
+ return -EIO;
+}
+
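i40e_quiesce_vf_pci selects the VF's PCIe Device Status register through the PF_PCI_CIAA/CIAD indirect window and polls the Transactions Pending bit (mask 0x20) up to 100 times before giving up with -EIO. A standalone sketch of that bounded-poll shape, with the register read stubbed out:

#include <errno.h>
#include <stdio.h>

#define TRANS_PENDING 0x20	/* PCIe Device Status: Transactions Pending */

/* Stub standing in for rd32(hw, I40E_PF_PCI_CIAD): pretend the pending
 * bit clears on the third poll. */
static unsigned int read_status(void)
{
	static int polls;
	return ++polls < 3 ? TRANS_PENDING : 0;
}

static int wait_transactions_clear(void)
{
	for (int i = 0; i < 100; i++) {
		if (!(read_status() & TRANS_PENDING))
			return 0;	/* quiesced */
		/* the driver udelay(1)s between polls */
	}
	return -EIO;			/* never cleared */
}

int main(void)
{
	printf("quiesce result: %d\n", wait_transactions_clear());
	return 0;
}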
+/**
+ * i40e_reset_vf
+ * @vf: pointer to the vf structure
+ * @flr: VFLR was issued or not
+ *
+ * reset the vf
+ **/
+void i40e_reset_vf(struct i40e_vf *vf, bool flr)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ bool rsd = false;
+ int i;
+ u32 reg;
+
+ /* warn the VF */
+ clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
+
+ /* In the case of a VFLR, the HW has already reset the VF and we
+ * just need to clean up, so don't hit the VFRTRIG register.
+ */
+ if (!flr) {
+ /* reset vf using VPGEN_VFRTRIG reg */
+ reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
+ reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+ wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
+ i40e_flush(hw);
+ }
+
+ if (i40e_quiesce_vf_pci(vf))
+ dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
+ vf->vf_id);
+
+ /* poll VPGEN_VFRSTAT reg to make sure
+ * that reset is complete
+ */
+ for (i = 0; i < 100; i++) {
+ /* vf reset requires driver to first reset the
+ * vf & then poll the status register to make sure
+ * that the requested op was completed
+ * successfully
+ */
+ udelay(10);
+ reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
+ if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
+ rsd = true;
+ break;
+ }
+ }
+
+ if (!rsd)
+ dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
+ vf->vf_id);
+ wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED);
+ /* clear the reset bit in the VPGEN_VFRTRIG reg */
+ reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
+ reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+ wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
+
+ /* On initial reset, we won't have any queues */
+ if (vf->lan_vsi_index == 0)
+ goto complete_reset;
+
+ i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_index], false);
+complete_reset:
+ /* reallocate vf resources to reset the VSI state */
+ i40e_free_vf_res(vf);
+ mdelay(10);
+ i40e_alloc_vf_res(vf);
+ i40e_enable_vf_mappings(vf);
+
+ /* tell the VF the reset is done */
+ wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
+ i40e_flush(hw);
+}
+
/**
* i40e_vfs_are_assigned
* @pf: pointer to the pf structure
@@ -816,7 +700,7 @@ static bool i40e_vfs_are_assigned(struct i40e_pf *pf)
struct pci_dev *vfdev;
/* loop through all the VFs to see if we own any that are assigned */
- vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, I40E_VF_DEVICE_ID , NULL);
+ vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_VF, NULL);
while (vfdev) {
/* if we don't own it we don't care */
if (vfdev->is_virtfn && pci_physfn(vfdev) == pdev) {
@@ -826,12 +710,82 @@ static bool i40e_vfs_are_assigned(struct i40e_pf *pf)
}
vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- I40E_VF_DEVICE_ID,
+ I40E_DEV_ID_VF,
vfdev);
}
return false;
}
+#ifdef CONFIG_PCI_IOV
+
+/**
+ * i40e_enable_pf_switch_lb
+ * @pf: pointer to the pf structure
+ *
+ * enable switch loopback or die - no point in a return value
+ **/
+static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
+{
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi_context ctxt;
+ int aq_ret;
+
+ ctxt.seid = pf->main_vsi_seid;
+ ctxt.pf_num = pf->hw.pf_id;
+ ctxt.vf_num = 0;
+ aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "%s couldn't get pf vsi config, err %d, aq_err %d\n",
+ __func__, aq_ret, pf->hw.aq.asq_last_status);
+ return;
+ }
+ ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+ ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+
+ aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "%s: update vsi switch failed, aq_err=%d\n",
+ __func__, vsi->back->hw.aq.asq_last_status);
+ }
+}
+#endif
+
+/**
+ * i40e_disable_pf_switch_lb
+ * @pf: pointer to the pf structure
+ *
+ * disable switch loopback or die - no point in a return value
+ **/
+static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
+{
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi_context ctxt;
+ int aq_ret;
+
+ ctxt.seid = pf->main_vsi_seid;
+ ctxt.pf_num = pf->hw.pf_id;
+ ctxt.vf_num = 0;
+ aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "%s couldn't get pf vsi config, err %d, aq_err %d\n",
+ __func__, aq_ret, pf->hw.aq.asq_last_status);
+ return;
+ }
+ ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+ ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+
+ aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "%s: update vsi switch failed, aq_err=%d\n",
+ __func__, vsi->back->hw.aq.asq_last_status);
+ }
+}
/**
* i40e_free_vfs
@@ -842,17 +796,20 @@ static bool i40e_vfs_are_assigned(struct i40e_pf *pf)
void i40e_free_vfs(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
- int i;
+ u32 reg_idx, bit_idx;
+ int i, tmp, vf_id;
if (!pf->vf)
return;
/* Disable interrupt 0 so we don't try to handle the VFLR. */
- wr32(hw, I40E_PFINT_DYN_CTL0, 0);
- i40e_flush(hw);
+ i40e_irq_dynamic_disable_icr0(pf);
+ mdelay(10); /* let any messages in transit get finished up */
/* free up vf resources */
- for (i = 0; i < pf->num_alloc_vfs; i++) {
+ tmp = pf->num_alloc_vfs;
+ pf->num_alloc_vfs = 0;
+ for (i = 0; i < tmp; i++) {
if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
i40e_free_vf_res(&pf->vf[i]);
/* disable qp mappings */
@@ -861,20 +818,25 @@ void i40e_free_vfs(struct i40e_pf *pf)
kfree(pf->vf);
pf->vf = NULL;
- pf->num_alloc_vfs = 0;
- if (!i40e_vfs_are_assigned(pf))
+ if (!i40e_vfs_are_assigned(pf)) {
pci_disable_sriov(pf->pdev);
- else
+ /* Acknowledge VFLR for all VFS. Without this, VFs will fail to
+ * work correctly when SR-IOV gets re-enabled.
+ */
+ for (vf_id = 0; vf_id < tmp; vf_id++) {
+ reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
+ bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
+ wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
+ }
+ i40e_disable_pf_switch_lb(pf);
+ } else {
dev_warn(&pf->pdev->dev,
"unable to disable SR-IOV because VFs are assigned.\n");
+ }
/* Re-enable interrupt 0. */
- wr32(hw, I40E_PFINT_DYN_CTL0,
- I40E_PFINT_DYN_CTL0_INTENA_MASK |
- I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
- (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
- i40e_flush(hw);
+ i40e_irq_dynamic_enable_icr0(pf);
}
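The VFLR acknowledgement added above is simple index arithmetic: GLGEN_VFLRSTAT is an array of 32-bit registers with one bit per absolute VF number, so the register index is abs_id / 32 and the bit within it abs_id % 32. A standalone sketch (the vf_base_id value is invented for illustration):

#include <stdio.h>

int main(void)
{
	unsigned int vf_base_id = 64;	/* illustrative base, per-PF */
	unsigned int vf_id = 5;

	unsigned int abs_id = vf_base_id + vf_id;
	unsigned int reg_idx = abs_id / 32;	/* which VFLRSTAT register */
	unsigned int bit_idx = abs_id % 32;	/* which bit inside it */

	printf("ack VFLR: write (1 << %u) to GLGEN_VFLRSTAT(%u)\n",
	       bit_idx, reg_idx);
	return 0;
}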
#ifdef CONFIG_PCI_IOV
@@ -890,6 +852,9 @@ static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
struct i40e_vf *vfs;
int i, ret = 0;
+ /* Disable interrupt 0 so we don't try to handle the VFLR. */
+ i40e_irq_dynamic_disable_icr0(pf);
+
ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
if (ret) {
dev_err(&pf->pdev->dev,
@@ -913,11 +878,8 @@ static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
/* assign default capabilities */
set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
-
- ret = i40e_alloc_vf_res(&vfs[i]);
- i40e_reset_vf(&vfs[i], true);
- if (ret)
- break;
+ /* vf resources get allocated during reset */
+ i40e_reset_vf(&vfs[i], false);
/* enable vf vplan_qtable mappings */
i40e_enable_vf_mappings(&vfs[i]);
@@ -925,10 +887,13 @@ static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
pf->vf = vfs;
pf->num_alloc_vfs = num_alloc_vfs;
+ i40e_enable_pf_switch_lb(pf);
err_alloc:
if (ret)
i40e_free_vfs(pf);
err_iov:
+ /* Re-enable interrupt 0. */
+ i40e_irq_dynamic_enable_icr0(pf);
return ret;
}
@@ -1009,6 +974,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
{
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
+ int true_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
i40e_status aq_ret;
/* single place to detect unsuccessful return values */
@@ -1028,8 +994,8 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
vf->num_valid_msgs++;
}
- aq_ret = i40e_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval,
- msg, msglen, NULL);
+ aq_ret = i40e_aq_send_msg_to_vf(hw, true_vf_id, v_opcode, v_retval,
+ msg, msglen, NULL);
if (aq_ret) {
dev_err(&pf->pdev->dev,
"Unable to send the message to VF %d aq_err %d\n",
@@ -1144,12 +1110,10 @@ err:
* unlike other virtchnl messages, pf driver
* doesn't send the response back to the vf
**/
-static int i40e_vc_reset_vf_msg(struct i40e_vf *vf)
+static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
{
- if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
- return -ENOENT;
-
- return i40e_reset_vf(vf, false);
+ if (test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
+ i40e_reset_vf(vf, false);
}
/**
@@ -1291,27 +1255,21 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
/* lookout for the invalid queue index */
tempmap = map->rxq_map;
- vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (vsi_queue_id < I40E_MAX_VSI_QP) {
+ for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
vsi_queue_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
- vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- vsi_queue_id + 1);
}
tempmap = map->txq_map;
- vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (vsi_queue_id < I40E_MAX_VSI_QP) {
+ for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
vsi_queue_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
- vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- vsi_queue_id + 1);
}
i40e_config_irq_link_list(vf, vsi_id, map);
@@ -1337,8 +1295,6 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_pf *pf = vf->pf;
u16 vsi_id = vqs->vsi_id;
i40e_status aq_ret = 0;
- unsigned long tempmap;
- u16 queue_id;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
@@ -1354,66 +1310,8 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
-
- tempmap = vqs->rx_queues;
- queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (queue_id < I40E_MAX_VSI_QP) {
- if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
- aq_ret = I40E_ERR_PARAM;
- goto error_param;
- }
- i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
- I40E_QUEUE_CTRL_ENABLE);
-
- queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- queue_id + 1);
- }
-
- tempmap = vqs->tx_queues;
- queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (queue_id < I40E_MAX_VSI_QP) {
- if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
- aq_ret = I40E_ERR_PARAM;
- goto error_param;
- }
- i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
- I40E_QUEUE_CTRL_ENABLE);
-
- queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- queue_id + 1);
- }
-
- /* Poll the status register to make sure that the
- * requested op was completed successfully
- */
- udelay(10);
-
- tempmap = vqs->rx_queues;
- queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (queue_id < I40E_MAX_VSI_QP) {
- if (i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
- I40E_QUEUE_CTRL_ENABLECHECK)) {
- dev_err(&pf->pdev->dev,
- "Queue control check failed on RX queue %d of VSI %d VF %d\n",
- queue_id, vsi_id, vf->vf_id);
- }
- queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- queue_id + 1);
- }
-
- tempmap = vqs->tx_queues;
- queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (queue_id < I40E_MAX_VSI_QP) {
- if (i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
- I40E_QUEUE_CTRL_ENABLECHECK)) {
- dev_err(&pf->pdev->dev,
- "Queue control check failed on TX queue %d of VSI %d VF %d\n",
- queue_id, vsi_id, vf->vf_id);
- }
- queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- queue_id + 1);
- }
-
+ if (i40e_vsi_control_rings(pf->vsi[vsi_id], true))
+ aq_ret = I40E_ERR_TIMEOUT;
error_param:
/* send the response to the vf */
return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
@@ -1436,8 +1334,6 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_pf *pf = vf->pf;
u16 vsi_id = vqs->vsi_id;
i40e_status aq_ret = 0;
- unsigned long tempmap;
- u16 queue_id;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
@@ -1453,65 +1349,8 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
-
- tempmap = vqs->rx_queues;
- queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (queue_id < I40E_MAX_VSI_QP) {
- if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
- aq_ret = I40E_ERR_PARAM;
- goto error_param;
- }
- i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
- I40E_QUEUE_CTRL_DISABLE);
-
- queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- queue_id + 1);
- }
-
- tempmap = vqs->tx_queues;
- queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (queue_id < I40E_MAX_VSI_QP) {
- if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
- aq_ret = I40E_ERR_PARAM;
- goto error_param;
- }
- i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
- I40E_QUEUE_CTRL_DISABLE);
-
- queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- queue_id + 1);
- }
-
- /* Poll the status register to make sure that the
- * requested op was completed successfully
- */
- udelay(10);
-
- tempmap = vqs->rx_queues;
- queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (queue_id < I40E_MAX_VSI_QP) {
- if (i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
- I40E_QUEUE_CTRL_DISABLECHECK)) {
- dev_err(&pf->pdev->dev,
- "Queue control check failed on RX queue %d of VSI %d VF %d\n",
- queue_id, vsi_id, vf->vf_id);
- }
- queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- queue_id + 1);
- }
-
- tempmap = vqs->tx_queues;
- queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (queue_id < I40E_MAX_VSI_QP) {
- if (i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
- I40E_QUEUE_CTRL_DISABLECHECK)) {
- dev_err(&pf->pdev->dev,
- "Queue control check failed on TX queue %d of VSI %d VF %d\n",
- queue_id, vsi_id, vf->vf_id);
- }
- queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- queue_id + 1);
- }
+ if (i40e_vsi_control_rings(pf->vsi[vsi_id], false))
+ aq_ret = I40E_ERR_TIMEOUT;
error_param:
/* send the response to the vf */
@@ -1554,7 +1393,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
goto error_param;
}
i40e_update_eth_stats(vsi);
- memcpy(&stats, &vsi->eth_stats, sizeof(struct i40e_eth_stats));
+ stats = vsi->eth_stats;
error_param:
/* send the response back to the vf */
@@ -1563,6 +1402,40 @@ error_param:
}
/**
+ * i40e_check_vf_permission
+ * @vf: pointer to the vf info
+ * @macaddr: pointer to the MAC Address being checked
+ *
+ * Check if the VF has permission to add or delete unicast MAC address
+ * filters and return error code -EPERM if not. Then check if the
+ * address filter requested is broadcast or zero and if so return
+ * an invalid MAC address error code.
+ **/
+static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr)
+{
+ struct i40e_pf *pf = vf->pf;
+ int ret = 0;
+
+ if (is_broadcast_ether_addr(macaddr) ||
+ is_zero_ether_addr(macaddr)) {
+ dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr);
+ ret = I40E_ERR_INVALID_MAC_ADDR;
+ } else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) &&
+ !ether_addr_equal(macaddr, vf->default_lan_addr.addr)) {
+ /* If the host VMM administrator has set the VF MAC address
+ * administratively via the ndo_set_vf_mac command then deny
+ * permission to the VF to add or delete unicast MAC addresses.
+ * The VF may request to set the MAC address filter already
+ * assigned to it so do not return an error in that case.
+ */
+ dev_err(&pf->pdev->dev,
+ "VF attempting to override administratively set MAC address\nPlease reload the VF driver to resume normal operation\n");
+ ret = -EPERM;
+ }
+ return ret;
+}
+
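i40e_check_vf_permission encodes two rules: broadcast and all-zero addresses are always invalid, and once the host administrator has pinned the VF's MAC (pf_set_mac), any other unicast address from the VF is refused while the pinned address itself still passes. A miniature standalone version of the same decision logic, with hand-rolled stand-ins for the etherdevice.h helpers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Minimal stand-ins for is_zero/is_broadcast/is_multicast_ether_addr. */
static bool is_zero(const uint8_t *a)
{
	static const uint8_t z[6];
	return !memcmp(a, z, 6);
}
static bool is_bcast(const uint8_t *a)
{
	return (a[0] & a[1] & a[2] & a[3] & a[4] & a[5]) == 0xff;
}
static bool is_mcast(const uint8_t *a)
{
	return a[0] & 1;
}

/* Same decision shape as the helper above, in miniature. */
static int check(const uint8_t *mac, bool pf_set_mac, const uint8_t *pinned)
{
	if (is_bcast(mac) || is_zero(mac))
		return -1;			/* invalid address */
	if (pf_set_mac && !is_mcast(mac) && memcmp(mac, pinned, 6))
		return -2;			/* override denied */
	return 0;
}

int main(void)
{
	uint8_t pinned[6] = { 0x02, 0, 0, 0, 0, 1 };
	uint8_t other[6]  = { 0x02, 0, 0, 0, 0, 2 };

	printf("pinned addr: %d, other addr: %d\n",
	       check(pinned, true, pinned), check(other, true, pinned));
	return 0;
}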
+/**
* i40e_vc_add_mac_addr_msg
* @vf: pointer to the vf info
* @msg: pointer to the msg buffer
@@ -1577,24 +1450,20 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
u16 vsi_id = al->vsi_id;
- i40e_status aq_ret = 0;
+ i40e_status ret = 0;
int i;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
- aq_ret = I40E_ERR_PARAM;
+ ret = I40E_ERR_PARAM;
goto error_param;
}
for (i = 0; i < al->num_elements; i++) {
- if (is_broadcast_ether_addr(al->list[i].addr) ||
- is_zero_ether_addr(al->list[i].addr)) {
- dev_err(&pf->pdev->dev, "invalid VF MAC addr %pMAC\n",
- al->list[i].addr);
- aq_ret = I40E_ERR_PARAM;
+ ret = i40e_check_vf_permission(vf, al->list[i].addr);
+ if (ret)
goto error_param;
- }
}
vsi = pf->vsi[vsi_id];
@@ -1603,7 +1472,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_mac_filter *f;
f = i40e_find_mac(vsi, al->list[i].addr, true, false);
- if (f) {
+ if (!f) {
if (i40e_is_vsi_in_vlan(vsi))
f = i40e_put_mac_in_vlan(vsi, al->list[i].addr,
true, false);
@@ -1615,7 +1484,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
if (!f) {
dev_err(&pf->pdev->dev,
"Unable to add VF MAC filter\n");
- aq_ret = I40E_ERR_PARAM;
+ ret = I40E_ERR_PARAM;
goto error_param;
}
}
@@ -1627,7 +1496,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
error_param:
/* send the response to the vf */
return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
- aq_ret);
+ ret);
}
/**
@@ -1645,15 +1514,25 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
u16 vsi_id = al->vsi_id;
- i40e_status aq_ret = 0;
+ i40e_status ret = 0;
int i;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
- aq_ret = I40E_ERR_PARAM;
+ ret = I40E_ERR_PARAM;
goto error_param;
}
+
+ for (i = 0; i < al->num_elements; i++) {
+ if (is_broadcast_ether_addr(al->list[i].addr) ||
+ is_zero_ether_addr(al->list[i].addr)) {
+ dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
+ al->list[i].addr);
+ ret = I40E_ERR_INVALID_MAC_ADDR;
+ goto error_param;
+ }
+ }
vsi = pf->vsi[vsi_id];
/* delete addresses from the list */
@@ -1668,7 +1547,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
error_param:
/* send the response to the vf */
return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
- aq_ret);
+ ret);
}
/**
@@ -1777,30 +1656,6 @@ error_param:
}
/**
- * i40e_vc_fcoe_msg
- * @vf: pointer to the vf info
- * @msg: pointer to the msg buffer
- * @msglen: msg length
- *
- * called from the vf for the fcoe msgs
- **/
-static int i40e_vc_fcoe_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
-{
- i40e_status aq_ret = 0;
-
- if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
- !test_bit(I40E_VF_STAT_FCOEENA, &vf->vf_states)) {
- aq_ret = I40E_ERR_PARAM;
- goto error_param;
- }
- aq_ret = I40E_ERR_NOT_IMPLEMENTED;
-
-error_param:
- /* send the response to the vf */
- return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_FCOE, aq_ret);
-}
-
-/**
* i40e_vc_validate_vf_msg
* @vf: pointer to the vf info
* @msg: pointer to the msg buffer
@@ -1920,19 +1775,24 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen)
{
- struct i40e_vf *vf = &(pf->vf[vf_id]);
struct i40e_hw *hw = &pf->hw;
+ int local_vf_id = vf_id - hw->func_caps.vf_base_id;
+ struct i40e_vf *vf;
int ret;
pf->vf_aq_requests++;
+ if (local_vf_id >= pf->num_alloc_vfs)
+ return -EINVAL;
+ vf = &(pf->vf[local_vf_id]);
/* perform basic checks on the msg */
ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen);
if (ret) {
- dev_err(&pf->pdev->dev, "invalid message from vf %d\n", vf_id);
+ dev_err(&pf->pdev->dev, "Invalid message from vf %d, opcode %d, len %d\n",
+ local_vf_id, v_opcode, msglen);
return ret;
}
- wr32(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_VFACTIVE);
+
switch (v_opcode) {
case I40E_VIRTCHNL_OP_VERSION:
ret = i40e_vc_get_version_msg(vf);
@@ -1941,7 +1801,8 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
ret = i40e_vc_get_vf_resources_msg(vf);
break;
case I40E_VIRTCHNL_OP_RESET_VF:
- ret = i40e_vc_reset_vf_msg(vf);
+ i40e_vc_reset_vf_msg(vf);
+ ret = 0;
break;
case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
@@ -1973,13 +1834,10 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
case I40E_VIRTCHNL_OP_GET_STATS:
ret = i40e_vc_get_stats_msg(vf, msg, msglen);
break;
- case I40E_VIRTCHNL_OP_FCOE:
- ret = i40e_vc_fcoe_msg(vf, msg, msglen);
- break;
case I40E_VIRTCHNL_OP_UNKNOWN:
default:
- dev_err(&pf->pdev->dev,
- "Unsupported opcode %d from vf %d\n", v_opcode, vf_id);
+ dev_err(&pf->pdev->dev, "Unsupported opcode %d from vf %d\n",
+ v_opcode, local_vf_id);
ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
I40E_ERR_NOT_IMPLEMENTED);
break;
@@ -2015,19 +1873,7 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
/* clear the bit in GLGEN_VFLRSTAT */
wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
- if (i40e_reset_vf(vf, true))
- dev_err(&pf->pdev->dev,
- "Unable to reset the VF %d\n", vf_id);
- /* free up vf resources to destroy vsi state */
- i40e_free_vf_res(vf);
-
- /* allocate new vf resources with the default state */
- if (i40e_alloc_vf_res(vf))
- dev_err(&pf->pdev->dev,
- "Unable to allocate VF resources %d\n",
- vf_id);
-
- i40e_enable_vf_mappings(vf);
+ i40e_reset_vf(vf, true);
}
}
@@ -2183,6 +2029,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
goto error_param;
}
memcpy(vf->default_lan_addr.addr, mac, ETH_ALEN);
+ vf->pf_set_mac = true;
dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
ret = 0;
@@ -2229,6 +2076,20 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
goto error_pvid;
}
+ if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi))
+ dev_err(&pf->pdev->dev,
+ "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
+ vf_id);
+
+ /* Check for condition where there was already a port VLAN ID
+ * filter set and now it is being deleted by setting it to zero.
+ * Before deleting all the old VLAN filters we must add new ones
+ * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
+ * MAC addresses deleted.
+ */
+ if (!(vlan_id || qos) && vsi->info.pvid)
+ ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY);
+
if (vsi->info.pvid) {
/* kill old VLAN */
ret = i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
@@ -2243,7 +2104,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
ret = i40e_vsi_add_pvid(vsi,
vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT));
else
- i40e_vlan_stripping_disable(vsi);
+ i40e_vsi_remove_pvid(vsi);
if (vlan_id) {
dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
@@ -2257,12 +2118,20 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
vsi->back->hw.aq.asq_last_status);
goto error_pvid;
}
+ /* Kill non-vlan MAC filters - ignore error return since
+ * there might not be any non-vlan MAC filters.
+ */
+ i40e_vsi_kill_vlan(vsi, I40E_VLAN_ANY);
}
if (ret) {
dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
goto error_pvid;
}
+ /* The Port VLAN needs to be saved across resets the same as the
+ * default LAN MAC address.
+ */
+ vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
ret = 0;
error_pvid:
@@ -2294,7 +2163,6 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
int vf_id, struct ifla_vf_info *ivi)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
- struct i40e_mac_filter *f, *ftmp;
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
struct i40e_vf *vf;
@@ -2318,11 +2186,7 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
ivi->vf = vf_id;
- /* first entry of the list is the default ethernet address */
- list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
- memcpy(&ivi->mac, f->macaddr, I40E_ETH_LENGTH_OF_ADDRESS);
- break;
- }
+ memcpy(&ivi->mac, vf->default_lan_addr.addr, ETH_ALEN);
ivi->tx_rate = 0;
ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 360382cf3040..cc1feee36e12 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -82,6 +81,8 @@ struct i40e_vf {
struct i40e_virtchnl_ether_addr default_lan_addr;
struct i40e_virtchnl_ether_addr default_fcoe_addr;
+ u16 port_vlan_id;
+ bool pf_set_mac; /* The VMM admin set the VF MAC address */
/* VSI indices - actual VSI pointers are maintained in the PF structure
* When assigned, these will be non-zero, because VSI 0 is always
@@ -104,7 +105,7 @@ int i40e_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen);
int i40e_vc_process_vflr_event(struct i40e_pf *pf);
-int i40e_reset_vf(struct i40e_vf *vf, bool flr);
+void i40e_reset_vf(struct i40e_vf *vf, bool flr);
void i40e_vc_notify_vf_reset(struct i40e_vf *vf);
/* vf configuration related iplink handlers */
diff --git a/drivers/net/ethernet/intel/i40evf/Makefile b/drivers/net/ethernet/intel/i40evf/Makefile
new file mode 100644
index 000000000000..e09be37a07a8
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/Makefile
@@ -0,0 +1,33 @@
+################################################################################
+#
+# Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+# Copyright(c) 2013 Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+#
+################################################################################
+
+#
+# Makefile for the Intel(R) 40GbE VF driver
+#
+
+obj-$(CONFIG_I40EVF) += i40evf.o
+
+i40evf-objs := i40evf_main.o i40evf_ethtool.o i40evf_virtchnl.o \
+ i40e_txrx.o i40e_common.o i40e_adminq.o
+
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
new file mode 100644
index 000000000000..5470ce95936e
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -0,0 +1,927 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_status.h"
+#include "i40e_type.h"
+#include "i40e_register.h"
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+
+/**
+ * i40e_adminq_init_regs - Initialize AdminQ registers
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the alloc_asq and alloc_arq functions have already been called
+ **/
+static void i40e_adminq_init_regs(struct i40e_hw *hw)
+{
+ /* set head and tail registers in our local struct */
+ if (hw->mac.type == I40E_MAC_VF) {
+ hw->aq.asq.tail = I40E_VF_ATQT1;
+ hw->aq.asq.head = I40E_VF_ATQH1;
+ hw->aq.asq.len = I40E_VF_ATQLEN1;
+ hw->aq.arq.tail = I40E_VF_ARQT1;
+ hw->aq.arq.head = I40E_VF_ARQH1;
+ hw->aq.arq.len = I40E_VF_ARQLEN1;
+ } else {
+ hw->aq.asq.tail = I40E_PF_ATQT;
+ hw->aq.asq.head = I40E_PF_ATQH;
+ hw->aq.asq.len = I40E_PF_ATQLEN;
+ hw->aq.arq.tail = I40E_PF_ARQT;
+ hw->aq.arq.head = I40E_PF_ARQH;
+ hw->aq.arq.len = I40E_PF_ARQLEN;
+ }
+}
+
+/**
+ * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
+ * @hw: pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
+{
+ i40e_status ret_code;
+
+ ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
+ i40e_mem_atq_ring,
+ (hw->aq.num_asq_entries *
+ sizeof(struct i40e_aq_desc)),
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ return ret_code;
+
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
+ (hw->aq.num_asq_entries *
+ sizeof(struct i40e_asq_cmd_details)));
+ if (ret_code) {
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+ return ret_code;
+ }
+
+ return ret_code;
+}
+
+/**
+ * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
+ * @hw: pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
+{
+ i40e_status ret_code;
+
+ ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
+ i40e_mem_arq_ring,
+ (hw->aq.num_arq_entries *
+ sizeof(struct i40e_aq_desc)),
+ I40E_ADMINQ_DESC_ALIGNMENT);
+
+ return ret_code;
+}
+
+/**
+ * i40e_free_adminq_asq - Free Admin Queue send rings
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the posted send buffers have already been cleaned
+ * and de-allocated
+ **/
+static void i40e_free_adminq_asq(struct i40e_hw *hw)
+{
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+}
+
+/**
+ * i40e_free_adminq_arq - Free Admin Queue receive rings
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the posted receive buffers have already been cleaned
+ * and de-allocated
+ **/
+static void i40e_free_adminq_arq(struct i40e_hw *hw)
+{
+ i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+}
+
+/**
+ * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
+ * @hw: pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
+{
+ i40e_status ret_code;
+ struct i40e_aq_desc *desc;
+ struct i40e_dma_mem *bi;
+ int i;
+
+ /* We'll be allocating the buffer info memory first, then we can
+ * allocate the mapped buffers for the event processing
+ */
+
+ /* buffer_info structures do not need alignment */
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
+ (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
+ if (ret_code)
+ goto alloc_arq_bufs;
+ hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < hw->aq.num_arq_entries; i++) {
+ bi = &hw->aq.arq.r.arq_bi[i];
+ ret_code = i40e_allocate_dma_mem(hw, bi,
+ i40e_mem_arq_buf,
+ hw->aq.arq_buf_size,
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ goto unwind_alloc_arq_bufs;
+
+ /* now configure the descriptors for use */
+ desc = I40E_ADMINQ_DESC(hw->aq.arq, i);
+
+ desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
+ desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
+ desc->opcode = 0;
+ /* This is in accordance with Admin queue design; there is no
+ * register for buffer size configuration
+ */
+ desc->datalen = cpu_to_le16((u16)bi->size);
+ desc->retval = 0;
+ desc->cookie_high = 0;
+ desc->cookie_low = 0;
+ desc->params.external.addr_high =
+ cpu_to_le32(upper_32_bits(bi->pa));
+ desc->params.external.addr_low =
+ cpu_to_le32(lower_32_bits(bi->pa));
+ desc->params.external.param0 = 0;
+ desc->params.external.param1 = 0;
+ }
+
+alloc_arq_bufs:
+ return ret_code;
+
+unwind_alloc_arq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+ i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
+
+ return ret_code;
+}
+
+/**
+ * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
+ * @hw: pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
+{
+ i40e_status ret_code;
+ struct i40e_dma_mem *bi;
+ int i;
+
+ /* No mapped memory needed yet, just the buffer info structures */
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
+ (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
+ if (ret_code)
+ goto alloc_asq_bufs;
+ hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < hw->aq.num_asq_entries; i++) {
+ bi = &hw->aq.asq.r.asq_bi[i];
+ ret_code = i40e_allocate_dma_mem(hw, bi,
+ i40e_mem_asq_buf,
+ hw->aq.asq_buf_size,
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ goto unwind_alloc_asq_bufs;
+ }
+alloc_asq_bufs:
+ return ret_code;
+
+unwind_alloc_asq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+ i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
+
+ return ret_code;
+}
+
+/**
+ * i40e_free_arq_bufs - Free receive queue buffer info elements
+ * @hw: pointer to the hardware structure
+ **/
+static void i40e_free_arq_bufs(struct i40e_hw *hw)
+{
+ int i;
+
+ /* free descriptors */
+ for (i = 0; i < hw->aq.num_arq_entries; i++)
+ i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+
+ /* free the descriptor memory */
+ i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+
+ /* free the dma header */
+ i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
+}
+
+/**
+ * i40e_free_asq_bufs - Free send queue buffer info elements
+ * @hw: pointer to the hardware structure
+ **/
+static void i40e_free_asq_bufs(struct i40e_hw *hw)
+{
+ int i;
+
+ /* only unmap if the address is non-NULL */
+ for (i = 0; i < hw->aq.num_asq_entries; i++)
+ if (hw->aq.asq.r.asq_bi[i].pa)
+ i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+
+ /* free the buffer info list */
+ i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
+
+ /* free the descriptor memory */
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+
+ /* free the dma header */
+ i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
+}
+
+/**
+ * i40e_config_asq_regs - configure ASQ registers
+ * @hw: pointer to the hardware structure
+ *
+ * Configure base address and length registers for the transmit queue
+ **/
+static void i40e_config_asq_regs(struct i40e_hw *hw)
+{
+ if (hw->mac.type == I40E_MAC_VF) {
+ /* configure the transmit queue */
+ wr32(hw, I40E_VF_ATQBAH1,
+ upper_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, I40E_VF_ATQBAL1,
+ lower_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
+ I40E_VF_ATQLEN1_ATQENABLE_MASK));
+ } else {
+ /* configure the transmit queue */
+ wr32(hw, I40E_PF_ATQBAH,
+ upper_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, I40E_PF_ATQBAL,
+ lower_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
+ I40E_PF_ATQLEN_ATQENABLE_MASK));
+ }
+}
+
+/**
+ * i40e_config_arq_regs - ARQ register configuration
+ * @hw: pointer to the hardware structure
+ *
+ * Configure base address and length registers for the receive (event) queue
+ **/
+static void i40e_config_arq_regs(struct i40e_hw *hw)
+{
+ if (hw->mac.type == I40E_MAC_VF) {
+ /* configure the receive queue */
+ wr32(hw, I40E_VF_ARQBAH1,
+ upper_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, I40E_VF_ARQBAL1,
+ lower_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
+ I40E_VF_ARQLEN1_ARQENABLE_MASK));
+ } else {
+ /* configure the receive queue */
+ wr32(hw, I40E_PF_ARQBAH,
+ upper_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, I40E_PF_ARQBAL,
+ lower_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
+ I40E_PF_ARQLEN_ARQENABLE_MASK));
+ }
+
+ /* Update tail in the HW to post pre-allocated buffers */
+ wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
+}
+
+/**
+ * i40e_init_asq - main initialization routine for ASQ
+ * @hw: pointer to the hardware structure
+ *
+ * This is the main initialization routine for the Admin Send Queue
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_asq_entries
+ * - hw->aq.asq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this as the memory allocation routines
+ * called are not going to be atomic context safe
+ **/
+static i40e_status i40e_init_asq(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+
+ if (hw->aq.asq.count > 0) {
+ /* queue already initialized */
+ ret_code = I40E_ERR_NOT_READY;
+ goto init_adminq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_asq_entries == 0) ||
+ (hw->aq.asq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ hw->aq.asq.next_to_use = 0;
+ hw->aq.asq.next_to_clean = 0;
+ hw->aq.asq.count = hw->aq.num_asq_entries;
+
+ /* allocate the ring memory */
+ ret_code = i40e_alloc_adminq_asq_ring(hw);
+ if (ret_code)
+ goto init_adminq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = i40e_alloc_asq_bufs(hw);
+ if (ret_code)
+ goto init_adminq_free_rings;
+
+ /* initialize base registers */
+ i40e_config_asq_regs(hw);
+
+ /* success! */
+ goto init_adminq_exit;
+
+init_adminq_free_rings:
+ i40e_free_adminq_asq(hw);
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_init_arq - initialize ARQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main initialization routine for the Admin Receive (Event) Queue.
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_arq_entries
+ * - hw->aq.arq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this as the memory allocation routines
+ * called are not going to be atomic context safe
+ **/
+static i40e_status i40e_init_arq(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+
+ if (hw->aq.arq.count > 0) {
+ /* queue already initialized */
+ ret_code = I40E_ERR_NOT_READY;
+ goto init_adminq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_arq_entries == 0) ||
+ (hw->aq.arq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ hw->aq.arq.next_to_use = 0;
+ hw->aq.arq.next_to_clean = 0;
+ hw->aq.arq.count = hw->aq.num_arq_entries;
+
+ /* allocate the ring memory */
+ ret_code = i40e_alloc_adminq_arq_ring(hw);
+ if (ret_code)
+ goto init_adminq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = i40e_alloc_arq_bufs(hw);
+ if (ret_code)
+ goto init_adminq_free_rings;
+
+ /* initialize base registers */
+ i40e_config_arq_regs(hw);
+
+ /* success! */
+ goto init_adminq_exit;
+
+init_adminq_free_rings:
+ i40e_free_adminq_arq(hw);
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_shutdown_asq - shutdown the ASQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main shutdown routine for the Admin Send Queue
+ **/
+static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+
+ if (hw->aq.asq.count == 0)
+ return I40E_ERR_NOT_READY;
+
+ /* Stop firmware AdminQ processing */
+ wr32(hw, hw->aq.asq.head, 0);
+ wr32(hw, hw->aq.asq.tail, 0);
+ wr32(hw, hw->aq.asq.len, 0);
+
+ /* make sure lock is available */
+ mutex_lock(&hw->aq.asq_mutex);
+
+ hw->aq.asq.count = 0; /* to indicate uninitialized queue */
+
+ /* free ring buffers */
+ i40e_free_asq_bufs(hw);
+
+ mutex_unlock(&hw->aq.asq_mutex);
+
+ return ret_code;
+}
+
+/**
+ * i40e_shutdown_arq - shutdown ARQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main shutdown routine for the Admin Receive Queue
+ **/
+static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+
+ if (hw->aq.arq.count == 0)
+ return I40E_ERR_NOT_READY;
+
+ /* Stop firmware AdminQ processing */
+ wr32(hw, hw->aq.arq.head, 0);
+ wr32(hw, hw->aq.arq.tail, 0);
+ wr32(hw, hw->aq.arq.len, 0);
+
+ /* make sure lock is available */
+ mutex_lock(&hw->aq.arq_mutex);
+
+ hw->aq.arq.count = 0; /* to indicate uninitialized queue */
+
+ /* free ring buffers */
+ i40e_free_arq_bufs(hw);
+
+ mutex_unlock(&hw->aq.arq_mutex);
+
+ return ret_code;
+}
+
+/**
+ * i40evf_init_adminq - main initialization routine for Admin Queue
+ * @hw: pointer to the hardware structure
+ *
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_asq_entries
+ * - hw->aq.num_arq_entries
+ * - hw->aq.arq_buf_size
+ * - hw->aq.asq_buf_size
+ **/
+i40e_status i40evf_init_adminq(struct i40e_hw *hw)
+{
+ i40e_status ret_code;
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_arq_entries == 0) ||
+ (hw->aq.num_asq_entries == 0) ||
+ (hw->aq.arq_buf_size == 0) ||
+ (hw->aq.asq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ /* initialize locks */
+ mutex_init(&hw->aq.asq_mutex);
+ mutex_init(&hw->aq.arq_mutex);
+
+ /* Set up register offsets */
+ i40e_adminq_init_regs(hw);
+
+ /* allocate the ASQ */
+ ret_code = i40e_init_asq(hw);
+ if (ret_code)
+ goto init_adminq_destroy_locks;
+
+ /* allocate the ARQ */
+ ret_code = i40e_init_arq(hw);
+ if (ret_code)
+ goto init_adminq_free_asq;
+
+ /* success! */
+ goto init_adminq_exit;
+
+init_adminq_free_asq:
+ i40e_shutdown_asq(hw);
+init_adminq_destroy_locks:
+
+init_adminq_exit:
+ return ret_code;
+}
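To make the contract in the header comment concrete, a minimal caller sketch (my_adminq_bringup is a hypothetical name; the entry counts and buffer sizes are illustrative, not mandated values):

/* Hypothetical bring-up sketch: size both queues and their buffers
 * before calling i40evf_init_adminq(), which rejects zero values
 * with I40E_ERR_CONFIG.
 */
static i40e_status my_adminq_bringup(struct i40e_hw *hw)
{
	hw->aq.num_asq_entries = 32;	/* send queue depth */
	hw->aq.num_arq_entries = 32;	/* receive queue depth */
	hw->aq.asq_buf_size = 512;	/* indirect send buffer size */
	hw->aq.arq_buf_size = 512;	/* event buffer size */

	return i40evf_init_adminq(hw);
}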
+
+/**
+ * i40evf_shutdown_adminq - shutdown routine for the Admin Queue
+ * @hw: pointer to the hardware structure
+ **/
+i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+
+ if (i40evf_check_asq_alive(hw))
+ i40evf_aq_queue_shutdown(hw, true);
+
+ i40e_shutdown_asq(hw);
+ i40e_shutdown_arq(hw);
+
+ /* destroy the locks - no explicit teardown needed for mutexes */
+
+ return ret_code;
+}
+
+/**
+ * i40e_clean_asq - cleans Admin send queue
+ * @hw: pointer to the hardware structure
+ *
+ * returns the number of free desc
+ **/
+static u16 i40e_clean_asq(struct i40e_hw *hw)
+{
+ struct i40e_adminq_ring *asq = &(hw->aq.asq);
+ struct i40e_asq_cmd_details *details;
+ u16 ntc = asq->next_to_clean;
+ struct i40e_aq_desc desc_cb;
+ struct i40e_aq_desc *desc;
+
+ desc = I40E_ADMINQ_DESC(*asq, ntc);
+ details = I40E_ADMINQ_DETAILS(*asq, ntc);
+ while (rd32(hw, hw->aq.asq.head) != ntc) {
+ if (details->callback) {
+ I40E_ADMINQ_CALLBACK cb_func =
+ (I40E_ADMINQ_CALLBACK)details->callback;
+ desc_cb = *desc;
+ cb_func(hw, &desc_cb);
+ }
+ memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+ memset((void *)details, 0,
+ sizeof(struct i40e_asq_cmd_details));
+ ntc++;
+ if (ntc == asq->count)
+ ntc = 0;
+ desc = I40E_ADMINQ_DESC(*asq, ntc);
+ details = I40E_ADMINQ_DETAILS(*asq, ntc);
+ }
+
+ asq->next_to_clean = ntc;
+
+ return I40E_DESC_UNUSED(asq);
+}
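I40E_DESC_UNUSED() is defined in a shared driver header; as a sketch, its free-slot arithmetic is equivalent to the classic one-slot-reserved ring formula (my_desc_unused is a hypothetical helper, not the driver's macro):

/* Sketch of the free-count arithmetic behind I40E_DESC_UNUSED():
 * one slot stays empty so next_to_use == next_to_clean means "empty".
 */
static u16 my_desc_unused(struct i40e_adminq_ring *ring)
{
	u16 used = (ring->next_to_use + ring->count -
		    ring->next_to_clean) % ring->count;

	return ring->count - used - 1;
}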
+
+/**
+ * i40evf_asq_done - check if FW has processed the Admin Send Queue
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if the firmware has processed all descriptors on the
+ * admin send queue. Returns false if there are still requests pending.
+ **/
+bool i40evf_asq_done(struct i40e_hw *hw)
+{
+ /* AQ designers suggest use of head for better
+ * timing reliability than DD bit
+ */
+ return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
+}
+
+/**
+ * i40evf_asq_send_command - send command to Admin Queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor describing the command (non DMA mem)
+ * @buff: buffer to use for indirect commands
+ * @buff_size: size of buffer for indirect commands
+ * @cmd_details: pointer to command details structure
+ *
+ * This is the main send command driver routine for the Admin Queue send
+ * queue. It runs the queue, cleans the queue, etc.
+ **/
+i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ i40e_status status = 0;
+ struct i40e_dma_mem *dma_buff = NULL;
+ struct i40e_asq_cmd_details *details;
+ struct i40e_aq_desc *desc_on_ring;
+ bool cmd_completed = false;
+ u16 retval = 0;
+
+ if (hw->aq.asq.count == 0) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Admin queue not initialized.\n");
+ status = I40E_ERR_QUEUE_EMPTY;
+ goto asq_send_command_exit;
+ }
+
+ details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
+ if (cmd_details) {
+ *details = *cmd_details;
+
+ /* If the cmd_details are defined, copy the cookie. The
+ * cpu_to_le32 is not needed here because the data is ignored
+ * by the FW, only used by the driver
+ */
+ if (details->cookie) {
+ desc->cookie_high =
+ cpu_to_le32(upper_32_bits(details->cookie));
+ desc->cookie_low =
+ cpu_to_le32(lower_32_bits(details->cookie));
+ }
+ } else {
+ memset(details, 0, sizeof(struct i40e_asq_cmd_details));
+ }
+
+ /* clear requested flags and then set additional flags if defined */
+ desc->flags &= ~cpu_to_le16(details->flags_dis);
+ desc->flags |= cpu_to_le16(details->flags_ena);
+
+ mutex_lock(&hw->aq.asq_mutex);
+
+ if (buff_size > hw->aq.asq_buf_size) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Invalid buffer size: %d.\n",
+ buff_size);
+ status = I40E_ERR_INVALID_SIZE;
+ goto asq_send_command_error;
+ }
+
+ if (details->postpone && !details->async) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Async flag not set along with postpone flag");
+ status = I40E_ERR_PARAM;
+ goto asq_send_command_error;
+ }
+
+ /* call clean and check queue available function to reclaim the
+ * descriptors that were processed by FW, the function returns the
+ * number of desc available
+ */
+ /* the clean function called here could be called in a separate thread
+ * in case of asynchronous completions
+ */
+ if (i40e_clean_asq(hw) == 0) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Error queue is full.\n");
+ status = I40E_ERR_ADMIN_QUEUE_FULL;
+ goto asq_send_command_error;
+ }
+
+ /* initialize the temp desc pointer with the right desc */
+ desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
+
+ /* if the desc is available copy the temp desc to the right place */
+ *desc_on_ring = *desc;
+
+ /* if buff is not NULL assume indirect command */
+ if (buff != NULL) {
+ dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
+ /* copy the user buff into the respective DMA buff */
+ memcpy(dma_buff->va, buff, buff_size);
+ desc_on_ring->datalen = cpu_to_le16(buff_size);
+
+ /* Update the address values in the desc with the pa value
+ * for respective buffer
+ */
+ desc_on_ring->params.external.addr_high =
+ cpu_to_le32(upper_32_bits(dma_buff->pa));
+ desc_on_ring->params.external.addr_low =
+ cpu_to_le32(lower_32_bits(dma_buff->pa));
+ }
+
+ /* bump the tail */
+ i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, buff);
+ (hw->aq.asq.next_to_use)++;
+ if (hw->aq.asq.next_to_use == hw->aq.asq.count)
+ hw->aq.asq.next_to_use = 0;
+ if (!details->postpone)
+ wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
+
+ /* if cmd_details are not defined or async flag is not set,
+ * we need to wait for desc write back
+ */
+ if (!details->async && !details->postpone) {
+ u32 total_delay = 0;
+ u32 delay_len = 10;
+
+ do {
+ /* AQ designers suggest use of head for better
+ * timing reliability than DD bit
+ */
+ if (i40evf_asq_done(hw))
+ break;
+ /* ugh! delay while spin_lock */
+ udelay(delay_len);
+ total_delay += delay_len;
+ } while (total_delay < I40E_ASQ_CMD_TIMEOUT);
+ }
+
+ /* if ready, copy the desc back to temp */
+ if (i40evf_asq_done(hw)) {
+ *desc = *desc_on_ring;
+ if (buff != NULL)
+ memcpy(buff, dma_buff->va, buff_size);
+ retval = le16_to_cpu(desc->retval);
+ if (retval != 0) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Command completed with error 0x%X.\n",
+ retval);
+ /* strip off FW internal code */
+ retval &= 0xff;
+ }
+ cmd_completed = true;
+ if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
+ status = 0;
+ else
+ status = I40E_ERR_ADMIN_QUEUE_ERROR;
+ hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
+ }
+
+ /* update the error if time out occurred */
+ if ((!cmd_completed) &&
+ (!details->async && !details->postpone)) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Writeback timeout.\n");
+ status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+ }
+
+asq_send_command_error:
+ mutex_unlock(&hw->aq.asq_mutex);
+asq_send_command_exit:
+ return status;
+}
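As a usage sketch (my_send_get_version is hypothetical; get version is a direct command, so no buffer is attached and the default details are used):

/* Hedged usage sketch: build a direct command with the fill helper
 * and submit it; a NULL buffer and zero length mark it as direct.
 */
static i40e_status my_send_get_version(struct i40e_hw *hw)
{
	struct i40e_aq_desc desc;

	i40evf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
	return i40evf_asq_send_command(hw, &desc, NULL, 0, NULL);
}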
+
+/**
+ * i40evf_fill_default_direct_cmd_desc - AQ descriptor helper function
+ * @desc: pointer to the temp descriptor (non DMA mem)
+ * @opcode: the opcode can be used to decide which flags to turn off or on
+ *
+ * Fill the desc with default values
+ **/
+void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+ u16 opcode)
+{
+ /* zero out the desc */
+ memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+ desc->opcode = cpu_to_le16(opcode);
+ desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
+}
+
+/**
+ * i40evf_clean_arq_element
+ * @hw: pointer to the hw struct
+ * @e: event info from the receive descriptor, includes any buffers
+ * @pending: number of events that could be left to process
+ *
+ * This function cleans one Admin Receive Queue element and returns
+ * the contents through e. It can also return how many events are
+ * left to process through 'pending'
+ **/
+i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
+ struct i40e_arq_event_info *e,
+ u16 *pending)
+{
+ i40e_status ret_code = 0;
+ u16 ntc = hw->aq.arq.next_to_clean;
+ struct i40e_aq_desc *desc;
+ struct i40e_dma_mem *bi;
+ u16 desc_idx;
+ u16 datalen;
+ u16 flags;
+ u16 ntu;
+
+ /* take the lock before we start messing with the ring */
+ mutex_lock(&hw->aq.arq_mutex);
+
+ /* set next_to_use to head */
+ ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
+ if (ntu == ntc) {
+ /* nothing to do - shouldn't need to update ring's values */
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQRX: Queue is empty.\n");
+ ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
+ goto clean_arq_element_out;
+ }
+
+ /* now clean the next descriptor */
+ desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
+ desc_idx = ntc;
+ i40evf_debug_aq(hw,
+ I40E_DEBUG_AQ_COMMAND,
+ (void *)desc,
+ hw->aq.arq.r.arq_bi[desc_idx].va);
+
+ flags = le16_to_cpu(desc->flags);
+ if (flags & I40E_AQ_FLAG_ERR) {
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+ hw->aq.arq_last_status =
+ (enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQRX: Event received with error 0x%X.\n",
+ hw->aq.arq_last_status);
+ } else {
+ e->desc = *desc;
+ datalen = le16_to_cpu(desc->datalen);
+ e->msg_size = min(datalen, e->msg_size);
+ if (e->msg_buf != NULL && (e->msg_size != 0))
+ memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
+ e->msg_size);
+ }
+
+ /* Restore the original datalen and buffer address in the desc;
+ * FW updates datalen to indicate the event message size
+ */
+ bi = &hw->aq.arq.r.arq_bi[ntc];
+ memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+
+ desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
+ desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
+ desc->datalen = cpu_to_le16((u16)bi->size);
+ desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
+ desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
+
+ /* set tail = the last cleaned desc index. */
+ wr32(hw, hw->aq.arq.tail, ntc);
+ /* ntc is updated to tail + 1 */
+ ntc++;
+ if (ntc == hw->aq.num_arq_entries)
+ ntc = 0;
+ hw->aq.arq.next_to_clean = ntc;
+ hw->aq.arq.next_to_use = ntu;
+
+clean_arq_element_out:
+ /* Set pending if needed, unlock and return */
+ if (pending != NULL)
+ *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
+ mutex_unlock(&hw->aq.arq_mutex);
+
+ return ret_code;
+}
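A sketch of the intended polling pattern (my_drain_arq is hypothetical; the caller owns event->msg_buf, sized to arq_buf_size). Note that msg_size must be reset on each pass, because the clean routine above clamps it to the received length:

/* Drain the ARQ until no events remain. */
static void my_drain_arq(struct i40e_hw *hw,
			 struct i40e_arq_event_info *event)
{
	u16 pending = 0;

	do {
		event->msg_size = hw->aq.arq_buf_size;
		if (i40evf_clean_arq_element(hw, event, &pending))
			break;	/* empty queue or error */
		/* dispatch on le16_to_cpu(event->desc.opcode) here */
	} while (pending);
}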
+
+void i40evf_resume_aq(struct i40e_hw *hw)
+{
+ /* Registers are reset after PF reset */
+ hw->aq.asq.next_to_use = 0;
+ hw->aq.asq.next_to_clean = 0;
+
+ i40e_config_asq_regs(hw);
+
+ hw->aq.arq.next_to_use = 0;
+ hw->aq.arq.next_to_clean = 0;
+
+ i40e_config_arq_regs(hw);
+}
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
new file mode 100644
index 000000000000..8f72c31d95cc
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
@@ -0,0 +1,106 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_ADMINQ_H_
+#define _I40E_ADMINQ_H_
+
+#include "i40e_osdep.h"
+#include "i40e_adminq_cmd.h"
+
+#define I40E_ADMINQ_DESC(R, i) \
+ (&(((struct i40e_aq_desc *)((R).desc_buf.va))[i]))
+
+#define I40E_ADMINQ_DESC_ALIGNMENT 4096
+
+struct i40e_adminq_ring {
+ struct i40e_virt_mem dma_head; /* space for dma structures */
+ struct i40e_dma_mem desc_buf; /* descriptor ring memory */
+ struct i40e_virt_mem cmd_buf; /* command buffer memory */
+
+ union {
+ struct i40e_dma_mem *asq_bi;
+ struct i40e_dma_mem *arq_bi;
+ } r;
+
+ u16 count; /* Number of descriptors */
+ u16 rx_buf_len; /* Admin Receive Queue buffer length */
+
+ /* used for interrupt processing */
+ u16 next_to_use;
+ u16 next_to_clean;
+
+ /* used for queue tracking */
+ u32 head;
+ u32 tail;
+ u32 len;
+};
+
+/* ASQ transaction details */
+struct i40e_asq_cmd_details {
+ void *callback; /* cast from type I40E_ADMINQ_CALLBACK */
+ u64 cookie;
+ u16 flags_ena;
+ u16 flags_dis;
+ bool async;
+ bool postpone;
+};
+
+#define I40E_ADMINQ_DETAILS(R, i) \
+ (&(((struct i40e_asq_cmd_details *)((R).cmd_buf.va))[i]))
+
+/* ARQ event information */
+struct i40e_arq_event_info {
+ struct i40e_aq_desc desc;
+ u16 msg_size;
+ u8 *msg_buf;
+};
+
+/* Admin Queue information */
+struct i40e_adminq_info {
+ struct i40e_adminq_ring arq; /* receive queue */
+ struct i40e_adminq_ring asq; /* send queue */
+ u16 num_arq_entries; /* receive queue depth */
+ u16 num_asq_entries; /* send queue depth */
+ u16 arq_buf_size; /* receive queue buffer size */
+ u16 asq_buf_size; /* send queue buffer size */
+ u16 fw_maj_ver; /* firmware major version */
+ u16 fw_min_ver; /* firmware minor version */
+ u16 api_maj_ver; /* api major version */
+ u16 api_min_ver; /* api minor version */
+
+ struct mutex asq_mutex; /* Send queue lock */
+ struct mutex arq_mutex; /* Receive queue lock */
+
+ /* last status values on send and receive queues */
+ enum i40e_admin_queue_err asq_last_status;
+ enum i40e_admin_queue_err arq_last_status;
+};
+
+/* general information */
+#define I40E_AQ_LARGE_BUF 512
+#define I40E_ASQ_CMD_TIMEOUT 100000 /* usecs */
+
+void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+ u16 opcode);
+
+#endif /* _I40E_ADMINQ_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
new file mode 100644
index 000000000000..f7cea1bca38d
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -0,0 +1,2153 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_ADMINQ_CMD_H_
+#define _I40E_ADMINQ_CMD_H_
+
+/* This header file defines the i40e Admin Queue commands and is shared between
+ * i40e Firmware and Software.
+ *
+ * This file needs to comply with the Linux Kernel coding style.
+ */
+
+#define I40E_FW_API_VERSION_MAJOR 0x0001
+#define I40E_FW_API_VERSION_MINOR 0x0001
+#define I40E_FW_API_VERSION_A0_MINOR 0x0000
+
+struct i40e_aq_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 retval;
+ __le32 cookie_high;
+ __le32 cookie_low;
+ union {
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 param2;
+ __le32 param3;
+ } internal;
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 addr_high;
+ __le32 addr_low;
+ } external;
+ u8 raw[16];
+ } params;
+};
+
+/* Flags sub-structure
+ * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
+ */
+
+/* command flags and offsets*/
+#define I40E_AQ_FLAG_DD_SHIFT 0
+#define I40E_AQ_FLAG_CMP_SHIFT 1
+#define I40E_AQ_FLAG_ERR_SHIFT 2
+#define I40E_AQ_FLAG_VFE_SHIFT 3
+#define I40E_AQ_FLAG_LB_SHIFT 9
+#define I40E_AQ_FLAG_RD_SHIFT 10
+#define I40E_AQ_FLAG_VFC_SHIFT 11
+#define I40E_AQ_FLAG_BUF_SHIFT 12
+#define I40E_AQ_FLAG_SI_SHIFT 13
+#define I40E_AQ_FLAG_EI_SHIFT 14
+#define I40E_AQ_FLAG_FE_SHIFT 15
+
+#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
+#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
+#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
+#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
+#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
+#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
+#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
+#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
+#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
+#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
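These bits combine per command type. A hedged sketch of marking a descriptor whose attached buffer the firmware must read (my_mark_indirect is hypothetical; I40E_AQ_LARGE_BUF is the 512-byte threshold from i40e_adminq.h):

/* Sketch: BUF marks an attached buffer, RD tells the firmware to
 * read it, and LB flags a buffer larger than I40E_AQ_LARGE_BUF.
 */
static inline void my_mark_indirect(struct i40e_aq_desc *desc, u16 buff_size)
{
	desc->flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
	if (buff_size > I40E_AQ_LARGE_BUF)
		desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
}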
+
+/* error codes */
+enum i40e_admin_queue_err {
+ I40E_AQ_RC_OK = 0, /* success */
+ I40E_AQ_RC_EPERM = 1, /* Operation not permitted */
+ I40E_AQ_RC_ENOENT = 2, /* No such element */
+ I40E_AQ_RC_ESRCH = 3, /* Bad opcode */
+ I40E_AQ_RC_EINTR = 4, /* operation interrupted */
+ I40E_AQ_RC_EIO = 5, /* I/O error */
+ I40E_AQ_RC_ENXIO = 6, /* No such resource */
+ I40E_AQ_RC_E2BIG = 7, /* Arg too long */
+ I40E_AQ_RC_EAGAIN = 8, /* Try again */
+ I40E_AQ_RC_ENOMEM = 9, /* Out of memory */
+ I40E_AQ_RC_EACCES = 10, /* Permission denied */
+ I40E_AQ_RC_EFAULT = 11, /* Bad address */
+ I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */
+ I40E_AQ_RC_EEXIST = 13, /* object already exists */
+ I40E_AQ_RC_EINVAL = 14, /* Invalid argument */
+ I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */
+ I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
+ I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */
+ I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */
+ I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed because of prev cmd error */
+ I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
+ I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
+ I40E_AQ_RC_EFBIG = 22, /* File too large */
+};
+
+/* Admin Queue command opcodes */
+enum i40e_admin_queue_opc {
+ /* aq commands */
+ i40e_aqc_opc_get_version = 0x0001,
+ i40e_aqc_opc_driver_version = 0x0002,
+ i40e_aqc_opc_queue_shutdown = 0x0003,
+
+ /* resource ownership */
+ i40e_aqc_opc_request_resource = 0x0008,
+ i40e_aqc_opc_release_resource = 0x0009,
+
+ i40e_aqc_opc_list_func_capabilities = 0x000A,
+ i40e_aqc_opc_list_dev_capabilities = 0x000B,
+
+ i40e_aqc_opc_set_cppm_configuration = 0x0103,
+ i40e_aqc_opc_set_arp_proxy_entry = 0x0104,
+ i40e_aqc_opc_set_ns_proxy_entry = 0x0105,
+
+ /* LAA */
+ i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */
+ i40e_aqc_opc_mac_address_read = 0x0107,
+ i40e_aqc_opc_mac_address_write = 0x0108,
+
+ /* PXE */
+ i40e_aqc_opc_clear_pxe_mode = 0x0110,
+
+ /* internal switch commands */
+ i40e_aqc_opc_get_switch_config = 0x0200,
+ i40e_aqc_opc_add_statistics = 0x0201,
+ i40e_aqc_opc_remove_statistics = 0x0202,
+ i40e_aqc_opc_set_port_parameters = 0x0203,
+ i40e_aqc_opc_get_switch_resource_alloc = 0x0204,
+
+ i40e_aqc_opc_add_vsi = 0x0210,
+ i40e_aqc_opc_update_vsi_parameters = 0x0211,
+ i40e_aqc_opc_get_vsi_parameters = 0x0212,
+
+ i40e_aqc_opc_add_pv = 0x0220,
+ i40e_aqc_opc_update_pv_parameters = 0x0221,
+ i40e_aqc_opc_get_pv_parameters = 0x0222,
+
+ i40e_aqc_opc_add_veb = 0x0230,
+ i40e_aqc_opc_update_veb_parameters = 0x0231,
+ i40e_aqc_opc_get_veb_parameters = 0x0232,
+
+ i40e_aqc_opc_delete_element = 0x0243,
+
+ i40e_aqc_opc_add_macvlan = 0x0250,
+ i40e_aqc_opc_remove_macvlan = 0x0251,
+ i40e_aqc_opc_add_vlan = 0x0252,
+ i40e_aqc_opc_remove_vlan = 0x0253,
+ i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254,
+ i40e_aqc_opc_add_tag = 0x0255,
+ i40e_aqc_opc_remove_tag = 0x0256,
+ i40e_aqc_opc_add_multicast_etag = 0x0257,
+ i40e_aqc_opc_remove_multicast_etag = 0x0258,
+ i40e_aqc_opc_update_tag = 0x0259,
+ i40e_aqc_opc_add_control_packet_filter = 0x025A,
+ i40e_aqc_opc_remove_control_packet_filter = 0x025B,
+ i40e_aqc_opc_add_cloud_filters = 0x025C,
+ i40e_aqc_opc_remove_cloud_filters = 0x025D,
+
+ i40e_aqc_opc_add_mirror_rule = 0x0260,
+ i40e_aqc_opc_delete_mirror_rule = 0x0261,
+
+ i40e_aqc_opc_set_storm_control_config = 0x0280,
+ i40e_aqc_opc_get_storm_control_config = 0x0281,
+
+ /* DCB commands */
+ i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
+ i40e_aqc_opc_dcb_updated = 0x0302,
+
+ /* TX scheduler */
+ i40e_aqc_opc_configure_vsi_bw_limit = 0x0400,
+ i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406,
+ i40e_aqc_opc_configure_vsi_tc_bw = 0x0407,
+ i40e_aqc_opc_query_vsi_bw_config = 0x0408,
+ i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A,
+ i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410,
+
+ i40e_aqc_opc_enable_switching_comp_ets = 0x0413,
+ i40e_aqc_opc_modify_switching_comp_ets = 0x0414,
+ i40e_aqc_opc_disable_switching_comp_ets = 0x0415,
+ i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416,
+ i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417,
+ i40e_aqc_opc_query_switching_comp_ets_config = 0x0418,
+ i40e_aqc_opc_query_port_ets_config = 0x0419,
+ i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
+ i40e_aqc_opc_suspend_port_tx = 0x041B,
+ i40e_aqc_opc_resume_port_tx = 0x041C,
+
+ /* hmc */
+ i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
+ i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
+
+ /* phy commands*/
+ i40e_aqc_opc_get_phy_abilities = 0x0600,
+ i40e_aqc_opc_set_phy_config = 0x0601,
+ i40e_aqc_opc_set_mac_config = 0x0603,
+ i40e_aqc_opc_set_link_restart_an = 0x0605,
+ i40e_aqc_opc_get_link_status = 0x0607,
+ i40e_aqc_opc_set_phy_int_mask = 0x0613,
+ i40e_aqc_opc_get_local_advt_reg = 0x0614,
+ i40e_aqc_opc_set_local_advt_reg = 0x0615,
+ i40e_aqc_opc_get_partner_advt = 0x0616,
+ i40e_aqc_opc_set_lb_modes = 0x0618,
+ i40e_aqc_opc_get_phy_wol_caps = 0x0621,
+ i40e_aqc_opc_set_phy_reset = 0x0622,
+ i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
+
+ /* NVM commands */
+ i40e_aqc_opc_nvm_read = 0x0701,
+ i40e_aqc_opc_nvm_erase = 0x0702,
+ i40e_aqc_opc_nvm_update = 0x0703,
+
+ /* virtualization commands */
+ i40e_aqc_opc_send_msg_to_pf = 0x0801,
+ i40e_aqc_opc_send_msg_to_vf = 0x0802,
+ i40e_aqc_opc_send_msg_to_peer = 0x0803,
+
+ /* alternate structure */
+ i40e_aqc_opc_alternate_write = 0x0900,
+ i40e_aqc_opc_alternate_write_indirect = 0x0901,
+ i40e_aqc_opc_alternate_read = 0x0902,
+ i40e_aqc_opc_alternate_read_indirect = 0x0903,
+ i40e_aqc_opc_alternate_write_done = 0x0904,
+ i40e_aqc_opc_alternate_set_mode = 0x0905,
+ i40e_aqc_opc_alternate_clear_port = 0x0906,
+
+ /* LLDP commands */
+ i40e_aqc_opc_lldp_get_mib = 0x0A00,
+ i40e_aqc_opc_lldp_update_mib = 0x0A01,
+ i40e_aqc_opc_lldp_add_tlv = 0x0A02,
+ i40e_aqc_opc_lldp_update_tlv = 0x0A03,
+ i40e_aqc_opc_lldp_delete_tlv = 0x0A04,
+ i40e_aqc_opc_lldp_stop = 0x0A05,
+ i40e_aqc_opc_lldp_start = 0x0A06,
+
+ /* Tunnel commands */
+ i40e_aqc_opc_add_udp_tunnel = 0x0B00,
+ i40e_aqc_opc_del_udp_tunnel = 0x0B01,
+ i40e_aqc_opc_tunnel_key_structure = 0x0B10,
+
+ /* Async Events */
+ i40e_aqc_opc_event_lan_overflow = 0x1001,
+
+ /* OEM commands */
+ i40e_aqc_opc_oem_parameter_change = 0xFE00,
+ i40e_aqc_opc_oem_device_status_change = 0xFE01,
+
+ /* debug commands */
+ i40e_aqc_opc_debug_get_deviceid = 0xFF00,
+ i40e_aqc_opc_debug_set_mode = 0xFF01,
+ i40e_aqc_opc_debug_read_reg = 0xFF03,
+ i40e_aqc_opc_debug_write_reg = 0xFF04,
+ i40e_aqc_opc_debug_read_reg_sg = 0xFF05,
+ i40e_aqc_opc_debug_write_reg_sg = 0xFF06,
+ i40e_aqc_opc_debug_modify_reg = 0xFF07,
+ i40e_aqc_opc_debug_dump_internals = 0xFF08,
+ i40e_aqc_opc_debug_modify_internals = 0xFF09,
+};
+
+/* command structures and indirect data structures */
+
+/* Structure naming conventions:
+ * - no suffix for direct command descriptor structures
+ * - _data for indirect sent data
+ * - _resp for indirect return data (data which is both will use _data)
+ * - _completion for direct return data
+ * - _element_ for repeated elements (may also be _data or _resp)
+ *
+ * Command structures are expected to overlay the params.raw member of the basic
+ * descriptor, and as such cannot exceed 16 bytes in length.
+ */
+
+/* This macro is used to generate a compilation error if a structure
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure is not of the correct size, otherwise it creates an enum that is
+ * never used.
+ */
+#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \
+ { i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
+
+/* This macro is used extensively to ensure that command structures are 16
+ * bytes in length as they have to map to the raw array of that size.
+ */
+#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X)
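To illustrate the mechanism (my_demo_cmd is a hypothetical struct, not part of the driver): a correctly sized command compiles cleanly, while a wrong size turns the (n)/0 constant expression into a compile-time divide-by-zero.

/* Exactly 16 bytes, so the check passes; a 17-byte struct would
 * fail to compile here rather than misbehave at runtime.
 */
struct my_demo_cmd {
	__le32 words[4];	/* 4 * 4 = 16 bytes */
};

I40E_CHECK_CMD_LENGTH(my_demo_cmd);	/* compiles: sizeof == 16 */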
+
+/* internal (0x00XX) commands */
+
+/* Get version (direct 0x0001) */
+struct i40e_aqc_get_version {
+ __le32 rom_ver;
+ __le32 fw_build;
+ __le16 fw_major;
+ __le16 fw_minor;
+ __le16 api_major;
+ __le16 api_minor;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version);
+
+/* Send driver version (indirect 0x0002) */
+struct i40e_aqc_driver_version {
+ u8 driver_major_ver;
+ u8 driver_minor_ver;
+ u8 driver_build_ver;
+ u8 driver_subbuild_ver;
+ u8 reserved[4];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version);
+
+/* Queue Shutdown (direct 0x0003) */
+struct i40e_aqc_queue_shutdown {
+ __le32 driver_unloading;
+#define I40E_AQ_DRIVER_UNLOADING 0x1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
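Command structures overlay the descriptor's params.raw area; a sketch of how a caller such as i40evf_aq_queue_shutdown() (used earlier in i40e_adminq.c, defined elsewhere in the driver) might fill this one (my_build_queue_shutdown is hypothetical):

/* Sketch: overlay the 16-byte command body on the descriptor's raw
 * parameter area and set the unloading flag if requested.
 */
static void my_build_queue_shutdown(struct i40e_aq_desc *desc, bool unloading)
{
	struct i40e_aqc_queue_shutdown *cmd =
		(struct i40e_aqc_queue_shutdown *)&desc->params.raw;

	i40evf_fill_default_direct_cmd_desc(desc, i40e_aqc_opc_queue_shutdown);
	if (unloading)
		cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
}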
+
+/* Request resource ownership (direct 0x0008)
+ * Release resource ownership (direct 0x0009)
+ */
+#define I40E_AQ_RESOURCE_NVM 1
+#define I40E_AQ_RESOURCE_SDP 2
+#define I40E_AQ_RESOURCE_ACCESS_READ 1
+#define I40E_AQ_RESOURCE_ACCESS_WRITE 2
+#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000
+#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000
+
+struct i40e_aqc_request_resource {
+ __le16 resource_id;
+ __le16 access_type;
+ __le32 timeout;
+ __le32 resource_number;
+ u8 reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource);
+
+/* Get function capabilities (indirect 0x000A)
+ * Get device capabilities (indirect 0x000B)
+ */
+struct i40e_aqc_list_capabilites {
+ u8 command_flags;
+#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1
+ u8 pf_index;
+ u8 reserved[2];
+ __le32 count;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites);
+
+struct i40e_aqc_list_capabilities_element_resp {
+ __le16 id;
+ u8 major_rev;
+ u8 minor_rev;
+ __le32 number;
+ __le32 logical_id;
+ __le32 phys_id;
+ u8 reserved[16];
+};
+
+/* list of caps */
+
+#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001
+#define I40E_AQ_CAP_ID_MNG_MODE 0x0002
+#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003
+#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004
+#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005
+#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006
+#define I40E_AQ_CAP_ID_SRIOV 0x0012
+#define I40E_AQ_CAP_ID_VF 0x0013
+#define I40E_AQ_CAP_ID_VMDQ 0x0014
+#define I40E_AQ_CAP_ID_8021QBG 0x0015
+#define I40E_AQ_CAP_ID_8021QBR 0x0016
+#define I40E_AQ_CAP_ID_VSI 0x0017
+#define I40E_AQ_CAP_ID_DCB 0x0018
+#define I40E_AQ_CAP_ID_FCOE 0x0021
+#define I40E_AQ_CAP_ID_RSS 0x0040
+#define I40E_AQ_CAP_ID_RXQ 0x0041
+#define I40E_AQ_CAP_ID_TXQ 0x0042
+#define I40E_AQ_CAP_ID_MSIX 0x0043
+#define I40E_AQ_CAP_ID_VF_MSIX 0x0044
+#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045
+#define I40E_AQ_CAP_ID_1588 0x0046
+#define I40E_AQ_CAP_ID_IWARP 0x0051
+#define I40E_AQ_CAP_ID_LED 0x0061
+#define I40E_AQ_CAP_ID_SDP 0x0062
+#define I40E_AQ_CAP_ID_MDIO 0x0063
+#define I40E_AQ_CAP_ID_FLEX10 0x00F1
+#define I40E_AQ_CAP_ID_CEM 0x00F2
+
+/* Set CPPM Configuration (direct 0x0103) */
+struct i40e_aqc_cppm_configuration {
+ __le16 command_flags;
+#define I40E_AQ_CPPM_EN_LTRC 0x0800
+#define I40E_AQ_CPPM_EN_DMCTH 0x1000
+#define I40E_AQ_CPPM_EN_DMCTLX 0x2000
+#define I40E_AQ_CPPM_EN_HPTC 0x4000
+#define I40E_AQ_CPPM_EN_DMARC 0x8000
+ __le16 ttlx;
+ __le32 dmacr;
+ __le16 dmcth;
+ u8 hptc;
+ u8 reserved;
+ __le32 pfltrc;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration);
+
+/* Set ARP Proxy command / response (indirect 0x0104) */
+struct i40e_aqc_arp_proxy_data {
+ __le16 command_flags;
+#define I40E_AQ_ARP_INIT_IPV4 0x0008
+#define I40E_AQ_ARP_UNSUP_CTL 0x0010
+#define I40E_AQ_ARP_ENA 0x0020
+#define I40E_AQ_ARP_ADD_IPV4 0x0040
+#define I40E_AQ_ARP_DEL_IPV4 0x0080
+ __le16 table_id;
+ __le32 pfpm_proxyfc;
+ __le32 ip_addr;
+ u8 mac_addr[6];
+};
+
+/* Set NS Proxy Table Entry Command (indirect 0x0105) */
+struct i40e_aqc_ns_proxy_data {
+ __le16 table_idx_mac_addr_0;
+ __le16 table_idx_mac_addr_1;
+ __le16 table_idx_ipv6_0;
+ __le16 table_idx_ipv6_1;
+ __le16 control;
+#define I40E_AQ_NS_PROXY_ADD_0 0x0100
+#define I40E_AQ_NS_PROXY_DEL_0 0x0200
+#define I40E_AQ_NS_PROXY_ADD_1 0x0400
+#define I40E_AQ_NS_PROXY_DEL_1 0x0800
+#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000
+#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000
+#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000
+#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000
+#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001
+#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002
+#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004
+ u8 mac_addr_0[6];
+ u8 mac_addr_1[6];
+ u8 local_mac_addr[6];
+ u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */
+ u8 ipv6_addr_1[16];
+};
+
+/* Manage LAA Command (0x0106) - obsolete */
+struct i40e_aqc_mng_laa {
+ __le16 command_flags;
+#define I40E_AQ_LAA_FLAG_WR 0x8000
+ u8 reserved[2];
+ __le32 sal;
+ __le16 sah;
+ u8 reserved2[6];
+};
+
+/* Manage MAC Address Read Command (indirect 0x0107) */
+struct i40e_aqc_mac_address_read {
+ __le16 command_flags;
+#define I40E_AQC_LAN_ADDR_VALID 0x10
+#define I40E_AQC_SAN_ADDR_VALID 0x20
+#define I40E_AQC_PORT_ADDR_VALID 0x40
+#define I40E_AQC_WOL_ADDR_VALID 0x80
+#define I40E_AQC_ADDR_VALID_MASK 0xf0
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read);
+
+struct i40e_aqc_mac_address_read_data {
+ u8 pf_lan_mac[6];
+ u8 pf_san_mac[6];
+ u8 port_mac[6];
+ u8 pf_wol_mac[6];
+};
+
+I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data);
+
+/* Manage MAC Address Write Command (0x0108) */
+struct i40e_aqc_mac_address_write {
+ __le16 command_flags;
+#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
+#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
+#define I40E_AQC_WRITE_TYPE_PORT 0x8000
+#define I40E_AQC_WRITE_TYPE_MASK 0xc000
+ __le16 mac_sah;
+ __le32 mac_sal;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write);
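The sah/sal split packs a 6-byte MAC across the two fields; a sketch of the assumed layout (my_pack_mac is hypothetical; the split mirrors the sal/sah fields of the LAA command above, high two bytes in mac_sah, low four in mac_sal):

/* Sketch: pack a MAC address into the write command's fields. */
static void my_pack_mac(struct i40e_aqc_mac_address_write *cmd,
			const u8 mac[6])
{
	cmd->mac_sah = cpu_to_le16((u16)mac[0] << 8 | mac[1]);
	cmd->mac_sal = cpu_to_le32((u32)mac[2] << 24 | (u32)mac[3] << 16 |
				   (u32)mac[4] << 8 | mac[5]);
}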
+
+/* PXE commands (0x011x) */
+
+/* Clear PXE Command and response (direct 0x0110) */
+struct i40e_aqc_clear_pxe {
+ u8 rx_cnt;
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
+
+/* Switch configuration commands (0x02xx) */
+
+/* Used by many indirect commands that only pass an seid and a buffer in the
+ * command
+ */
+struct i40e_aqc_switch_seid {
+ __le16 seid;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid);
+
+/* Get Switch Configuration command (indirect 0x0200)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+struct i40e_aqc_get_switch_config_header_resp {
+ __le16 num_reported;
+ __le16 num_total;
+ u8 reserved[12];
+};
+
+struct i40e_aqc_switch_config_element_resp {
+ u8 element_type;
+#define I40E_AQ_SW_ELEM_TYPE_MAC 1
+#define I40E_AQ_SW_ELEM_TYPE_PF 2
+#define I40E_AQ_SW_ELEM_TYPE_VF 3
+#define I40E_AQ_SW_ELEM_TYPE_EMP 4
+#define I40E_AQ_SW_ELEM_TYPE_BMC 5
+#define I40E_AQ_SW_ELEM_TYPE_PV 16
+#define I40E_AQ_SW_ELEM_TYPE_VEB 17
+#define I40E_AQ_SW_ELEM_TYPE_PA 18
+#define I40E_AQ_SW_ELEM_TYPE_VSI 19
+ u8 revision;
+#define I40E_AQ_SW_ELEM_REV_1 1
+ __le16 seid;
+ __le16 uplink_seid;
+ __le16 downlink_seid;
+ u8 reserved[3];
+ u8 connection_type;
+#define I40E_AQ_CONN_TYPE_REGULAR 0x1
+#define I40E_AQ_CONN_TYPE_DEFAULT 0x2
+#define I40E_AQ_CONN_TYPE_CASCADED 0x3
+ __le16 scheduler_id;
+ __le16 element_info;
+};
+
+/* Get Switch Configuration (indirect 0x0200)
+ * an array of elements is returned in the response buffer;
+ * the first entry is the header, the remainder are elements
+ */
+struct i40e_aqc_get_switch_config_resp {
+ struct i40e_aqc_get_switch_config_header_resp header;
+ struct i40e_aqc_switch_config_element_resp element[1];
+};
+
+/* Add Statistics (direct 0x0201)
+ * Remove Statistics (direct 0x0202)
+ */
+struct i40e_aqc_add_remove_statistics {
+ __le16 seid;
+ __le16 vlan;
+ __le16 stat_index;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics);
+
+/* Set Port Parameters command (direct 0x0203) */
+struct i40e_aqc_set_port_parameters {
+ __le16 command_flags;
+#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1
+#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
+#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4
+ __le16 bad_frame_vsi;
+ __le16 default_seid; /* reserved for command */
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters);
+
+/* Get Switch Resource Allocation (indirect 0x0204) */
+struct i40e_aqc_get_switch_resource_alloc {
+ u8 num_entries; /* reserved for command */
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc);
+
+/* expect an array of these structs in the response buffer */
+struct i40e_aqc_switch_resource_alloc_element_resp {
+ u8 resource_type;
+#define I40E_AQ_RESOURCE_TYPE_VEB 0x0
+#define I40E_AQ_RESOURCE_TYPE_VSI 0x1
+#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2
+#define I40E_AQ_RESOURCE_TYPE_STAG 0x3
+#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4
+#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5
+#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6
+#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7
+#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8
+#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9
+#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA
+#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB
+#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC
+#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD
+#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF
+#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10
+#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11
+#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12
+#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13
+ u8 reserved1;
+ __le16 guaranteed;
+ __le16 total;
+ __le16 used;
+ __le16 total_unalloced;
+ u8 reserved2[6];
+};
+
+/* Add VSI (indirect 0x0210)
+ * this indirect command uses struct i40e_aqc_vsi_properties_data
+ * as the indirect buffer (128 bytes)
+ *
+ * Update VSI (indirect 0x0211)
+ * uses the same data structure as Add VSI
+ *
+ * Get VSI (indirect 0x0212)
+ * uses the same completion and data structure as Add VSI
+ */
+struct i40e_aqc_add_get_update_vsi {
+ __le16 uplink_seid;
+ u8 connection_type;
+#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1
+#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2
+#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3
+ u8 reserved1;
+ u8 vf_id;
+ u8 reserved2;
+ __le16 vsi_flags;
+#define I40E_AQ_VSI_TYPE_SHIFT 0x0
+#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT)
+#define I40E_AQ_VSI_TYPE_VF 0x0
+#define I40E_AQ_VSI_TYPE_VMDQ2 0x1
+#define I40E_AQ_VSI_TYPE_PF 0x2
+#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3
+#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4
+#define I40E_AQ_VSI_FLAG_CLOUD_VSI 0x8
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi);
+
+struct i40e_aqc_add_get_update_vsi_completion {
+ __le16 seid;
+ __le16 vsi_number;
+ __le16 vsi_used;
+ __le16 vsi_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion);
+
+struct i40e_aqc_vsi_properties_data {
+ /* first 96 bytes are written by SW */
+ __le16 valid_sections;
+#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001
+#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002
+#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004
+#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008
+#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010
+#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020
+#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040
+#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080
+#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100
+#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200
+ /* switch section */
+ __le16 switch_id; /* 12bit id combined with flags below */
+#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000
+#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
+#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000
+#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000
+#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000
+ u8 sw_reserved[2];
+ /* security section */
+ u8 sec_flags;
+#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04
+ u8 sec_reserved;
+ /* VLAN section */
+ __le16 pvid; /* VLANS include priority bits */
+ __le16 fcoe_pvid;
+ u8 port_vlan_flags;
+#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00
+#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \
+ I40E_AQ_VSI_PVLAN_MODE_SHIFT)
+#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01
+#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02
+#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03
+#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04
+#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03
+#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \
+ I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08
+#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10
+#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
+ u8 pvlan_reserved[3];
+ /* ingress egress up sections */
+ __le32 ingress_table; /* bitmap, 3 bits per up */
+#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0
+#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3
+#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6
+#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9
+#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12
+#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15
+#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18
+#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21
+#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
+ __le32 egress_table; /* same defines as for ingress table */
+ /* cascaded PV section */
+ __le16 cas_pv_tag;
+ u8 cas_pv_flags;
+#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \
+ I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
+#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01
+#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02
+#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10
+#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20
+#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
+ u8 cas_pv_reserved;
+ /* queue mapping section */
+ __le16 mapping_flags;
+#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0
+#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1
+ __le16 queue_mapping[16];
+#define I40E_AQ_VSI_QUEUE_SHIFT 0x0
+#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
+ __le16 tc_mapping[8];
+#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0
+#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \
+ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
+#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9
+#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \
+ I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
+ /* queueing option section */
+ u8 queueing_opt_flags;
+#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
+#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
+ u8 queueing_opt_reserved[3];
+ /* scheduler section */
+ u8 up_enable_bits;
+ u8 sched_reserved;
+ /* outer up section */
+ __le32 outer_up_table; /* same structure and defines as ingress table */
+ u8 cmd_reserved[8];
+ /* last 32 bytes are written by FW */
+ __le16 qs_handle[8];
+#define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF
+ __le16 stat_counter_idx;
+ __le16 sched_id;
+ u8 resp_reserved[12];
+};
+
+I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
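+
+/* Illustrative sketch, not part of the hardware definition: one way a
+ * driver might encode a tc_mapping entry from the shift/mask defines
+ * above. The helper name is hypothetical; offset is the TC's first
+ * queue and num_qps_pow2 encodes 2^n queue pairs.
+ */
+static inline __le16 i40e_aqc_vsi_tc_map_example(u16 offset, u16 num_qps_pow2)
+{
+ return cpu_to_le16(((offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) &
+ I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
+ ((num_qps_pow2 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) &
+ I40E_AQ_VSI_TC_QUE_NUMBER_MASK));
+}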
+
+/* Add Port Virtualizer (direct 0x0220)
+ * also used for Update PV (direct 0x0221), but only the flags are
+ * used (IS_CTRL_PORT only works on Add PV)
+ */
+struct i40e_aqc_add_update_pv {
+ __le16 command_flags;
+#define I40E_AQC_PV_FLAG_PV_TYPE 0x1
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4
+#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8
+ __le16 uplink_seid;
+ __le16 connected_seid;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv);
+
+struct i40e_aqc_add_update_pv_completion {
+ /* reserved for update; for add also encodes error if rc == ENOSPC */
+ __le16 pv_seid;
+#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1
+#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2
+#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4
+#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion);
+
+/* Get PV Params (direct 0x0222)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+
+struct i40e_aqc_get_pv_params_completion {
+ __le16 seid;
+ __le16 default_stag;
+ __le16 pv_flags; /* same flags as add_pv */
+#define I40E_AQC_GET_PV_PV_TYPE 0x1
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4
+ u8 reserved[8];
+ __le16 default_port_seid;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion);
+
+/* Add VEB (direct 0x0230) */
+struct i40e_aqc_add_veb {
+ __le16 uplink_seid;
+ __le16 downlink_seid;
+ __le16 veb_flags;
+#define I40E_AQC_ADD_VEB_FLOATING 0x1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \
+ I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT)
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4
+#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8
+ u8 enable_tcs;
+ u8 reserved[9];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb);
+
+struct i40e_aqc_add_veb_completion {
+ u8 reserved[6];
+ __le16 switch_seid;
+ /* also encodes error if rc == ENOSPC; codes are the same as add_pv */
+ __le16 veb_seid;
+#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1
+#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2
+#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4
+#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion);
+
+/* Get VEB Parameters (direct 0x0232)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+struct i40e_aqc_get_veb_parameters_completion {
+ __le16 seid;
+ __le16 switch_id;
+ __le16 veb_flags; /* only the first/last flags from 0x0230 are valid */
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
+ u8 reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
+
+/* Delete Element (direct 0x0243)
+ * uses the generic i40e_aqc_switch_seid
+ */
+
+/* Add MAC-VLAN (indirect 0x0250) */
+
+/* used for the command for most vlan commands */
+struct i40e_aqc_macvlan {
+ __le16 num_addresses;
+ __le16 seid[3];
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
+#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan);
+
+/* indirect data for command and response */
+struct i40e_aqc_add_macvlan_element_data {
+ u8 mac_addr[6];
+ __le16 vlan_tag;
+ __le16 flags;
+#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001
+#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002
+#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004
+#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008
+ __le16 queue_number;
+#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0
+#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \
+ I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT)
+ /* response section */
+ u8 match_method;
+#define I40E_AQC_MM_PERFECT_MATCH 0x01
+#define I40E_AQC_MM_HASH_MATCH 0x02
+#define I40E_AQC_MM_ERR_NO_RES 0xFF
+ u8 reserved1[3];
+};
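+
+/* Sketch, not upstream code: filling one add-macvlan element for a
+ * perfect match that ignores the VLAN tag. The helper name is
+ * hypothetical and assumes <linux/string.h> for memcpy().
+ */
+static inline void
+i40e_aqc_fill_macvlan_example(struct i40e_aqc_add_macvlan_element_data *e,
+ const u8 *mac)
+{
+ memcpy(e->mac_addr, mac, 6);
+ e->vlan_tag = 0;
+ e->flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
+ I40E_AQC_MACVLAN_ADD_IGNORE_VLAN);
+}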
+
+struct i40e_aqc_add_remove_macvlan_completion {
+ __le16 perfect_mac_used;
+ __le16 perfect_mac_free;
+ __le16 unicast_hash_free;
+ __le16 multicast_hash_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion);
+
+/* Remove MAC-VLAN (indirect 0x0251)
+ * uses i40e_aqc_macvlan for the descriptor
+ * data points to an array of num_addresses of elements
+ */
+
+struct i40e_aqc_remove_macvlan_element_data {
+ u8 mac_addr[6];
+ __le16 vlan_tag;
+ u8 flags;
+#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01
+#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02
+#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08
+#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10
+ u8 reserved[3];
+ /* reply section */
+ u8 error_code;
+#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0
+#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF
+ u8 reply_reserved[3];
+};
+
+/* Add VLAN (indirect 0x0252)
+ * Remove VLAN (indirect 0x0253)
+ * use the generic i40e_aqc_macvlan for the command
+ */
+struct i40e_aqc_add_remove_vlan_element_data {
+ __le16 vlan_tag;
+ u8 vlan_flags;
+/* flags for add VLAN */
+#define I40E_AQC_ADD_VLAN_LOCAL 0x1
+#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1
+#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << \
+ I40E_AQC_ADD_PVLAN_TYPE_SHIFT)
+#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0
+#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2
+#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4
+#define I40E_AQC_VLAN_PTYPE_SHIFT 3
+#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT)
+#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0
+#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8
+#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10
+#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18
+/* flags for remove VLAN */
+#define I40E_AQC_REMOVE_VLAN_ALL 0x1
+ u8 reserved;
+ u8 result;
+/* flags for add VLAN */
+#define I40E_AQC_ADD_VLAN_SUCCESS 0x0
+#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE
+#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF
+/* flags for remove VLAN */
+#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0
+#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF
+ u8 reserved1[3];
+};
+
+struct i40e_aqc_add_remove_vlan_completion {
+ u8 reserved[4];
+ __le16 vlans_used;
+ __le16 vlans_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Set VSI Promiscuous Modes (direct 0x0254) */
+struct i40e_aqc_set_vsi_promiscuous_modes {
+ __le16 promiscuous_flags;
+ __le16 valid_flags;
+/* flags used for both fields above */
+#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01
+#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02
+#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04
+#define I40E_AQC_SET_VSI_DEFAULT 0x08
+#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
+ __le16 seid;
+#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
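+
+/* Usage sketch: valid_flags selects which bits are being changed, so
+ * enabling unicast promiscuous mode alone would set the same bit in
+ * both fields (cmd being a pointer to this structure):
+ *
+ * cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+ * cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+ */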
+
+/* Add S/E-tag command (direct 0x0255)
+ * Uses generic i40e_aqc_add_remove_tag_completion for completion
+ */
+struct i40e_aqc_add_tag {
+ __le16 flags;
+#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001
+ __le16 seid;
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 tag;
+ __le16 queue_number;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag);
+
+struct i40e_aqc_add_remove_tag_completion {
+ u8 reserved[12];
+ __le16 tags_used;
+ __le16 tags_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion);
+
+/* Remove S/E-tag command (direct 0x0256)
+ * Uses generic i40e_aqc_add_remove_tag_completion for completion
+ */
+struct i40e_aqc_remove_tag {
+ __le16 seid;
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 tag;
+ u8 reserved[12];
+};
+
+/* Add multicast E-Tag (direct 0x0257)
+ * Delete multicast E-Tag (direct 0x0258); the delete uses only the
+ * pv_seid and etag fields and no external data
+ */
+struct i40e_aqc_add_remove_mcast_etag {
+ __le16 pv_seid;
+ __le16 etag;
+ u8 num_unicast_etags;
+ u8 reserved[3];
+ __le32 addr_high; /* address of array of 2-byte s-tags */
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag);
+
+struct i40e_aqc_add_remove_mcast_etag_completion {
+ u8 reserved[4];
+ __le16 mcast_etags_used;
+ __le16 mcast_etags_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion);
+
+/* Update S/E-Tag (direct 0x0259) */
+struct i40e_aqc_update_tag {
+ __le16 seid;
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 old_tag;
+ __le16 new_tag;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag);
+
+struct i40e_aqc_update_tag_completion {
+ u8 reserved[12];
+ __le16 tags_used;
+ __le16 tags_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion);
+
+/* Add Control Packet filter (direct 0x025A)
+ * Remove Control Packet filter (direct 0x025B)
+ * uses the i40e_aqc_add_remove_control_packet_filter structure below
+ * and its dedicated completion structure
+ */
+struct i40e_aqc_add_remove_control_packet_filter {
+ u8 mac[6];
+ __le16 etype;
+ __le16 flags;
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000
+ __le16 seid;
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT)
+ __le16 queue;
+ u8 reserved[2];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter);
+
+struct i40e_aqc_add_remove_control_packet_filter_completion {
+ __le16 mac_etype_used;
+ __le16 etype_used;
+ __le16 mac_etype_free;
+ __le16 etype_free;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion);
+
+/* Add Cloud filters (indirect 0x025C)
+ * Remove Cloud filters (indirect 0x025D)
+ * uses the i40e_aqc_add_remove_cloud_filters,
+ * and the generic indirect completion structure
+ */
+struct i40e_aqc_add_remove_cloud_filters {
+ u8 num_filters;
+ u8 reserved;
+ __le16 seid;
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
+ u8 reserved2[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters);
+
+struct i40e_aqc_add_remove_cloud_filters_element_data {
+ u8 outer_mac[6];
+ u8 inner_mac[6];
+ __le16 inner_vlan;
+ union {
+ struct {
+ u8 reserved[12];
+ u8 data[4];
+ } v4;
+ struct {
+ u8 data[16];
+ } v6;
+ } ipaddr;
+ __le16 flags;
+#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
+ I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
+/* note: the next three defines overlap with values in the numbered
+ * list below
+ */
+#define I40E_AQC_ADD_CLOUD_FILTER_OIP_GRE 0x0002
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_GRE 0x0004
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_VNL 0x0007
+/* 0x0000 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
+/* 0x0002 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004
+/* 0x0005 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006
+/* 0x0007 reserved */
+/* 0x0008 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B
+#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C
+
+#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
+#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6
+#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100
+
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3
+
+ __le32 tenant_id;
+ u8 reserved[4];
+ __le16 queue_number;
+#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \
+ I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
+ u8 reserved2[14];
+ /* response section */
+ u8 allocation_result;
+#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0
+#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF
+ u8 response_reserved[7];
+};
+
+struct i40e_aqc_remove_cloud_filters_completion {
+ __le16 perfect_ovlan_used;
+ __le16 perfect_ovlan_free;
+ __le16 vlan_used;
+ __le16 vlan_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion);
+
+/* Add Mirror Rule (indirect or direct 0x0260)
+ * Delete Mirror Rule (indirect or direct 0x0261)
+ * note: rule types 4 and 5 do not use an external buffer,
+ * so take care to set the flags correctly.
+ */
+struct i40e_aqc_add_delete_mirror_rule {
+ __le16 seid;
+ __le16 rule_type;
+#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0
+#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \
+ I40E_AQC_MIRROR_RULE_TYPE_SHIFT)
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2
+#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5
+ __le16 num_entries;
+ __le16 destination; /* VSI for add, rule id for delete */
+ __le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule);
+
+struct i40e_aqc_add_delete_mirror_rule_completion {
+ u8 reserved[2];
+ __le16 rule_id; /* only used on add */
+ __le16 mirror_rules_used;
+ __le16 mirror_rules_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
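+
+/* Usage sketch (hypothetical seid): mirroring VLANs 10 and 20 to the
+ * VSI with seid 0x0123 would use a DMA buffer of two __le16 VLAN ids
+ * and a descriptor (cmd) filled roughly as:
+ *
+ * cmd->rule_type = cpu_to_le16(I40E_AQC_MIRROR_RULE_TYPE_VLAN);
+ * cmd->num_entries = cpu_to_le16(2);
+ * cmd->destination = cpu_to_le16(0x0123);
+ */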
+
+/* Set Storm Control Configuration (direct 0x0280)
+ * Get Storm Control Configuration (direct 0x0281)
+ * the command and response use the same descriptor structure
+ */
+struct i40e_aqc_set_get_storm_control_config {
+ __le32 broadcast_threshold;
+ __le32 multicast_threshold;
+ __le32 control_flags;
+#define I40E_AQC_STORM_CONTROL_MDIPW 0x01
+#define I40E_AQC_STORM_CONTROL_MDICW 0x02
+#define I40E_AQC_STORM_CONTROL_BDIPW 0x04
+#define I40E_AQC_STORM_CONTROL_BDICW 0x08
+#define I40E_AQC_STORM_CONTROL_BIDU 0x10
+#define I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT 8
+#define I40E_AQC_STORM_CONTROL_INTERVAL_MASK (0x3FF << \
+ I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT)
+ u8 reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_get_storm_control_config);
+
+/* DCB 0x03xx*/
+
+/* PFC Ignore (direct 0x0301)
+ * the command and response use the same descriptor structure
+ */
+struct i40e_aqc_pfc_ignore {
+ u8 tc_bitmap;
+ u8 command_flags; /* unused on response */
+#define I40E_AQC_PFC_IGNORE_SET 0x80
+#define I40E_AQC_PFC_IGNORE_CLEAR 0x0
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore);
+
+/* DCB Update (direct 0x0302) uses the i40e_aq_desc structure
+ * with no parameters
+ */
+
+/* TX scheduler 0x04xx */
+
+/* Almost all the indirect commands use
+ * this generic struct to pass the SEID in param0
+ */
+struct i40e_aqc_tx_sched_ind {
+ __le16 vsi_seid;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind);
+
+/* Several commands respond with a set of queue set handles */
+struct i40e_aqc_qs_handles_resp {
+ __le16 qs_handles[8];
+};
+
+/* Configure VSI BW limits (direct 0x0400) */
+struct i40e_aqc_configure_vsi_bw_limit {
+ __le16 vsi_seid;
+ u8 reserved[2];
+ __le16 credit;
+ u8 reserved1[2];
+ u8 max_credit; /* 0-3, limit = 2^max */
+ u8 reserved2[7];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit);
+
+/* Configure VSI Bandwidth Limit per Traffic Type (indirect 0x0406)
+ * responds with i40e_aqc_qs_handles_resp
+ */
+struct i40e_aqc_configure_vsi_ets_sla_bw_data {
+ u8 tc_valid_bits;
+ u8 reserved[15];
+ __le16 tc_bw_credits[8]; /* FW writes back QS handles here */
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved1[28];
+};
+
+/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
+ * responds with i40e_aqc_qs_handles_resp
+ */
+struct i40e_aqc_configure_vsi_tc_bw_data {
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ u8 tc_bw_credits[8];
+ u8 reserved1[4];
+ __le16 qs_handles[8];
+};
+
+/* Query vsi bw configuration (indirect 0x0408) */
+struct i40e_aqc_query_vsi_bw_config_resp {
+ u8 tc_valid_bits;
+ u8 tc_suspended_bits;
+ u8 reserved[14];
+ __le16 qs_handles[8];
+ u8 reserved1[4];
+ __le16 port_bw_limit;
+ u8 reserved2[2];
+ u8 max_bw; /* 0-3, limit = 2^max */
+ u8 reserved3[23];
+};
+
+/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
+struct i40e_aqc_query_vsi_ets_sla_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ u8 share_credits[8];
+ __le16 credits[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+};
+
+/* Configure Switching Component Bandwidth Limit (direct 0x0410) */
+struct i40e_aqc_configure_switching_comp_bw_limit {
+ __le16 seid;
+ u8 reserved[2];
+ __le16 credit;
+ u8 reserved1[2];
+ u8 max_bw; /* 0-3, limit = 2^max */
+ u8 reserved2[7];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
+
+/* Enable Physical Port ETS (indirect 0x0413)
+ * Modify Physical Port ETS (indirect 0x0414)
+ * Disable Physical Port ETS (indirect 0x0415)
+ */
+struct i40e_aqc_configure_switching_comp_ets_data {
+ u8 reserved[4];
+ u8 tc_valid_bits;
+ u8 reserved1;
+ u8 tc_strict_priority_flags;
+ u8 reserved2[17];
+ u8 tc_bw_share_credits[8];
+ u8 reserved3[96];
+};
+
+/* Configure Switching Component Bandwidth Limits per TC (indirect 0x0416) */
+struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
+ u8 tc_valid_bits;
+ u8 reserved[15];
+ __le16 tc_bw_credit[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved1[28];
+};
+
+/* Configure Switching Component Bandwidth Allocation per TC
+ * (indirect 0x0417)
+ */
+struct i40e_aqc_configure_switching_comp_bw_config_data {
+ u8 tc_valid_bits;
+ u8 reserved[2];
+ u8 absolute_credits; /* bool */
+ u8 tc_bw_share_credits[8];
+ u8 reserved1[20];
+};
+
+/* Query Switching Component Configuration (indirect 0x0418) */
+struct i40e_aqc_query_switching_comp_ets_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[35];
+ __le16 port_bw_limit;
+ u8 reserved1[2];
+ u8 tc_bw_max; /* 0-3, limit = 2^max */
+ u8 reserved2[23];
+};
+
+/* Query PhysicalPort ETS Configuration (indirect 0x0419) */
+struct i40e_aqc_query_port_ets_config_resp {
+ u8 reserved[4];
+ u8 tc_valid_bits;
+ u8 reserved1;
+ u8 tc_strict_priority_bits;
+ u8 reserved2;
+ u8 tc_bw_share_credits[8];
+ __le16 tc_bw_limits[8];
+
+ /* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved3[32];
+};
+
+/* Query Switching Component Bandwidth Allocation per Traffic Type
+ * (indirect 0x041A)
+ */
+struct i40e_aqc_query_switching_comp_bw_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[2];
+ u8 absolute_credits_enable; /* bool */
+ u8 tc_bw_share_credits[8];
+ __le16 tc_bw_limits[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+};
+
+/* Suspend/resume port TX traffic
+ * (direct 0x041B and 0x041C) uses the generic SEID struct
+ */
+
+/* Get (direct 0x0500) and Set (direct 0x0501) the active HMC resource
+ * profile and status.
+ */
+struct i40e_aq_get_set_hmc_resource_profile {
+ u8 pm_profile;
+ u8 pe_vf_enabled;
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
+
+enum i40e_aq_hmc_profile {
+ /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
+ I40E_HMC_PROFILE_DEFAULT = 1,
+ I40E_HMC_PROFILE_FAVOR_VF = 2,
+ I40E_HMC_PROFILE_EQUAL = 3,
+};
+
+#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF
+#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F
+
+/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
+
+/* set in param0 for get phy abilities to report qualified modules */
+#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001
+#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002
+
+enum i40e_aq_phy_type {
+ I40E_PHY_TYPE_SGMII = 0x0,
+ I40E_PHY_TYPE_1000BASE_KX = 0x1,
+ I40E_PHY_TYPE_10GBASE_KX4 = 0x2,
+ I40E_PHY_TYPE_10GBASE_KR = 0x3,
+ I40E_PHY_TYPE_40GBASE_KR4 = 0x4,
+ I40E_PHY_TYPE_XAUI = 0x5,
+ I40E_PHY_TYPE_XFI = 0x6,
+ I40E_PHY_TYPE_SFI = 0x7,
+ I40E_PHY_TYPE_XLAUI = 0x8,
+ I40E_PHY_TYPE_XLPPI = 0x9,
+ I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA,
+ I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB,
+ I40E_PHY_TYPE_100BASE_TX = 0x11,
+ I40E_PHY_TYPE_1000BASE_T = 0x12,
+ I40E_PHY_TYPE_10GBASE_T = 0x13,
+ I40E_PHY_TYPE_10GBASE_SR = 0x14,
+ I40E_PHY_TYPE_10GBASE_LR = 0x15,
+ I40E_PHY_TYPE_10GBASE_SFPP_CU = 0x16,
+ I40E_PHY_TYPE_10GBASE_CR1 = 0x17,
+ I40E_PHY_TYPE_40GBASE_CR4 = 0x18,
+ I40E_PHY_TYPE_40GBASE_SR4 = 0x19,
+ I40E_PHY_TYPE_40GBASE_LR4 = 0x1A,
+ I40E_PHY_TYPE_20GBASE_KR2 = 0x1B,
+ I40E_PHY_TYPE_MAX
+};
+
+#define I40E_LINK_SPEED_100MB_SHIFT 0x1
+#define I40E_LINK_SPEED_1000MB_SHIFT 0x2
+#define I40E_LINK_SPEED_10GB_SHIFT 0x3
+#define I40E_LINK_SPEED_40GB_SHIFT 0x4
+#define I40E_LINK_SPEED_20GB_SHIFT 0x5
+
+enum i40e_aq_link_speed {
+ I40E_LINK_SPEED_UNKNOWN = 0,
+ I40E_LINK_SPEED_100MB = (1 << I40E_LINK_SPEED_100MB_SHIFT),
+ I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT),
+ I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT),
+ I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT),
+ I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT)
+};
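+
+/* Sketch, not part of the spec: mapping a single speed bit to Mb/s for
+ * logging; the helper name is hypothetical.
+ */
+static inline u32 i40e_aq_link_speed_to_mbps(enum i40e_aq_link_speed speed)
+{
+ switch (speed) {
+ case I40E_LINK_SPEED_100MB: return 100;
+ case I40E_LINK_SPEED_1GB: return 1000;
+ case I40E_LINK_SPEED_10GB: return 10000;
+ case I40E_LINK_SPEED_20GB: return 20000;
+ case I40E_LINK_SPEED_40GB: return 40000;
+ default: return 0;
+ }
+}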
+
+struct i40e_aqc_module_desc {
+ u8 oui[3];
+ u8 reserved1;
+ u8 part_number[16];
+ u8 revision[4];
+ u8 reserved2[8];
+};
+
+struct i40e_aq_get_phy_abilities_resp {
+ __le32 phy_type; /* bitmap using the above enum for offsets */
+ u8 link_speed; /* bitmap using the above enum bit patterns */
+ u8 abilities;
+#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
+#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
+#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04
+#define I40E_AQ_PHY_FLAG_AN_SHIFT 3
+#define I40E_AQ_PHY_FLAG_AN_MASK (0x3 << I40E_AQ_PHY_FLAG_AN_SHIFT)
+#define I40E_AQ_PHY_FLAG_AN_OFF 0x00 /* link forced on */
+#define I40E_AQ_PHY_FLAG_AN_OFF_LINK_DOWN 0x01
+#define I40E_AQ_PHY_FLAG_AN_ON 0x02
+#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
+ __le16 eee_capability;
+#define I40E_AQ_EEE_100BASE_TX 0x0002
+#define I40E_AQ_EEE_1000BASE_T 0x0004
+#define I40E_AQ_EEE_10GBASE_T 0x0008
+#define I40E_AQ_EEE_1000BASE_KX 0x0010
+#define I40E_AQ_EEE_10GBASE_KX4 0x0020
+#define I40E_AQ_EEE_10GBASE_KR 0x0040
+ __le32 eeer_val;
+ u8 d3_lpan;
+#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
+ u8 reserved[3];
+ u8 phy_id[4];
+ u8 module_type[3];
+ u8 qualified_module_count;
+#define I40E_AQ_PHY_MAX_QMS 16
+ struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS];
+};
+
+/* Set PHY Config (direct 0x0601) */
+struct i40e_aq_set_phy_config { /* same bits as above in all */
+ __le32 phy_type;
+ u8 link_speed;
+ u8 abilities;
+/* bits 0-2 use the values from get_phy_abilities_resp */
+#define I40E_AQ_PHY_ENABLE_LINK 0x08
+#define I40E_AQ_PHY_ENABLE_AN 0x10
+#define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20
+ __le16 eee_capability;
+ __le32 eeer;
+ u8 low_power_ctrl;
+ u8 reserved[3];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
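+
+/* Usage sketch: a driver restarting link with autonegotiation would
+ * typically copy the Get PHY Abilities response into this struct and
+ * then set (cmd being a pointer to this structure)
+ *
+ * cmd->abilities |= I40E_AQ_PHY_ENABLE_AN | I40E_AQ_PHY_ENABLE_LINK;
+ */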
+
+/* Set MAC Config command data structure (direct 0x0603) */
+struct i40e_aq_set_mac_config {
+ __le16 max_frame_size;
+ u8 params;
+#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04
+#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
+#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
+#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
+#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
+#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
+ u8 tx_timer_priority; /* bitmap */
+ __le16 tx_timer_value;
+ __le16 fc_refresh_threshold;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config);
+
+/* Restart Auto-Negotiation (direct 0x0605) */
+struct i40e_aqc_set_link_restart_an {
+ u8 command;
+#define I40E_AQ_PHY_RESTART_AN 0x02
+#define I40E_AQ_PHY_LINK_ENABLE 0x04
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an);
+
+/* Get Link Status cmd & response data structure (direct 0x0607) */
+struct i40e_aqc_get_link_status {
+ __le16 command_flags; /* only field set on command */
+#define I40E_AQ_LSE_MASK 0x3
+#define I40E_AQ_LSE_NOP 0x0
+#define I40E_AQ_LSE_DISABLE 0x2
+#define I40E_AQ_LSE_ENABLE 0x3
+/* only response uses this flag */
+#define I40E_AQ_LSE_IS_ENABLED 0x1
+ u8 phy_type; /* i40e_aq_phy_type */
+ u8 link_speed; /* i40e_aq_link_speed */
+ u8 link_info;
+#define I40E_AQ_LINK_UP 0x01
+#define I40E_AQ_LINK_FAULT 0x02
+#define I40E_AQ_LINK_FAULT_TX 0x04
+#define I40E_AQ_LINK_FAULT_RX 0x08
+#define I40E_AQ_LINK_FAULT_REMOTE 0x10
+#define I40E_AQ_MEDIA_AVAILABLE 0x40
+#define I40E_AQ_SIGNAL_DETECT 0x80
+ u8 an_info;
+#define I40E_AQ_AN_COMPLETED 0x01
+#define I40E_AQ_LP_AN_ABILITY 0x02
+#define I40E_AQ_PD_FAULT 0x04
+#define I40E_AQ_FEC_EN 0x08
+#define I40E_AQ_PHY_LOW_POWER 0x10
+#define I40E_AQ_LINK_PAUSE_TX 0x20
+#define I40E_AQ_LINK_PAUSE_RX 0x40
+#define I40E_AQ_QUALIFIED_MODULE 0x80
+ u8 ext_info;
+#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01
+#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02
+#define I40E_AQ_LINK_TX_SHIFT 0x02
+#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT)
+#define I40E_AQ_LINK_TX_ACTIVE 0x00
+#define I40E_AQ_LINK_TX_DRAINED 0x01
+#define I40E_AQ_LINK_TX_FLUSHED 0x03
+ u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
+ __le16 max_frame_size;
+ u8 config;
+#define I40E_AQ_CONFIG_CRC_ENA 0x04
+#define I40E_AQ_CONFIG_PACING_MASK 0x78
+ u8 reserved[5];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
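+
+/* Sketch, hypothetical helper: decoding the link_info byte from a Get
+ * Link Status response.
+ */
+static inline bool
+i40e_aq_link_is_up_example(const struct i40e_aqc_get_link_status *ls)
+{
+ return !!(ls->link_info & I40E_AQ_LINK_UP);
+}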
+
+/* Set event mask command (direct 0x0613) */
+struct i40e_aqc_set_phy_int_mask {
+ u8 reserved[8];
+ __le16 event_mask;
+#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002
+#define I40E_AQ_EVENT_MEDIA_NA 0x0004
+#define I40E_AQ_EVENT_LINK_FAULT 0x0008
+#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010
+#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020
+#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040
+#define I40E_AQ_EVENT_AN_COMPLETED 0x0080
+#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100
+#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200
+ u8 reserved1[6];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask);
+
+/* Get Local AN advt register (direct 0x0614)
+ * Set Local AN advt register (direct 0x0615)
+ * Get Link Partner AN advt register (direct 0x0616)
+ */
+struct i40e_aqc_an_advt_reg {
+ __le32 local_an_reg0;
+ __le16 local_an_reg1;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg);
+
+/* Set Loopback mode (0x0618) */
+struct i40e_aqc_set_lb_mode {
+ __le16 lb_mode;
+#define I40E_AQ_LB_PHY_LOCAL 0x01
+#define I40E_AQ_LB_PHY_REMOTE 0x02
+#define I40E_AQ_LB_MAC_LOCAL 0x04
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
+
+/* Set PHY Reset command (0x0622) */
+struct i40e_aqc_set_phy_reset {
+ u8 reset_flags;
+#define I40E_AQ_PHY_RESET_REQUEST 0x02
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_reset);
+
+enum i40e_aq_phy_reg_type {
+ I40E_AQC_PHY_REG_INTERNAL = 0x1,
+ I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2,
+ I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3
+};
+
+/* NVM Read command (indirect 0x0701)
+ * NVM Erase commands (direct 0x0702)
+ * NVM Update commands (indirect 0x0703)
+ */
+struct i40e_aqc_nvm_update {
+ u8 command_flags;
+#define I40E_AQ_NVM_LAST_CMD 0x01
+#define I40E_AQ_NVM_FLASH_ONLY 0x80
+ u8 module_pointer;
+ __le16 length;
+ __le32 offset;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
+
+/* Send to PF command (indirect 0x0801); id is only used by PF
+ * Send to VF command (indirect 0x0802); id is only used by PF
+ * Send to Peer PF command (indirect 0x0803)
+ */
+struct i40e_aqc_pf_vf_message {
+ __le32 id;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message);
+
+/* Alternate structure */
+
+/* Direct write (direct 0x0900)
+ * Direct read (direct 0x0902)
+ */
+struct i40e_aqc_alternate_write {
+ __le32 address0;
+ __le32 data0;
+ __le32 address1;
+ __le32 data1;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write);
+
+/* Indirect write (indirect 0x0901)
+ * Indirect read (indirect 0x0903)
+ */
+
+struct i40e_aqc_alternate_ind_write {
+ __le32 address;
+ __le32 length;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write);
+
+/* Done alternate write (direct 0x0904)
+ * uses i40e_aq_desc
+ */
+struct i40e_aqc_alternate_write_done {
+ __le16 cmd_flags;
+#define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1
+#define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0
+#define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1
+#define I40E_AQ_ALTERNATE_RESET_NEEDED 2
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done);
+
+/* Set OEM mode (direct 0x0905) */
+struct i40e_aqc_alternate_set_mode {
+ __le32 mode;
+#define I40E_AQ_ALTERNATE_MODE_NONE 0
+#define I40E_AQ_ALTERNATE_MODE_OEM 1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode);
+
+/* Clear port Alternate RAM (direct 0x0906) uses i40e_aq_desc */
+
+/* async events 0x10xx */
+
+/* Lan Queue Overflow Event (direct, 0x1001) */
+struct i40e_aqc_lan_overflow {
+ __le32 prtdcb_rupto;
+ __le32 otx_ctl;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow);
+
+/* Get LLDP MIB (indirect 0x0A00) */
+struct i40e_aqc_lldp_get_mib {
+ u8 type;
+ u8 reserved1;
+#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3
+#define I40E_AQ_LLDP_MIB_LOCAL 0x0
+#define I40E_AQ_LLDP_MIB_REMOTE 0x1
+#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC
+#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1
+#define I40E_AQ_LLDP_TX_SHIFT 0x4
+#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT)
+/* TX pause flags use I40E_AQ_LINK_TX_* above */
+ __le16 local_len;
+ __le16 remote_len;
+ u8 reserved2[2];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib);
+
+/* Configure LLDP MIB Change Event (direct 0x0A01)
+ * also used for the event (with type in the command field)
+ */
+struct i40e_aqc_lldp_update_mib {
+ u8 command;
+#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
+#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib);
+
+/* Add LLDP TLV (indirect 0x0A02)
+ * Delete LLDP TLV (indirect 0x0A04)
+ */
+struct i40e_aqc_lldp_add_tlv {
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved1[1];
+ __le16 len;
+ u8 reserved2[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv);
+
+/* Update LLDP TLV (indirect 0x0A03) */
+struct i40e_aqc_lldp_update_tlv {
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved;
+ __le16 old_len;
+ __le16 new_offset;
+ __le16 new_len;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv);
+
+/* Stop LLDP (direct 0x0A05) */
+struct i40e_aqc_lldp_stop {
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_STOP 0x0
+#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
+
+/* Start LLDP (direct 0x0A06) */
+
+struct i40e_aqc_lldp_start {
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_START 0x1
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
+
+/* Apply MIB changes (0x0A07)
+ * uses the generic struct as it contains no data
+ */
+
+/* Add Udp Tunnel command and completion (direct 0x0B00) */
+struct i40e_aqc_add_udp_tunnel {
+ __le16 udp_port;
+ u8 header_len; /* in DWords, 1 to 15 */
+ u8 protocol_type;
+#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x0
+#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x2
+#define I40E_AQC_TUNNEL_TYPE_NGE 0x3
+ u8 variable_udp_length;
+#define I40E_AQC_TUNNEL_FIXED_UDP_LENGTH 0x0
+#define I40E_AQC_TUNNEL_VARIABLE_UDP_LENGTH 0x1
+ u8 udp_key_index;
+#define I40E_AQC_TUNNEL_KEY_INDEX_VXLAN 0x0
+#define I40E_AQC_TUNNEL_KEY_INDEX_NGE 0x1
+#define I40E_AQC_TUNNEL_KEY_INDEX_PROPRIETARY_UDP 0x2
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
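+
+/* Usage sketch: registering the IANA-assigned VXLAN port would look
+ * like (cmd being a pointer to this structure)
+ *
+ * cmd->udp_port = cpu_to_le16(4789);
+ * cmd->protocol_type = I40E_AQC_TUNNEL_TYPE_VXLAN;
+ */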
+
+struct i40e_aqc_add_udp_tunnel_completion {
+ __le16 udp_port;
+ u8 filter_entry_index;
+ u8 multiple_pfs;
+#define I40E_AQC_SINGLE_PF 0x0
+#define I40E_AQC_MULTIPLE_PFS 0x1
+ u8 total_filters;
+ u8 reserved[11];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion);
+
+/* remove UDP Tunnel command (0x0B01) */
+struct i40e_aqc_remove_udp_tunnel {
+ u8 reserved[2];
+ u8 index; /* 0 to 15 */
+ u8 pf_filters;
+ u8 total_filters;
+ u8 reserved2[11];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
+
+struct i40e_aqc_del_udp_tunnel_completion {
+ __le16 udp_port;
+ u8 index; /* 0 to 15 */
+ u8 multiple_pfs;
+ u8 total_filters_used;
+ u8 reserved;
+ u8 tunnels_free;
+ u8 reserved1[9];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
+
+/* tunnel key structure 0x0B10 */
+
+struct i40e_aqc_tunnel_key_structure_A0 {
+ __le16 key1_off;
+ __le16 key1_len;
+ __le16 key2_off;
+ __le16 key2_len;
+ __le16 flags;
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
+/* response flags */
+#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
+#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
+ u8 reserved[6];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure_A0);
+
+struct i40e_aqc_tunnel_key_structure {
+ u8 key1_off;
+ u8 key2_off;
+ u8 key1_len; /* 0 to 15 */
+ u8 key2_len; /* 0 to 15 */
+ u8 flags;
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
+/* response flags */
+#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
+#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
+ u8 network_key_index;
+#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0
+#define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1
+#define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP 0x2
+#define I40E_AQC_NETWORK_KEY_INDEX_GRE 0x3
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure);
+
+/* OEM mode commands (direct 0xFE0x) */
+struct i40e_aqc_oem_param_change {
+ __le32 param_type;
+#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0
+#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1
+#define I40E_AQ_OEM_PARAM_MAC 2
+ __le32 param_value1;
+ u8 param_value2[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change);
+
+struct i40e_aqc_oem_state_change {
+ __le32 state;
+#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0
+#define I40E_AQ_OEM_STATE_LINK_UP 0x1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
+
+/* debug commands */
+
+/* get device id (0xFF00) uses the generic structure */
+
+/* set test mode (0xFF01, internal) */
+
+struct i40e_acq_set_test_mode {
+ u8 mode;
+#define I40E_AQ_TEST_PARTIAL 0
+#define I40E_AQ_TEST_FULL 1
+#define I40E_AQ_TEST_NVM 2
+ u8 reserved[3];
+ u8 command;
+#define I40E_AQ_TEST_OPEN 0
+#define I40E_AQ_TEST_CLOSE 1
+#define I40E_AQ_TEST_INC 2
+ u8 reserved2[3];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode);
+
+/* Debug Read Register command (0xFF03)
+ * Debug Write Register command (0xFF04)
+ */
+struct i40e_aqc_debug_reg_read_write {
+ __le32 reserved;
+ __le32 address;
+ __le32 value_high;
+ __le32 value_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_reg_read_write);
+
+/* Scatter/gather Reg Read (indirect 0xFF05)
+ * Scatter/gather Reg Write (indirect 0xFF06)
+ */
+
+/* i40e_aq_desc is used for the command */
+struct i40e_aqc_debug_reg_sg_element_data {
+ __le32 address;
+ __le32 value;
+};
+
+/* Debug Modify register (direct 0xFF07) */
+struct i40e_aqc_debug_modify_reg {
+ __le32 address;
+ __le32 value;
+ __le32 clear_mask;
+ __le32 set_mask;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg);
+
+/* dump internal data (0xFF08, indirect) */
+
+#define I40E_AQ_CLUSTER_ID_AUX 0
+#define I40E_AQ_CLUSTER_ID_SWITCH_FLU 1
+#define I40E_AQ_CLUSTER_ID_TXSCHED 2
+#define I40E_AQ_CLUSTER_ID_HMC 3
+#define I40E_AQ_CLUSTER_ID_MAC0 4
+#define I40E_AQ_CLUSTER_ID_MAC1 5
+#define I40E_AQ_CLUSTER_ID_MAC2 6
+#define I40E_AQ_CLUSTER_ID_MAC3 7
+#define I40E_AQ_CLUSTER_ID_DCB 8
+#define I40E_AQ_CLUSTER_ID_EMP_MEM 9
+#define I40E_AQ_CLUSTER_ID_PKT_BUF 10
+#define I40E_AQ_CLUSTER_ID_ALTRAM 11
+
+struct i40e_aqc_debug_dump_internals {
+ u8 cluster_id;
+ u8 table_id;
+ __le16 data_size;
+ __le32 idx;
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals);
+
+struct i40e_aqc_debug_modify_internals {
+ u8 cluster_id;
+ u8 cluster_specific_params[7];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals);
+
+#endif
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_alloc.h b/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
new file mode 100644
index 000000000000..d8654fb9e525
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
@@ -0,0 +1,55 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_ALLOC_H_
+#define _I40E_ALLOC_H_
+
+struct i40e_hw;
+
+/* Memory allocation types */
+enum i40e_memory_type {
+ i40e_mem_arq_buf = 0, /* ARQ indirect command buffer */
+ i40e_mem_asq_buf = 1, /* ASQ indirect command buffer */
+ i40e_mem_atq_buf = 2, /* ATQ indirect command buffer */
+ i40e_mem_arq_ring = 3, /* ARQ descriptor ring */
+ i40e_mem_atq_ring = 4, /* ATQ descriptor ring */
+ i40e_mem_pd = 5, /* Page Descriptor */
+ i40e_mem_bp = 6, /* Backing Page - 4KB */
+ i40e_mem_bp_jumbo = 7, /* Backing Page - > 4KB */
+ i40e_mem_reserved
+};
+
+/* prototype for functions used for dynamic memory allocation */
+i40e_status i40e_allocate_dma_mem(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem,
+ enum i40e_memory_type type,
+ u64 size, u32 alignment);
+i40e_status i40e_free_dma_mem(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem);
+i40e_status i40e_allocate_virt_mem(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem,
+ u32 size);
+i40e_status i40e_free_virt_mem(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem);
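+
+/* Usage sketch (error handling trimmed): allocating a 4KB, 4KB-aligned
+ * DMA buffer for an ARQ command:
+ *
+ * struct i40e_dma_mem mem;
+ * if (i40e_allocate_dma_mem(hw, &mem, i40e_mem_arq_buf, 4096, 4096))
+ * return I40E_ERR_NO_MEMORY;
+ */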
+
+#endif /* _I40E_ALLOC_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
new file mode 100644
index 000000000000..7b13953b28c4
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -0,0 +1,254 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_type.h"
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+#include "i40e_virtchnl.h"
+
+/**
+ * i40e_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the mac type of the adapter based on the
+ * vendor ID and device ID stored in the hw structure.
+ **/
+i40e_status i40e_set_mac_type(struct i40e_hw *hw)
+{
+ i40e_status status = 0;
+
+ if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
+ switch (hw->device_id) {
+ case I40E_DEV_ID_SFP_XL710:
+ case I40E_DEV_ID_SFP_X710:
+ case I40E_DEV_ID_QEMU:
+ case I40E_DEV_ID_KX_A:
+ case I40E_DEV_ID_KX_B:
+ case I40E_DEV_ID_KX_C:
+ case I40E_DEV_ID_KX_D:
+ case I40E_DEV_ID_QSFP_A:
+ case I40E_DEV_ID_QSFP_B:
+ case I40E_DEV_ID_QSFP_C:
+ hw->mac.type = I40E_MAC_XL710;
+ break;
+ case I40E_DEV_ID_VF:
+ case I40E_DEV_ID_VF_HV:
+ hw->mac.type = I40E_MAC_VF;
+ break;
+ default:
+ hw->mac.type = I40E_MAC_GENERIC;
+ break;
+ }
+ } else {
+ status = I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
+ hw->mac.type, status);
+ return status;
+}
+
+/**
+ * i40evf_debug_aq
+ * @hw: pointer to the hw struct
+ * @mask: debug mask
+ * @desc: pointer to admin queue descriptor
+ * @buffer: pointer to command buffer
+ *
+ * Dumps debug log about adminq command with descriptor contents.
+ **/
+void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
+ void *buffer)
+{
+ struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
+ u8 *aq_buffer = (u8 *)buffer;
+ u32 data[4];
+ u32 i = 0;
+
+ if ((!(mask & hw->debug_mask)) || (desc == NULL))
+ return;
+
+ i40e_debug(hw, mask,
+ "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
+ aq_desc->opcode, aq_desc->flags, aq_desc->datalen,
+ aq_desc->retval);
+ i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
+ aq_desc->cookie_high, aq_desc->cookie_low);
+ i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
+ aq_desc->params.internal.param0,
+ aq_desc->params.internal.param1);
+ i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
+ aq_desc->params.external.addr_high,
+ aq_desc->params.external.addr_low);
+
+ if ((buffer != NULL) && (aq_desc->datalen != 0)) {
+ memset(data, 0, sizeof(data));
+ i40e_debug(hw, mask, "AQ CMD Buffer:\n");
+ for (i = 0; i < le16_to_cpu(aq_desc->datalen); i++) {
+ data[((i % 16) / 4)] |=
+ ((u32)aq_buffer[i]) << (8 * (i % 4));
+ if ((i % 16) == 15) {
+ i40e_debug(hw, mask,
+ "\t0x%04X %08X %08X %08X %08X\n",
+ i - 15, data[0], data[1], data[2],
+ data[3]);
+ memset(data, 0, sizeof(data));
+ }
+ }
+ if ((i % 16) != 0)
+ i40e_debug(hw, mask, "\t0x%04X %08X %08X %08X %08X\n",
+ i - (i % 16), data[0], data[1], data[2],
+ data[3]);
+ }
+}
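+
+/* Typical call site (sketch): dump a descriptor and its buffer before
+ * sending, gated on the AQ debug bit in hw->debug_mask, e.g.
+ *
+ * i40evf_debug_aq(hw, I40E_DEBUG_AQ_MESSAGE, &desc, buffer);
+ *
+ * (I40E_DEBUG_AQ_MESSAGE is assumed from i40e_type.h)
+ */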
+
+/**
+ * i40evf_check_asq_alive
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if Queue is enabled else false.
+ **/
+bool i40evf_check_asq_alive(struct i40e_hw *hw)
+{
+ return !!(rd32(hw, hw->aq.asq.len) & I40E_PF_ATQLEN_ATQENABLE_MASK);
+}
+
+/**
+ * i40evf_aq_queue_shutdown
+ * @hw: pointer to the hw struct
+ * @unloading: is the driver unloading itself
+ *
+ * Tell the Firmware that we're shutting down the AdminQ and whether
+ * or not the driver is unloading as well.
+ **/
+i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw,
+ bool unloading)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_queue_shutdown *cmd =
+ (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
+ i40e_status status;
+
+ i40evf_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_queue_shutdown);
+
+ if (unloading)
+ cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
+ status = i40evf_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_send_msg_to_pf
+ * @hw: pointer to the hardware structure
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cmd_details: pointer to command details
+ *
+ * Send message to PF driver using admin queue. By default, this message
+ * is sent asynchronously, i.e. i40evf_asq_send_command() does not wait for
+ * completion before returning.
+ **/
+i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
+ enum i40e_virtchnl_ops v_opcode,
+ i40e_status v_retval,
+ u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ i40e_status status;
+
+ i40evf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf);
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
+ desc.cookie_high = cpu_to_le32(v_opcode);
+ desc.cookie_low = cpu_to_le32(v_retval);
+ if (msglen) {
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF
+ | I40E_AQ_FLAG_RD));
+ if (msglen > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = cpu_to_le16(msglen);
+ }
+ if (!cmd_details) {
+ struct i40e_asq_cmd_details details;
+ memset(&details, 0, sizeof(details));
+ details.async = true;
+ cmd_details = &details;
+ }
+ status = i40evf_asq_send_command(hw, &desc, msg, msglen,
+ cmd_details);
+ return status;
+}
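+
+/* Example (sketch): a VF requesting its resource configuration sends
+ *
+ * i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ * 0, NULL, 0, NULL);
+ *
+ * and the PF's reply arrives later as an ARQ event.
+ */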
+
+/**
+ * i40e_vf_parse_hw_config
+ * @hw: pointer to the hardware structure
+ * @msg: pointer to the virtual channel VF resource structure
+ *
+ * Given a VF resource message from the PF, populate the hw struct
+ * with appropriate information.
+ **/
+void i40e_vf_parse_hw_config(struct i40e_hw *hw,
+ struct i40e_virtchnl_vf_resource *msg)
+{
+ struct i40e_virtchnl_vsi_resource *vsi_res;
+ int i;
+
+ vsi_res = &msg->vsi_res[0];
+
+ hw->dev_caps.num_vsis = msg->num_vsis;
+ hw->dev_caps.num_rx_qp = msg->num_queue_pairs;
+ hw->dev_caps.num_tx_qp = msg->num_queue_pairs;
+ hw->dev_caps.num_msix_vectors_vf = msg->max_vectors;
+ hw->dev_caps.dcb = msg->vf_offload_flags &
+ I40E_VIRTCHNL_VF_OFFLOAD_L2;
+ hw->dev_caps.fcoe = (msg->vf_offload_flags &
+ I40E_VIRTCHNL_VF_OFFLOAD_FCOE) ? 1 : 0;
+ for (i = 0; i < msg->num_vsis; i++) {
+ if (vsi_res->vsi_type == I40E_VSI_SRIOV) {
+ memcpy(hw->mac.perm_addr, vsi_res->default_mac_addr,
+ ETH_ALEN);
+ memcpy(hw->mac.addr, vsi_res->default_mac_addr,
+ ETH_ALEN);
+ }
+ vsi_res++;
+ }
+}
+
+/**
+ * i40e_vf_reset
+ * @hw: pointer to the hardware structure
+ *
+ * Send a VF_RESET message to the PF. Does not wait for response from PF
+ * as none will be forthcoming. Immediately after calling this function,
+ * the admin queue should be shut down and (optionally) reinitialized.
+ **/
+i40e_status i40e_vf_reset(struct i40e_hw *hw)
+{
+ return i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_RESET_VF,
+ 0, NULL, 0, NULL);
+}
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
new file mode 100644
index 000000000000..cb97b3eed440
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
@@ -0,0 +1,238 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_HMC_H_
+#define _I40E_HMC_H_
+
+#define I40E_HMC_MAX_BP_COUNT 512
+
+/* forward-declare the HW struct for the compiler */
+struct i40e_hw;
+
+#define I40E_HMC_INFO_SIGNATURE 0x484D5347 /* HMSG */
+#define I40E_HMC_PD_CNT_IN_SD 512
+#define I40E_HMC_DIRECT_BP_SIZE 0x200000 /* 2M */
+#define I40E_HMC_PAGED_BP_SIZE 4096
+#define I40E_HMC_PD_BP_BUF_ALIGNMENT 4096
+#define I40E_FIRST_VF_FPM_ID 16
+
+struct i40e_hmc_obj_info {
+ u64 base; /* base addr in FPM */
+ u32 max_cnt; /* max count available for this hmc func */
+ u32 cnt; /* count of objects driver actually wants to create */
+ u64 size; /* size in bytes of one object */
+};
+
+enum i40e_sd_entry_type {
+ I40E_SD_TYPE_INVALID = 0,
+ I40E_SD_TYPE_PAGED = 1,
+ I40E_SD_TYPE_DIRECT = 2
+};
+
+struct i40e_hmc_bp {
+ enum i40e_sd_entry_type entry_type;
+ struct i40e_dma_mem addr; /* populate to be used by hw */
+ u32 sd_pd_index;
+ u32 ref_cnt;
+};
+
+struct i40e_hmc_pd_entry {
+ struct i40e_hmc_bp bp;
+ u32 sd_index;
+ bool valid;
+};
+
+struct i40e_hmc_pd_table {
+ struct i40e_dma_mem pd_page_addr; /* populate to be used by hw */
+ struct i40e_hmc_pd_entry *pd_entry; /* [512] for sw bookkeeping */
+ struct i40e_virt_mem pd_entry_virt_mem; /* virt mem for pd_entry */
+
+ u32 ref_cnt;
+ u32 sd_index;
+};
+
+struct i40e_hmc_sd_entry {
+ enum i40e_sd_entry_type entry_type;
+ bool valid;
+
+ union {
+ struct i40e_hmc_pd_table pd_table;
+ struct i40e_hmc_bp bp;
+ } u;
+};
+
+struct i40e_hmc_sd_table {
+ struct i40e_virt_mem addr; /* used to track sd_entry allocations */
+ u32 sd_cnt;
+ u32 ref_cnt;
+ struct i40e_hmc_sd_entry *sd_entry; /* (sd_cnt*512) entries max */
+};
+
+struct i40e_hmc_info {
+ u32 signature;
+ /* equals the pci func num for the PF; dynamically allocated for VFs */
+ u8 hmc_fn_id;
+ u16 first_sd_index; /* index of the first available SD */
+
+ /* hmc objects */
+ struct i40e_hmc_obj_info *hmc_obj;
+ struct i40e_virt_mem hmc_obj_virt_mem;
+ struct i40e_hmc_sd_table sd_table;
+};
+
+#define I40E_INC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt++)
+#define I40E_INC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt++)
+#define I40E_INC_BP_REFCNT(bp) ((bp)->ref_cnt++)
+
+#define I40E_DEC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt--)
+#define I40E_DEC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt--)
+#define I40E_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--)
+
+/**
+ * I40E_SET_PF_SD_ENTRY - marks the sd entry as valid in the hardware
+ * @hw: pointer to our hw struct
+ * @pa: pointer to physical address
+ * @sd_index: segment descriptor index
+ * @type: if sd entry is direct or paged
+ **/
+#define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type) \
+{ \
+ u32 val1, val2, val3; \
+ val1 = (u32)(upper_32_bits(pa)); \
+ val2 = (u32)(pa) | (I40E_HMC_MAX_BP_COUNT << \
+ I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
+ ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
+ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \
+ (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
+ val3 = (sd_index) | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \
+ wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
+ wr32((hw), I40E_PFHMC_SDCMD, val3); \
+}
+
+/**
+ * I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_index: segment descriptor index
+ * @type: if sd entry is direct or paged
+ **/
+#define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type) \
+{ \
+ u32 val2, val3; \
+ val2 = (I40E_HMC_MAX_BP_COUNT << \
+ I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
+ ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
+ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \
+ val3 = (sd_index) | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \
+ wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
+ wr32((hw), I40E_PFHMC_SDCMD, val3); \
+}
+
+/**
+ * I40E_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_idx: segment descriptor index
+ * @pd_idx: page descriptor index
+ **/
+#define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \
+ wr32((hw), I40E_PFHMC_PDINV, \
+ (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
+ ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
+
+#define I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, pd_idx, hmc_fn_id) \
+ wr32((hw), I40E_GLHMC_VFPDINV((hmc_fn_id) - I40E_FIRST_VF_FPM_ID), \
+ (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
+ ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
+
+/**
+ * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @type: type of HMC resources we're searching
+ * @index: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @sd_idx: pointer to return index of the segment descriptor in question
+ * @sd_limit: pointer to return the maximum number of segment descriptors
+ *
+ * This function calculates the segment descriptor index and index limit
+ * for the resource defined by i40e_hmc_rsrc_type.
+ **/
+#define I40E_FIND_SD_INDEX_LIMIT(hmc_info, type, index, cnt, sd_idx, sd_limit)\
+{ \
+ u64 fpm_addr, fpm_limit; \
+ fpm_addr = (hmc_info)->hmc_obj[(type)].base + \
+ (hmc_info)->hmc_obj[(type)].size * (index); \
+ fpm_limit = fpm_addr + (hmc_info)->hmc_obj[(type)].size * (cnt);\
+ *(sd_idx) = (u32)(fpm_addr / I40E_HMC_DIRECT_BP_SIZE); \
+ *(sd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_DIRECT_BP_SIZE); \
+ /* add one more to the limit to correct our range */ \
+ *(sd_limit) += 1; \
+}
+
+/**
+ * I40E_FIND_PD_INDEX_LIMIT - finds page descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @type: HMC resource type we're examining
+ * @idx: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @pd_index: pointer to return page descriptor index
+ * @pd_limit: pointer to return page descriptor index limit
+ *
+ * Calculates the page descriptor index and index limit for the resource
+ * defined by i40e_hmc_rsrc_type.
+ **/
+#define I40E_FIND_PD_INDEX_LIMIT(hmc_info, type, idx, cnt, pd_index, pd_limit)\
+{ \
+	u64 fpm_addr, fpm_limit; \
+	fpm_addr = (hmc_info)->hmc_obj[(type)].base + \
+		   (hmc_info)->hmc_obj[(type)].size * (idx); \
+	fpm_limit = fpm_addr + (hmc_info)->hmc_obj[(type)].size * (cnt); \
+	*(pd_index) = (u32)(fpm_addr / I40E_HMC_PAGED_BP_SIZE); \
+ *(pd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_PAGED_BP_SIZE); \
+ /* add one more to the limit to correct our range */ \
+ *(pd_limit) += 1; \
+}
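[Annotation: to make the index arithmetic in the two macros above concrete, a worked example assuming the 2MB direct and 4KB paged backing-page sizes this header uses.]

	/* assumed: 1024 objects of 128 bytes, object base at FPM address 0 */
	/* fpm_addr = 0; fpm_limit = 1024 * 128 = 0x20000                   */
	/* sd_idx   = 0 / 0x200000 = 0                                      */
	/* sd_limit = ((0x20000 - 1) / 0x200000) + 1 = 1   (one 2MB SD)     */
	/* pd_index = 0 / 0x1000 = 0                                        */
	/* pd_limit = ((0x20000 - 1) / 0x1000) + 1 = 32    (32 4KB pages)   */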
+i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 sd_index,
+ enum i40e_sd_entry_type type,
+ u64 direct_mode_sz);
+
+i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 pd_index);
+i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
+i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
+ u32 idx);
+i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
+i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
+ u32 idx);
+i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
+
+#endif /* _I40E_HMC_H_ */
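[Annotation: the prototypes above split removal into a prepare step and a commit step. A hypothetical teardown sketch for one direct-mode SD entry, assuming a zero i40e_status means success.]

	/* check refcounts first, then free the backing page and clear the entry */
	if (!i40e_prep_remove_sd_bp(hmc_info, idx))
		ret = i40e_remove_sd_bp_new(hw, hmc_info, idx, true);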
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
new file mode 100644
index 000000000000..17e42ca26d0b
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
@@ -0,0 +1,165 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_LAN_HMC_H_
+#define _I40E_LAN_HMC_H_
+
+/* forward-declare the HW struct for the compiler */
+struct i40e_hw;
+
+/* HMC element context information */
+
+/* Rx queue context data */
+struct i40e_hmc_obj_rxq {
+ u16 head;
+ u8 cpuid;
+ u64 base;
+ u16 qlen;
+#define I40E_RXQ_CTX_DBUFF_SHIFT 7
+ u8 dbuff;
+#define I40E_RXQ_CTX_HBUFF_SHIFT 6
+ u8 hbuff;
+ u8 dtype;
+ u8 dsize;
+ u8 crcstrip;
+ u8 fc_ena;
+ u8 l2tsel;
+ u8 hsplit_0;
+ u8 hsplit_1;
+ u8 showiv;
+ u16 rxmax;
+ u8 tphrdesc_ena;
+ u8 tphwdesc_ena;
+ u8 tphdata_ena;
+ u8 tphhead_ena;
+ u8 lrxqthresh;
+};
+
+/* Tx queue context data */
+struct i40e_hmc_obj_txq {
+ u16 head;
+ u8 new_context;
+ u64 base;
+ u8 fc_ena;
+ u8 timesync_ena;
+ u8 fd_ena;
+ u8 alt_vlan_ena;
+ u16 thead_wb;
+ u16 cpuid;
+ u8 head_wb_ena;
+ u16 qlen;
+ u8 tphrdesc_ena;
+ u8 tphrpacket_ena;
+ u8 tphwdesc_ena;
+ u64 head_wb_addr;
+ u32 crc;
+ u16 rdylist;
+ u8 rdylist_act;
+};
+
+/* for hsplit_0 field of Rx HMC context */
+enum i40e_hmc_obj_rx_hsplit_0 {
+ I40E_HMC_OBJ_RX_HSPLIT_0_NO_SPLIT = 0,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 = 1,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP = 2,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP = 4,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP = 8,
+};
+
+/* fcoe_cntx and fcoe_filt are for debugging purposes only */
+struct i40e_hmc_obj_fcoe_cntx {
+ u32 rsv[32];
+};
+
+struct i40e_hmc_obj_fcoe_filt {
+ u32 rsv[8];
+};
+
+/* Context sizes for LAN objects */
+enum i40e_hmc_lan_object_size {
+ I40E_HMC_LAN_OBJ_SZ_8 = 0x3,
+ I40E_HMC_LAN_OBJ_SZ_16 = 0x4,
+ I40E_HMC_LAN_OBJ_SZ_32 = 0x5,
+ I40E_HMC_LAN_OBJ_SZ_64 = 0x6,
+ I40E_HMC_LAN_OBJ_SZ_128 = 0x7,
+ I40E_HMC_LAN_OBJ_SZ_256 = 0x8,
+ I40E_HMC_LAN_OBJ_SZ_512 = 0x9,
+};
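[Annotation: each enumerator's value is the base-2 logarithm of the size it names (8 = 2^3 encodes as 0x3, up through 512 = 2^9 as 0x9), so an object's context size is 1 << enum_value bytes.]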
+
+#define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512
+#define I40E_HMC_OBJ_SIZE_TXQ 128
+#define I40E_HMC_OBJ_SIZE_RXQ 32
+#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 128
+#define I40E_HMC_OBJ_SIZE_FCOE_FILT 64
+
+enum i40e_hmc_lan_rsrc_type {
+ I40E_HMC_LAN_FULL = 0,
+ I40E_HMC_LAN_TX = 1,
+ I40E_HMC_LAN_RX = 2,
+ I40E_HMC_FCOE_CTX = 3,
+ I40E_HMC_FCOE_FILT = 4,
+ I40E_HMC_LAN_MAX = 5
+};
+
+enum i40e_hmc_model {
+ I40E_HMC_MODEL_DIRECT_PREFERRED = 0,
+ I40E_HMC_MODEL_DIRECT_ONLY = 1,
+ I40E_HMC_MODEL_PAGED_ONLY = 2,
+ I40E_HMC_MODEL_UNKNOWN,
+};
+
+struct i40e_hmc_lan_create_obj_info {
+ struct i40e_hmc_info *hmc_info;
+ u32 rsrc_type;
+ u32 start_idx;
+ u32 count;
+ enum i40e_sd_entry_type entry_type;
+ u64 direct_mode_sz;
+};
+
+struct i40e_hmc_lan_delete_obj_info {
+ struct i40e_hmc_info *hmc_info;
+ u32 rsrc_type;
+ u32 start_idx;
+ u32 count;
+};
+
+i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
+ u32 rxq_num, u32 fcoe_cntx_num,
+ u32 fcoe_filt_num);
+i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
+ enum i40e_hmc_model model);
+i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw);
+
+i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue);
+i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_txq *s);
+i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue);
+i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_rxq *s);
+
+#endif /* _I40E_LAN_HMC_H_ */
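[Annotation: a hypothetical sketch of filling an Rx queue context and applying it with the setter above. ring_dma, ring_count, rx_buf_len, max_frame and queue are placeholder names; the 128-byte base granularity and 32-byte descriptor size are assumptions.]

	struct i40e_hmc_obj_rxq rxq = {0};
	i40e_status ret;

	rxq.base     = ring_dma / 128;	/* base assumed in 128-byte units */
	rxq.qlen     = ring_count;
	rxq.dbuff    = rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
	rxq.dsize    = 1;		/* 32-byte descriptors (assumed) */
	rxq.crcstrip = 1;
	rxq.rxmax    = max_frame;

	ret = i40e_set_lan_rx_queue_context(hw, queue, &rxq);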
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
new file mode 100644
index 000000000000..622f373b745d
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
@@ -0,0 +1,72 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_OSDEP_H_
+#define _I40E_OSDEP_H_
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/tcp.h>
+#include <linux/pci.h>
+
+/* get readq/writeq support for 32-bit kernels, use the low-first version */
+#include <asm-generic/io-64-nonatomic-lo-hi.h>
+
+/* This file provides the glue between the shared code and the
+ * actual OS primitives.
+ */
+
+#define hw_dbg(hw, S, A...) do {} while (0)
+
+#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
+#define rd32(a, reg) readl((a)->hw_addr + (reg))
+
+#define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg)))
+#define rd64(a, reg) readq((a)->hw_addr + (reg))
+#define i40e_flush(a) readl((a)->hw_addr + I40E_VFGEN_RSTAT)
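[Annotation: a small read sketch using the accessors just defined, pairing a register with its _SHIFT/_MASK field definitions; I40E_VFGEN_RSTAT's VFR_STATE field is assumed from the register header.]

	u32 val = rd32(hw, I40E_VFGEN_RSTAT);
	u32 vfr_state = (val & I40E_VFGEN_RSTAT_VFR_STATE_MASK) >>
			I40E_VFGEN_RSTAT_VFR_STATE_SHIFT;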
+
+/* memory allocation tracking */
+struct i40e_dma_mem {
+ void *va;
+ dma_addr_t pa;
+ u32 size;
+} __packed;
+
+#define i40e_allocate_dma_mem(h, m, unused, s, a) \
+ i40evf_allocate_dma_mem_d(h, m, s, a)
+#define i40e_free_dma_mem(h, m) i40evf_free_dma_mem_d(h, m)
+
+struct i40e_virt_mem {
+ void *va;
+ u32 size;
+} __packed;
+#define i40e_allocate_virt_mem(h, m, s) i40evf_allocate_virt_mem_d(h, m, s)
+#define i40e_free_virt_mem(h, m) i40evf_free_virt_mem_d(h, m)
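[Annotation: a hypothetical allocation round-trip through the wrappers above. Note the third argument of i40e_allocate_dma_mem (the memory type) is discarded by the macro; 4096 is a placeholder alignment.]

	struct i40e_dma_mem mem;
	i40e_status ret;

	ret = i40e_allocate_dma_mem(hw, &mem, 0 /* type, unused */, size, 4096);
	if (!ret) {
		/* ... use mem.va (virtual) and mem.pa (bus address) ... */
		i40e_free_dma_mem(hw, &mem);
	}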
+
+#define i40e_debug(h, m, s, ...) i40evf_debug_d(h, m, s, ##__VA_ARGS__)
+extern void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
+ __attribute__ ((format(gnu_printf, 3, 4)));
+
+typedef enum i40e_status_code i40e_status;
+#endif /* _I40E_OSDEP_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
new file mode 100644
index 000000000000..7841573a58c9
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -0,0 +1,84 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_PROTOTYPE_H_
+#define _I40E_PROTOTYPE_H_
+
+#include "i40e_type.h"
+#include "i40e_alloc.h"
+#include "i40e_virtchnl.h"
+
+/* Prototypes for shared code functions that are not in
+ * the standard function pointer structures.  They live
+ * outside those structures mostly because they are needed
+ * even before init has happened, and they assist in the
+ * early SW and FW setup.
+ */
+
+/* adminq functions */
+i40e_status i40evf_init_adminq(struct i40e_hw *hw);
+i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw);
+void i40e_adminq_init_ring_data(struct i40e_hw *hw);
+i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
+ struct i40e_arq_event_info *e,
+ u16 *events_pending);
+i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details);
+bool i40evf_asq_done(struct i40e_hw *hw);
+
+/* debug function for adminq */
+void i40evf_debug_aq(struct i40e_hw *hw,
+ enum i40e_debug_mask mask,
+ void *desc,
+ void *buffer);
+
+void i40e_idle_aq(struct i40e_hw *hw);
+void i40evf_resume_aq(struct i40e_hw *hw);
+bool i40evf_check_asq_alive(struct i40e_hw *hw);
+i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw,
+ bool unloading);
+
+i40e_status i40e_set_mac_type(struct i40e_hw *hw);
+
+/* prototype for functions used for SW locks */
+
+/* i40e_common for VF drivers */
+void i40e_vf_parse_hw_config(struct i40e_hw *hw,
+ struct i40e_virtchnl_vf_resource *msg);
+i40e_status i40e_vf_reset(struct i40e_hw *hw);
+i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
+ enum i40e_virtchnl_ops v_opcode,
+ i40e_status v_retval,
+ u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_set_filter_control(struct i40e_hw *hw,
+ struct i40e_filter_control_settings *settings);
+i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+ u8 *mac_addr, u16 ethtype, u16 flags,
+ u16 vsi_seid, u16 queue, bool is_add,
+ struct i40e_control_filter_stats *stats,
+ struct i40e_asq_cmd_details *cmd_details);
+#endif /* _I40E_PROTOTYPE_H_ */
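[Annotation: a hypothetical sketch of a VF announcing its API version to the PF over the admin queue; I40E_VIRTCHNL_OP_VERSION and struct i40e_virtchnl_version_info are assumed from i40e_virtchnl.h.]

	struct i40e_virtchnl_version_info vvi;
	i40e_status ret;

	vvi.major = I40E_VIRTCHNL_VERSION_MAJOR;
	vvi.minor = I40E_VIRTCHNL_VERSION_MINOR;

	ret = i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_VERSION, 0,
				     (u8 *)&vvi, sizeof(vvi), NULL);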
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_register.h b/drivers/net/ethernet/intel/i40evf/i40e_register.h
new file mode 100644
index 000000000000..30af953cf106
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_register.h
@@ -0,0 +1,4667 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_REGISTER_H_
+#define _I40E_REGISTER_H_
+
+#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */
+#define I40E_GL_GP_FUSE_MAX_INDEX 28
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK (0xFFFFFFFF << I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT)
+#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4
+#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0
+#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT 16
+#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_MASK (0x1F << I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_PFB 0x0009C4F0
+#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT 0
+#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK (0x1F << I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16
+#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK (0xFF << I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK (0xFF << I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT)
+#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8
+#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0
+#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK (0xFFFFFFFF << I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT)
+#define I40E_GLPCI_SPARE_BITS_1 0x0009C4FC
+#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT 0
+#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_MASK (0xFFFFFFFF << I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT)
+#define I40E_PFPCI_PF_FLUSH_DONE 0x0009C800
+#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VF_FLUSH_DONE 0x0009C600
+#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880
+#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT)
+
+#define I40E_PF_ARQBAH 0x00080180
+#define I40E_PF_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_PF_ARQBAH_ARQBAH_MASK (0xFFFFFFFF << I40E_PF_ARQBAH_ARQBAH_SHIFT)
+#define I40E_PF_ARQBAL 0x00080080
+#define I40E_PF_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_PF_ARQBAL_ARQBAL_MASK (0xFFFFFFFF << I40E_PF_ARQBAL_ARQBAL_SHIFT)
+#define I40E_PF_ARQH 0x00080380
+#define I40E_PF_ARQH_ARQH_SHIFT 0
+#define I40E_PF_ARQH_ARQH_MASK (0x3FF << I40E_PF_ARQH_ARQH_SHIFT)
+#define I40E_PF_ARQLEN 0x00080280
+#define I40E_PF_ARQLEN_ARQLEN_SHIFT 0
+#define I40E_PF_ARQLEN_ARQLEN_MASK (0x3FF << I40E_PF_ARQLEN_ARQLEN_SHIFT)
+#define I40E_PF_ARQLEN_ARQVFE_SHIFT 28
+#define I40E_PF_ARQLEN_ARQVFE_MASK (0x1 << I40E_PF_ARQLEN_ARQVFE_SHIFT)
+#define I40E_PF_ARQLEN_ARQOVFL_SHIFT 29
+#define I40E_PF_ARQLEN_ARQOVFL_MASK (0x1 << I40E_PF_ARQLEN_ARQOVFL_SHIFT)
+#define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30
+#define I40E_PF_ARQLEN_ARQCRIT_MASK (0x1 << I40E_PF_ARQLEN_ARQCRIT_SHIFT)
+#define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31
+#define I40E_PF_ARQLEN_ARQENABLE_MASK (0x1 << I40E_PF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_PF_ARQT 0x00080480
+#define I40E_PF_ARQT_ARQT_SHIFT 0
+#define I40E_PF_ARQT_ARQT_MASK (0x3FF << I40E_PF_ARQT_ARQT_SHIFT)
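[Annotation: a hypothetical sketch of arming the PF admin receive queue with the registers above; arq_pa and num_entries are placeholders, and the order mirrors typical ring bring-up.]

	wr32(hw, I40E_PF_ARQBAL, lower_32_bits(arq_pa));
	wr32(hw, I40E_PF_ARQBAH, upper_32_bits(arq_pa));
	wr32(hw, I40E_PF_ARQLEN, (num_entries & I40E_PF_ARQLEN_ARQLEN_MASK) |
				 I40E_PF_ARQLEN_ARQENABLE_MASK);
	wr32(hw, I40E_PF_ARQT, num_entries - 1);	/* tail: all free */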
+#define I40E_PF_ATQBAH 0x00080100
+#define I40E_PF_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_PF_ATQBAH_ATQBAH_MASK (0xFFFFFFFF << I40E_PF_ATQBAH_ATQBAH_SHIFT)
+#define I40E_PF_ATQBAL 0x00080000
+#define I40E_PF_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_PF_ATQBAL_ATQBAL_MASK (0xFFFFFFFF << I40E_PF_ATQBAL_ATQBAL_SHIFT)
+#define I40E_PF_ATQH 0x00080300
+#define I40E_PF_ATQH_ATQH_SHIFT 0
+#define I40E_PF_ATQH_ATQH_MASK (0x3FF << I40E_PF_ATQH_ATQH_SHIFT)
+#define I40E_PF_ATQLEN 0x00080200
+#define I40E_PF_ATQLEN_ATQLEN_SHIFT 0
+#define I40E_PF_ATQLEN_ATQLEN_MASK (0x3FF << I40E_PF_ATQLEN_ATQLEN_SHIFT)
+#define I40E_PF_ATQLEN_ATQVFE_SHIFT 28
+#define I40E_PF_ATQLEN_ATQVFE_MASK (0x1 << I40E_PF_ATQLEN_ATQVFE_SHIFT)
+#define I40E_PF_ATQLEN_ATQOVFL_SHIFT 29
+#define I40E_PF_ATQLEN_ATQOVFL_MASK (0x1 << I40E_PF_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30
+#define I40E_PF_ATQLEN_ATQCRIT_MASK (0x1 << I40E_PF_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_PF_ATQLEN_ATQENABLE_MASK (0x1 << I40E_PF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_PF_ATQT 0x00080400
+#define I40E_PF_ATQT_ATQT_SHIFT 0
+#define I40E_PF_ATQT_ATQT_MASK (0x3FF << I40E_PF_ATQT_ATQT_SHIFT)
+#define I40E_VF_ARQBAH(_VF) (0x00081400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQBAH_MAX_INDEX 127
+#define I40E_VF_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_VF_ARQBAH_ARQBAH_MASK (0xFFFFFFFF << I40E_VF_ARQBAH_ARQBAH_SHIFT)
+#define I40E_VF_ARQBAL(_VF) (0x00080C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQBAL_MAX_INDEX 127
+#define I40E_VF_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_VF_ARQBAL_ARQBAL_MASK (0xFFFFFFFF << I40E_VF_ARQBAL_ARQBAL_SHIFT)
+#define I40E_VF_ARQH(_VF) (0x00082400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQH_MAX_INDEX 127
+#define I40E_VF_ARQH_ARQH_SHIFT 0
+#define I40E_VF_ARQH_ARQH_MASK (0x3FF << I40E_VF_ARQH_ARQH_SHIFT)
+#define I40E_VF_ARQLEN(_VF) (0x00081C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQLEN_MAX_INDEX 127
+#define I40E_VF_ARQLEN_ARQLEN_SHIFT 0
+#define I40E_VF_ARQLEN_ARQLEN_MASK (0x3FF << I40E_VF_ARQLEN_ARQLEN_SHIFT)
+#define I40E_VF_ARQLEN_ARQVFE_SHIFT 28
+#define I40E_VF_ARQLEN_ARQVFE_MASK (0x1 << I40E_VF_ARQLEN_ARQVFE_SHIFT)
+#define I40E_VF_ARQLEN_ARQOVFL_SHIFT 29
+#define I40E_VF_ARQLEN_ARQOVFL_MASK (0x1 << I40E_VF_ARQLEN_ARQOVFL_SHIFT)
+#define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30
+#define I40E_VF_ARQLEN_ARQCRIT_MASK (0x1 << I40E_VF_ARQLEN_ARQCRIT_SHIFT)
+#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31
+#define I40E_VF_ARQLEN_ARQENABLE_MASK (0x1 << I40E_VF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQT_MAX_INDEX 127
+#define I40E_VF_ARQT_ARQT_SHIFT 0
+#define I40E_VF_ARQT_ARQT_MASK (0x3FF << I40E_VF_ARQT_ARQT_SHIFT)
+#define I40E_VF_ATQBAH(_VF) (0x00081000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQBAH_MAX_INDEX 127
+#define I40E_VF_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_VF_ATQBAH_ATQBAH_MASK (0xFFFFFFFF << I40E_VF_ATQBAH_ATQBAH_SHIFT)
+#define I40E_VF_ATQBAL(_VF) (0x00080800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQBAL_MAX_INDEX 127
+#define I40E_VF_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_VF_ATQBAL_ATQBAL_MASK (0xFFFFFFFF << I40E_VF_ATQBAL_ATQBAL_SHIFT)
+#define I40E_VF_ATQH(_VF) (0x00082000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQH_MAX_INDEX 127
+#define I40E_VF_ATQH_ATQH_SHIFT 0
+#define I40E_VF_ATQH_ATQH_MASK (0x3FF << I40E_VF_ATQH_ATQH_SHIFT)
+#define I40E_VF_ATQLEN(_VF) (0x00081800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQLEN_MAX_INDEX 127
+#define I40E_VF_ATQLEN_ATQLEN_SHIFT 0
+#define I40E_VF_ATQLEN_ATQLEN_MASK (0x3FF << I40E_VF_ATQLEN_ATQLEN_SHIFT)
+#define I40E_VF_ATQLEN_ATQVFE_SHIFT 28
+#define I40E_VF_ATQLEN_ATQVFE_MASK (0x1 << I40E_VF_ATQLEN_ATQVFE_SHIFT)
+#define I40E_VF_ATQLEN_ATQOVFL_SHIFT 29
+#define I40E_VF_ATQLEN_ATQOVFL_MASK (0x1 << I40E_VF_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30
+#define I40E_VF_ATQLEN_ATQCRIT_MASK (0x1 << I40E_VF_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_VF_ATQLEN_ATQENABLE_MASK (0x1 << I40E_VF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQT_MAX_INDEX 127
+#define I40E_VF_ATQT_ATQT_SHIFT 0
+#define I40E_VF_ATQT_ATQT_MASK (0x3FF << I40E_VF_ATQT_ATQT_SHIFT)
+#define I40E_PRT_L2TAGSEN 0x001C0B20
+#define I40E_PRT_L2TAGSEN_ENABLE_SHIFT 0
+#define I40E_PRT_L2TAGSEN_ENABLE_MASK (0xFF << I40E_PRT_L2TAGSEN_ENABLE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA 0x0010C080
+#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_MASK (0xF << I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_MASK (0x7 << I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_PFCM_LAN_ERRDATA_Q_NUM_MASK (0xFFF << I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO 0x0010C000
+#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_MASK (0x7 << I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LANCTXCTL(_pf) (0x0010C300 + ((_pf) * 4)) /* _pf=0..15 */
+#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT 0
+#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_MASK (0xFFF << I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT)
+#define I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT 12
+#define I40E_PFCM_LANCTXCTL_SUB_LINE_MASK (0x7 << I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT)
+#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT 15
+#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_MASK (0x3 << I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT)
+#define I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT 17
+#define I40E_PFCM_LANCTXCTL_OP_CODE_MASK (0x3 << I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT)
+#define I40E_PFCM_LANCTXDATA(_i, _pf) (0x0010C100 + ((_i) * 4) + ((_pf) * 16)) /* _i=0...3 _pf=0..15 */
+#define I40E_PFCM_LANCTXDATA_MAX_INDEX 3
+#define I40E_PFCM_LANCTXDATA_DATA_SHIFT 0
+#define I40E_PFCM_LANCTXDATA_DATA_MASK (0xFFFFFFFF << I40E_PFCM_LANCTXDATA_DATA_SHIFT)
+#define I40E_PFCM_LANCTXSTAT(_pf) (0x0010C380 + ((_pf) * 4)) /* _pf=0..15 */
+#define I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT 0
+#define I40E_PFCM_LANCTXSTAT_CTX_DONE_MASK (0x1 << I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT)
+#define I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT 1
+#define I40E_PFCM_LANCTXSTAT_CTX_MISS_MASK (0x1 << I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT)
+#define I40E_PFCM_PE_ERRDATA 0x00138D00
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_MASK (0xF << I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_MASK (0x7 << I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_MASK (0x3FFFF << I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_PE_ERRINFO 0x00138C80
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_MASK (0x7 << I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1(_VF) (0x00138800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFCM_PE_ERRDATA1_MAX_INDEX 127
+#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT 0
+#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_MASK (0xF << I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT 4
+#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_MASK (0x7 << I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT 8
+#define I40E_VFCM_PE_ERRDATA1_Q_NUM_MASK (0x3FFFF << I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1(_VF) (0x00138400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFCM_PE_ERRINFO1_MAX_INDEX 127
+#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT 0
+#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_MASK (0x1 << I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT 4
+#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_MASK (0x7 << I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT 8
+#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT 16
+#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT 24
+#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT)
+#define I40E_GLDCB_GENC 0x00083044
+#define I40E_GLDCB_GENC_PCIRTT_SHIFT 0
+#define I40E_GLDCB_GENC_PCIRTT_MASK (0xFFFF << I40E_GLDCB_GENC_PCIRTT_SHIFT)
+#define I40E_GLDCB_RUPTI 0x00122618
+#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT 0
+#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_MASK (0xFFFFFFFF << I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT)
+#define I40E_PRTDCB_FCCFG 0x001E4640
+#define I40E_PRTDCB_FCCFG_TFCE_SHIFT 3
+#define I40E_PRTDCB_FCCFG_TFCE_MASK (0x3 << I40E_PRTDCB_FCCFG_TFCE_SHIFT)
+#define I40E_PRTDCB_FCRTV 0x001E4600
+#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT 0
+#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_MASK (0xFFFF << I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT)
+#define I40E_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTDCB_FCTTVN_MAX_INDEX 3
+#define I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT 0
+#define I40E_PRTDCB_FCTTVN_TTV_2N_MASK (0xFFFF << I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT)
+#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT 16
+#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_MASK (0xFFFF << I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT)
+#define I40E_PRTDCB_GENC 0x00083000
+#define I40E_PRTDCB_GENC_RESERVED_1_SHIFT 0
+#define I40E_PRTDCB_GENC_RESERVED_1_MASK (0x3 << I40E_PRTDCB_GENC_RESERVED_1_SHIFT)
+#define I40E_PRTDCB_GENC_NUMTC_SHIFT 2
+#define I40E_PRTDCB_GENC_NUMTC_MASK (0xF << I40E_PRTDCB_GENC_NUMTC_SHIFT)
+#define I40E_PRTDCB_GENC_FCOEUP_SHIFT 6
+#define I40E_PRTDCB_GENC_FCOEUP_MASK (0x7 << I40E_PRTDCB_GENC_FCOEUP_SHIFT)
+#define I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT 9
+#define I40E_PRTDCB_GENC_FCOEUP_VALID_MASK (0x1 << I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT)
+#define I40E_PRTDCB_GENC_PFCLDA_SHIFT 16
+#define I40E_PRTDCB_GENC_PFCLDA_MASK (0xFFFF << I40E_PRTDCB_GENC_PFCLDA_SHIFT)
+#define I40E_PRTDCB_GENS 0x00083020
+#define I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT 0
+#define I40E_PRTDCB_GENS_DCBX_STATUS_MASK (0x7 << I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT)
+#define I40E_PRTDCB_MFLCN 0x001E2400
+#define I40E_PRTDCB_MFLCN_PMCF_SHIFT 0
+#define I40E_PRTDCB_MFLCN_PMCF_MASK (0x1 << I40E_PRTDCB_MFLCN_PMCF_SHIFT)
+#define I40E_PRTDCB_MFLCN_DPF_SHIFT 1
+#define I40E_PRTDCB_MFLCN_DPF_MASK (0x1 << I40E_PRTDCB_MFLCN_DPF_SHIFT)
+#define I40E_PRTDCB_MFLCN_RPFCM_SHIFT 2
+#define I40E_PRTDCB_MFLCN_RPFCM_MASK (0x1 << I40E_PRTDCB_MFLCN_RPFCM_SHIFT)
+#define I40E_PRTDCB_MFLCN_RFCE_SHIFT 3
+#define I40E_PRTDCB_MFLCN_RFCE_MASK (0x1 << I40E_PRTDCB_MFLCN_RFCE_SHIFT)
+#define I40E_PRTDCB_MFLCN_RPFCE_SHIFT 4
+#define I40E_PRTDCB_MFLCN_RPFCE_MASK (0xFF << I40E_PRTDCB_MFLCN_RPFCE_SHIFT)
+#define I40E_PRTDCB_RETSC 0x001223E0
+#define I40E_PRTDCB_RETSC_ETS_MODE_SHIFT 0
+#define I40E_PRTDCB_RETSC_ETS_MODE_MASK (0x1 << I40E_PRTDCB_RETSC_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT 1
+#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK (0x1 << I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT 2
+#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK (0xF << I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT)
+#define I40E_PRTDCB_RETSC_LLTC_SHIFT 8
+#define I40E_PRTDCB_RETSC_LLTC_MASK (0xFF << I40E_PRTDCB_RETSC_LLTC_SHIFT)
+#define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_RETSTCC_MAX_INDEX 7
+#define I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT 0
+#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK (0x7F << I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
+#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK (0x1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31
+#define I40E_PRTDCB_RETSTCC_ETSTC_MASK (0x1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
+#define I40E_PRTDCB_RPPMC 0x001223A0
+#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0
+#define I40E_PRTDCB_RPPMC_LANRPPM_MASK (0xFF << I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT 8
+#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK (0xFF << I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT 16
+#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK (0xFF << I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT)
+#define I40E_PRTDCB_RUP 0x001C0B00
+#define I40E_PRTDCB_RUP_NOVLANUP_SHIFT 0
+#define I40E_PRTDCB_RUP_NOVLANUP_MASK (0x7 << I40E_PRTDCB_RUP_NOVLANUP_SHIFT)
+#define I40E_PRTDCB_RUP2TC 0x001C09A0
+#define I40E_PRTDCB_RUP2TC_UP0TC_SHIFT 0
+#define I40E_PRTDCB_RUP2TC_UP0TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP0TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP1TC_SHIFT 3
+#define I40E_PRTDCB_RUP2TC_UP1TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP1TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP2TC_SHIFT 6
+#define I40E_PRTDCB_RUP2TC_UP2TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP2TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP3TC_SHIFT 9
+#define I40E_PRTDCB_RUP2TC_UP3TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP3TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP4TC_SHIFT 12
+#define I40E_PRTDCB_RUP2TC_UP4TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP4TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP5TC_SHIFT 15
+#define I40E_PRTDCB_RUP2TC_UP5TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP5TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP6TC_SHIFT 18
+#define I40E_PRTDCB_RUP2TC_UP6TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21
+#define I40E_PRTDCB_RUP2TC_UP7TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
+#define I40E_PRTDCB_TC2PFC 0x001C0980
+#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0
+#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK (0xFF << I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
+#define I40E_PRTDCB_TCPMC 0x000A21A0
+#define I40E_PRTDCB_TCPMC_CPM_SHIFT 0
+#define I40E_PRTDCB_TCPMC_CPM_MASK (0x1FFF << I40E_PRTDCB_TCPMC_CPM_SHIFT)
+#define I40E_PRTDCB_TCPMC_LLTC_SHIFT 13
+#define I40E_PRTDCB_TCPMC_LLTC_MASK (0xFF << I40E_PRTDCB_TCPMC_LLTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK (0x1 << I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_TCWSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TCWSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCWSTC_MSTC_MASK (0xFFFFF << I40E_PRTDCB_TCWSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TDPMC 0x000A0180
+#define I40E_PRTDCB_TDPMC_DPM_SHIFT 0
+#define I40E_PRTDCB_TDPMC_DPM_MASK (0xFF << I40E_PRTDCB_TDPMC_DPM_SHIFT)
+#define I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK (0x1 << I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TDPUC 0x00044100
+#define I40E_PRTDCB_TDPUC_MAX_TXFRAME_SHIFT 0
+#define I40E_PRTDCB_TDPUC_MAX_TXFRAME_MASK (0xFFFF << I40E_PRTDCB_TDPUC_MAX_TXFRAME_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB 0x000AE060
+#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT 0
+#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK (0x1 << I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT 8
+#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK (0xFF << I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB 0x00098060
+#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT 0
+#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK (0x1 << I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT 8
+#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK (0xFF << I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT)
+#define I40E_PRTDCB_TFCS 0x001E4560
+#define I40E_PRTDCB_TFCS_TXOFF_SHIFT 0
+#define I40E_PRTDCB_TFCS_TXOFF_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF0_SHIFT 8
+#define I40E_PRTDCB_TFCS_TXOFF0_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF0_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF1_SHIFT 9
+#define I40E_PRTDCB_TFCS_TXOFF1_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF1_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF2_SHIFT 10
+#define I40E_PRTDCB_TFCS_TXOFF2_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF2_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF3_SHIFT 11
+#define I40E_PRTDCB_TFCS_TXOFF3_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF3_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF4_SHIFT 12
+#define I40E_PRTDCB_TFCS_TXOFF4_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF4_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF5_SHIFT 13
+#define I40E_PRTDCB_TFCS_TXOFF5_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF5_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF6_SHIFT 14
+#define I40E_PRTDCB_TFCS_TXOFF6_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF6_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF7_SHIFT 15
+#define I40E_PRTDCB_TFCS_TXOFF7_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF7_SHIFT)
+#define I40E_PRTDCB_TFWSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_TFWSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TFWSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TFWSTC_MSTC_MASK (0xFFFFF << I40E_PRTDCB_TFWSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_TPFCTS_MAX_INDEX 7
+#define I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT 0
+#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK (0x3FFF << I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT)
+#define I40E_GLFCOE_RCTL 0x00269B94
+#define I40E_GLFCOE_RCTL_FCOEVER_SHIFT 0
+#define I40E_GLFCOE_RCTL_FCOEVER_MASK (0xF << I40E_GLFCOE_RCTL_FCOEVER_SHIFT)
+#define I40E_GLFCOE_RCTL_SAVBAD_SHIFT 4
+#define I40E_GLFCOE_RCTL_SAVBAD_MASK (0x1 << I40E_GLFCOE_RCTL_SAVBAD_SHIFT)
+#define I40E_GLFCOE_RCTL_ICRC_SHIFT 5
+#define I40E_GLFCOE_RCTL_ICRC_MASK (0x1 << I40E_GLFCOE_RCTL_ICRC_SHIFT)
+#define I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT 16
+#define I40E_GLFCOE_RCTL_MAX_SIZE_MASK (0x3FFF << I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT)
+#define I40E_GL_FWSTS 0x00083048
+#define I40E_GL_FWSTS_FWS0B_SHIFT 0
+#define I40E_GL_FWSTS_FWS0B_MASK (0xFF << I40E_GL_FWSTS_FWS0B_SHIFT)
+#define I40E_GL_FWSTS_FWRI_SHIFT 9
+#define I40E_GL_FWSTS_FWRI_MASK (0x1 << I40E_GL_FWSTS_FWRI_SHIFT)
+#define I40E_GL_FWSTS_FWS1B_SHIFT 16
+#define I40E_GL_FWSTS_FWS1B_MASK (0xFF << I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_GLGEN_CLKSTAT 0x000B8184
+#define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0
+#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK (0x1 << I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT)
+#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT 4
+#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_MASK (0x3 << I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT 8
+#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT 12
+#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT 16
+#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT 20
+#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_GPIO_CTL(_i) (0x00088100 + ((_i) * 4)) /* _i=0...29 */
+#define I40E_GLGEN_GPIO_CTL_MAX_INDEX 29
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT 0
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK (0x3 << I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT 3
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK (0x1 << I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT 4
+#define I40E_GLGEN_GPIO_CTL_PIN_DIR_MASK (0x1 << I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT 5
+#define I40E_GLGEN_GPIO_CTL_TRI_CTL_MASK (0x1 << I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT 6
+#define I40E_GLGEN_GPIO_CTL_OUT_CTL_MASK (0x1 << I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT 7
+#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK (0x7 << I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT 10
+#define I40E_GLGEN_GPIO_CTL_LED_INVRT_MASK (0x1 << I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT 11
+#define I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK (0x1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT 12
+#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK (0xF << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT 17
+#define I40E_GLGEN_GPIO_CTL_INT_MODE_MASK (0x3 << I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT 19
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK (0x1 << I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20
+#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK (0x3F << I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
+#define I40E_GLGEN_GPIO_SET 0x00088184
+#define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0
+#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK (0x1F << I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT)
+#define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT 5
+#define I40E_GLGEN_GPIO_SET_SDP_DATA_MASK (0x1 << I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT)
+#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6
+#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_MASK (0x1 << I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT)
+#define I40E_GLGEN_GPIO_STAT 0x0008817C
+#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT 0
+#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_MASK (0x3FFFFFFF << I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT)
+#define I40E_GLGEN_GPIO_TRANSIT 0x00088180
+#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT 0
+#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_MASK (0x3FFFFFFF << I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT)
+#define I40E_GLGEN_I2CCMD(_i) (0x000881E0 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_I2CCMD_MAX_INDEX 3
+#define I40E_GLGEN_I2CCMD_DATA_SHIFT 0
+#define I40E_GLGEN_I2CCMD_DATA_MASK (0xFFFF << I40E_GLGEN_I2CCMD_DATA_SHIFT)
+#define I40E_GLGEN_I2CCMD_REGADD_SHIFT 16
+#define I40E_GLGEN_I2CCMD_REGADD_MASK (0xFF << I40E_GLGEN_I2CCMD_REGADD_SHIFT)
+#define I40E_GLGEN_I2CCMD_PHYADD_SHIFT 24
+#define I40E_GLGEN_I2CCMD_PHYADD_MASK (0x7 << I40E_GLGEN_I2CCMD_PHYADD_SHIFT)
+#define I40E_GLGEN_I2CCMD_OP_SHIFT 27
+#define I40E_GLGEN_I2CCMD_OP_MASK (0x1 << I40E_GLGEN_I2CCMD_OP_SHIFT)
+#define I40E_GLGEN_I2CCMD_RESET_SHIFT 28
+#define I40E_GLGEN_I2CCMD_RESET_MASK (0x1 << I40E_GLGEN_I2CCMD_RESET_SHIFT)
+#define I40E_GLGEN_I2CCMD_R_SHIFT 29
+#define I40E_GLGEN_I2CCMD_R_MASK (0x1 << I40E_GLGEN_I2CCMD_R_SHIFT)
+#define I40E_GLGEN_I2CCMD_E_SHIFT 31
+#define I40E_GLGEN_I2CCMD_E_MASK (0x1 << I40E_GLGEN_I2CCMD_E_SHIFT)
+#define I40E_GLGEN_I2CPARAMS(_i) (0x000881AC + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_I2CPARAMS_MAX_INDEX 3
+#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT 0
+#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_MASK (0x1F << I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT 5
+#define I40E_GLGEN_I2CPARAMS_READ_TIME_MASK (0x7 << I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT 8
+#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_SHIFT 9
+#define I40E_GLGEN_I2CPARAMS_CLK_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT 10
+#define I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT 11
+#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT 12
+#define I40E_GLGEN_I2CPARAMS_DATA_IN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT 13
+#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT 14
+#define I40E_GLGEN_I2CPARAMS_CLK_IN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT 15
+#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT 31
+#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_MASK (0x1 << I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT)
+#define I40E_GLGEN_LED_CTL 0x00088178
+#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT 0
+#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_MASK (0x1 << I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL(_i) (0x000881D0 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MDIO_CTRL_MAX_INDEX 3
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT 0
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_MASK (0x1FFFF << I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17
+#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK (0x1 << I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK (0x3FFF << I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK (0x1 << I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT 1
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK (0xF << I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT 5
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT 10
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT 15
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT 20
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT 25
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_MASK (0xF << I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT 31
+#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_MASK (0x1 << I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT)
+#define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MSCA_MAX_INDEX 3
+#define I40E_GLGEN_MSCA_MDIADD_SHIFT 0
+#define I40E_GLGEN_MSCA_MDIADD_MASK (0xFFFF << I40E_GLGEN_MSCA_MDIADD_SHIFT)
+#define I40E_GLGEN_MSCA_DEVADD_SHIFT 16
+#define I40E_GLGEN_MSCA_DEVADD_MASK (0x1F << I40E_GLGEN_MSCA_DEVADD_SHIFT)
+#define I40E_GLGEN_MSCA_PHYADD_SHIFT 21
+#define I40E_GLGEN_MSCA_PHYADD_MASK (0x1F << I40E_GLGEN_MSCA_PHYADD_SHIFT)
+#define I40E_GLGEN_MSCA_OPCODE_SHIFT 26
+#define I40E_GLGEN_MSCA_OPCODE_MASK (0x3 << I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_GLGEN_MSCA_STCODE_SHIFT 28
+#define I40E_GLGEN_MSCA_STCODE_MASK (0x3 << I40E_GLGEN_MSCA_STCODE_SHIFT)
+#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
+#define I40E_GLGEN_MSCA_MDICMD_MASK (0x1 << I40E_GLGEN_MSCA_MDICMD_SHIFT)
+#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
+#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK (0x1 << I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
+#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MSRWD_MAX_INDEX 3
+#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
+#define I40E_GLGEN_MSRWD_MDIWRDATA_MASK (0xFFFF << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT)
+#define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16
+#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK (0xFFFF << I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK (0x1F << I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK (0xFF << I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT)
+#define I40E_GLGEN_PE_ENA 0x000B81A0
+#define I40E_GLGEN_PE_ENA_PE_ENA_SHIFT 0
+#define I40E_GLGEN_PE_ENA_PE_ENA_MASK (0x1 << I40E_GLGEN_PE_ENA_PE_ENA_SHIFT)
+#define I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_SHIFT 1
+#define I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_MASK (0x3 << I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_SHIFT)
+#define I40E_GLGEN_RSTAT 0x000B8188
+#define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0
+#define I40E_GLGEN_RSTAT_DEVSTATE_MASK (0x3 << I40E_GLGEN_RSTAT_DEVSTATE_SHIFT)
+#define I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT 2
+#define I40E_GLGEN_RSTAT_RESET_TYPE_MASK (0x3 << I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT)
+#define I40E_GLGEN_RSTAT_CORERCNT_SHIFT 4
+#define I40E_GLGEN_RSTAT_CORERCNT_MASK (0x3 << I40E_GLGEN_RSTAT_CORERCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT 6
+#define I40E_GLGEN_RSTAT_GLOBRCNT_MASK (0x3 << I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_EMPRCNT_SHIFT 8
+#define I40E_GLGEN_RSTAT_EMPRCNT_MASK (0x3 << I40E_GLGEN_RSTAT_EMPRCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT 10
+#define I40E_GLGEN_RSTAT_TIME_TO_RST_MASK (0x3F << I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT)
+#define I40E_GLGEN_RSTCTL 0x000B8180
+#define I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT 0
+#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK (0x3F << I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT)
+#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8
+#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK (0x1 << I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT)
+#define I40E_GLGEN_RSTENA_EMP 0x000B818C
+#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT 0
+#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK (0x1 << I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT)
+#define I40E_GLGEN_RTRIG 0x000B8190
+#define I40E_GLGEN_RTRIG_CORER_SHIFT 0
+#define I40E_GLGEN_RTRIG_CORER_MASK (0x1 << I40E_GLGEN_RTRIG_CORER_SHIFT)
+#define I40E_GLGEN_RTRIG_GLOBR_SHIFT 1
+#define I40E_GLGEN_RTRIG_GLOBR_MASK (0x1 << I40E_GLGEN_RTRIG_GLOBR_SHIFT)
+#define I40E_GLGEN_RTRIG_EMPFWR_SHIFT 2
+#define I40E_GLGEN_RTRIG_EMPFWR_MASK (0x1 << I40E_GLGEN_RTRIG_EMPFWR_SHIFT)
+#define I40E_GLGEN_STAT 0x000B612C
+#define I40E_GLGEN_STAT_HWRSVD0_SHIFT 0
+#define I40E_GLGEN_STAT_HWRSVD0_MASK (0x3 << I40E_GLGEN_STAT_HWRSVD0_SHIFT)
+#define I40E_GLGEN_STAT_DCBEN_SHIFT 2
+#define I40E_GLGEN_STAT_DCBEN_MASK (0x1 << I40E_GLGEN_STAT_DCBEN_SHIFT)
+#define I40E_GLGEN_STAT_VTEN_SHIFT 3
+#define I40E_GLGEN_STAT_VTEN_MASK (0x1 << I40E_GLGEN_STAT_VTEN_SHIFT)
+#define I40E_GLGEN_STAT_FCOEN_SHIFT 4
+#define I40E_GLGEN_STAT_FCOEN_MASK (0x1 << I40E_GLGEN_STAT_FCOEN_SHIFT)
+#define I40E_GLGEN_STAT_EVBEN_SHIFT 5
+#define I40E_GLGEN_STAT_EVBEN_MASK (0x1 << I40E_GLGEN_STAT_EVBEN_SHIFT)
+#define I40E_GLGEN_STAT_HWRSVD1_SHIFT 6
+#define I40E_GLGEN_STAT_HWRSVD1_MASK (0x3 << I40E_GLGEN_STAT_HWRSVD1_SHIFT)
+#define I40E_GLGEN_VFLRSTAT(_i) (0x00092600 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_VFLRSTAT_MAX_INDEX 3
+#define I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT 0
+#define I40E_GLGEN_VFLRSTAT_VFLRE_MASK (0xFFFFFFFF << I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT)
+#define I40E_GLVFGEN_TIMER 0x000881BC
+#define I40E_GLVFGEN_TIMER_GTIME_SHIFT 0
+#define I40E_GLVFGEN_TIMER_GTIME_MASK (0xFFFFFFFF << I40E_GLVFGEN_TIMER_GTIME_SHIFT)
+#define I40E_PFGEN_CTRL 0x00092400
+#define I40E_PFGEN_CTRL_PFSWR_SHIFT 0
+#define I40E_PFGEN_CTRL_PFSWR_MASK (0x1 << I40E_PFGEN_CTRL_PFSWR_SHIFT)
+#define I40E_PFGEN_DRUN 0x00092500
+#define I40E_PFGEN_DRUN_DRVUNLD_SHIFT 0
+#define I40E_PFGEN_DRUN_DRVUNLD_MASK (0x1 << I40E_PFGEN_DRUN_DRVUNLD_SHIFT)
+#define I40E_PFGEN_PORTNUM 0x001C0480
+#define I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT 0
+#define I40E_PFGEN_PORTNUM_PORT_NUM_MASK (0x3 << I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT)
+#define I40E_PFGEN_STATE 0x00088000
+#define I40E_PFGEN_STATE_PFPEEN_SHIFT 0
+#define I40E_PFGEN_STATE_PFPEEN_MASK (0x1 << I40E_PFGEN_STATE_PFPEEN_SHIFT)
+#define I40E_PFGEN_STATE_PFFCEN_SHIFT 1
+#define I40E_PFGEN_STATE_PFFCEN_MASK (0x1 << I40E_PFGEN_STATE_PFFCEN_SHIFT)
+#define I40E_PFGEN_STATE_PFLINKEN_SHIFT 2
+#define I40E_PFGEN_STATE_PFLINKEN_MASK (0x1 << I40E_PFGEN_STATE_PFLINKEN_SHIFT)
+#define I40E_PFGEN_STATE_PFSCEN_SHIFT 3
+#define I40E_PFGEN_STATE_PFSCEN_MASK (0x1 << I40E_PFGEN_STATE_PFSCEN_SHIFT)
+#define I40E_PRTGEN_CNF 0x000B8120
+#define I40E_PRTGEN_CNF_PORT_DIS_SHIFT 0
+#define I40E_PRTGEN_CNF_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT 1
+#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT 2
+#define I40E_PRTGEN_CNF_EMP_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF2 0x000B8160
+#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT 0
+#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_MASK (0x1 << I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT)
+#define I40E_PRTGEN_STATUS 0x000B8100
+#define I40E_PRTGEN_STATUS_PORT_VALID_SHIFT 0
+#define I40E_PRTGEN_STATUS_PORT_VALID_MASK (0x1 << I40E_PRTGEN_STATUS_PORT_VALID_SHIFT)
+#define I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT 1
+#define I40E_PRTGEN_STATUS_PORT_ACTIVE_MASK (0x1 << I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT)
+#define I40E_VFGEN_RSTAT1(_VF) (0x00074400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFGEN_RSTAT1_MAX_INDEX 127
+#define I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT 0
+#define I40E_VFGEN_RSTAT1_VFR_STATE_MASK (0x3 << I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT)
+#define I40E_VPGEN_VFRSTAT(_VF) (0x00091C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPGEN_VFRSTAT_MAX_INDEX 127
+#define I40E_VPGEN_VFRSTAT_VFRD_SHIFT 0
+#define I40E_VPGEN_VFRSTAT_VFRD_MASK (0x1 << I40E_VPGEN_VFRSTAT_VFRD_SHIFT)
+#define I40E_VPGEN_VFRTRIG(_VF) (0x00091800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPGEN_VFRTRIG_MAX_INDEX 127
+#define I40E_VPGEN_VFRTRIG_VFSWR_SHIFT 0
+#define I40E_VPGEN_VFRTRIG_VFSWR_MASK (0x1 << I40E_VPGEN_VFRTRIG_VFSWR_SHIFT)
+#define I40E_VSIGEN_RSTAT(_VSI) (0x00090800 + ((_VSI) * 4)) /* _i=0...383 */
+#define I40E_VSIGEN_RSTAT_MAX_INDEX 383
+#define I40E_VSIGEN_RSTAT_VMRD_SHIFT 0
+#define I40E_VSIGEN_RSTAT_VMRD_MASK (0x1 << I40E_VSIGEN_RSTAT_VMRD_SHIFT)
+#define I40E_VSIGEN_RTRIG(_VSI) (0x00090000 + ((_VSI) * 4)) /* _i=0...383 */
+#define I40E_VSIGEN_RTRIG_MAX_INDEX 383
+#define I40E_VSIGEN_RTRIG_VMSWR_SHIFT 0
+#define I40E_VSIGEN_RTRIG_VMSWR_MASK (0x1 << I40E_VSIGEN_RTRIG_VMSWR_SHIFT)
+#define I40E_GLHMC_APBVTINUSEBASE(_i) (0x000C4a00 + ((_i) * 4))
+#define I40E_GLHMC_APBVTINUSEBASE_MAX_INDEX 15
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_MASK (0xFFFFFF << I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_CEQPART(_i) (0x001312C0 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_CEQPART_MAX_INDEX 15
+#define I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_CEQPART_PMCEQBASE_MASK (0xFF << I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_MASK (0x1FF << I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_DBCQPART(_i) (0x00131240 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_DBCQPART_MAX_INDEX 15
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_MASK (0x3FFF << I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_MASK (0x7FFF << I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_DBQPPART(_i) (0x00138D80 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_DBQPPART_MAX_INDEX 15
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_MASK (0x3FFF << I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_MASK (0x7FFF << I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_FCOEDDPBASE(_i) (0x000C6600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEDDPBASE_MAX_INDEX 15
+#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT 0
+#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK (0xFFFFFF << I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT)
+#define I40E_GLHMC_FCOEDDPCNT(_i) (0x000C6700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEDDPCNT_MAX_INDEX 15
+#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT 0
+#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_MASK (0xFFFFF << I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT)
+#define I40E_GLHMC_FCOEDDPOBJSZ 0x000C2010
+#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT 0
+#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_MASK (0xF << I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT)
+#define I40E_GLHMC_FCOEFBASE(_i) (0x000C6800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEFBASE_MAX_INDEX 15
+#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT 0
+#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK (0xFFFFFF << I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT)
+#define I40E_GLHMC_FCOEFCNT(_i) (0x000C6900 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEFCNT_MAX_INDEX 15
+#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT 0
+#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_MASK (0x7FFFFF << I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT)
+#define I40E_GLHMC_FCOEFMAX 0x000C20D0
+#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT 0
+#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK (0xFFFF << I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT)
+#define I40E_GLHMC_FCOEFOBJSZ 0x000C2018
+#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT 0
+#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_MASK (0xF << I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT)
+#define I40E_GLHMC_FCOEMAX 0x000C2014
+#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT 0
+#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_MASK (0x1FFF << I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT)
+#define I40E_GLHMC_FSIAVBASE(_i) (0x000C5600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIAVBASE_MAX_INDEX 15
+#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT 0
+#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_MASK (0xFFFFFF << I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_FSIAVCNT(_i) (0x000C5700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIAVCNT_MAX_INDEX 15
+#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT 0
+#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_MASK (0x1FFFFFFF << I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_FSIAVCNT_RSVD_SHIFT 29
+#define I40E_GLHMC_FSIAVCNT_RSVD_MASK (0x7 << I40E_GLHMC_FSIAVCNT_RSVD_SHIFT)
+#define I40E_GLHMC_FSIAVMAX 0x000C2068
+#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT 0
+#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_MASK (0x1FFFF << I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT)
+#define I40E_GLHMC_FSIAVOBJSZ 0x000C2064
+#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT 0
+#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_MASK (0xF << I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT)
+#define I40E_GLHMC_FSIMCBASE(_i) (0x000C6000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIMCBASE_MAX_INDEX 15
+#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT 0
+#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_MASK (0xFFFFFF << I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT)
+#define I40E_GLHMC_FSIMCCNT(_i) (0x000C6100 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIMCCNT_MAX_INDEX 15
+#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT 0
+#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_MASK (0x1FFFFFFF << I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT)
+#define I40E_GLHMC_FSIMCMAX 0x000C2060
+#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT 0
+#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_MASK (0x3FFF << I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT)
+#define I40E_GLHMC_FSIMCOBJSZ 0x000C205C
+#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT 0
+#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_MASK (0xF << I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT)
+#define I40E_GLHMC_LANQMAX 0x000C2008
+#define I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT 0
+#define I40E_GLHMC_LANQMAX_PMLANQMAX_MASK (0x7FF << I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT)
+#define I40E_GLHMC_LANRXBASE(_i) (0x000C6400 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANRXBASE_MAX_INDEX 15
+#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT 0
+#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK (0xFFFFFF << I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT)
+#define I40E_GLHMC_LANRXCNT(_i) (0x000C6500 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANRXCNT_MAX_INDEX 15
+#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT 0
+#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_MASK (0x7FF << I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT)
+#define I40E_GLHMC_LANRXOBJSZ 0x000C200C
+#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT 0
+#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_MASK (0xF << I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT)
+#define I40E_GLHMC_LANTXBASE(_i) (0x000C6200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANTXBASE_MAX_INDEX 15
+#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT 0
+#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK (0xFFFFFF << I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT)
+#define I40E_GLHMC_LANTXBASE_RSVD_SHIFT 24
+#define I40E_GLHMC_LANTXBASE_RSVD_MASK (0xFF << I40E_GLHMC_LANTXBASE_RSVD_SHIFT)
+#define I40E_GLHMC_LANTXCNT(_i) (0x000C6300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANTXCNT_MAX_INDEX 15
+#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT 0
+#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK (0x7FF << I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT)
+#define I40E_GLHMC_LANTXOBJSZ 0x000C2004
+#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT 0
+#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_MASK (0xF << I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT)
+#define I40E_GLHMC_PEARPBASE(_i) (0x000C4800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEARPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_MASK (0xFFFFFF << I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_PEARPCNT(_i) (0x000C4900 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEARPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_PEARPMAX 0x000C2038
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT 0
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_MASK (0x1FFFF << I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT)
+#define I40E_GLHMC_PEARPOBJSZ 0x000C2034
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_MASK (0x7 << I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT)
+#define I40E_GLHMC_PECQBASE(_i) (0x000C4200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PECQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_MASK (0xFFFFFF << I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_PECQCNT(_i) (0x000C4300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PECQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_PECQOBJSZ 0x000C2020
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_MASK (0xF << I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTCNT(_i) (0x000C4700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEHTCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_PEHTEBASE(_i) (0x000C4600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEHTEBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_MASK (0xFFFFFF << I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_PEHTEOBJSZ 0x000C202C
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_MASK (0xF << I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTMAX 0x000C2030
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT 0
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_MASK (0x1FFFFF << I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT)
+#define I40E_GLHMC_PEMRBASE(_i) (0x000C4C00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEMRBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_MASK (0xFFFFFF << I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_PEMRCNT(_i) (0x000C4D00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEMRCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_MASK (0x1FFFFFFF << I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_PEMRMAX 0x000C2040
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT 0
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_MASK (0x7FFFFF << I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT)
+#define I40E_GLHMC_PEMROBJSZ 0x000C203C
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT 0
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_MASK (0xF << I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT)
+#define I40E_GLHMC_PEPBLBASE(_i) (0x000C5800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEPBLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_PEPBLCNT(_i) (0x000C5900 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEPBLCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_PEPBLMAX 0x000C206C
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT 0
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_MASK (0x1FFFFFFF << I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT)
+#define I40E_GLHMC_PEQ1BASE(_i) (0x000C5200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQ1BASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_MASK (0xFFFFFF << I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_PEQ1CNT(_i) (0x000C5300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQ1CNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_PEQ1FLBASE(_i) (0x000C5400 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQ1FLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_PEQ1FLCNT(_i) (0x000C5500 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQ1FLCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_SHIFT 0
+#define I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_SHIFT)
+#define I40E_GLHMC_PEQ1FLMAX 0x000C2058
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK (0x3FFFFFF << I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)
+#define I40E_GLHMC_PEQ1MAX 0x000C2054
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK (0x3FFFFFF << I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT)
+#define I40E_GLHMC_PEQ1OBJSZ 0x000C2050
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_MASK (0xF << I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT)
+#define I40E_GLHMC_PEQPBASE(_i) (0x000C4000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_MASK (0xFFFFFF << I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_PEQPCNT(_i) (0x000C4100 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_PEQPOBJSZ 0x000C201C
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_MASK (0xF << I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT)
+#define I40E_GLHMC_PESRQBASE(_i) (0x000C4400 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PESRQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_MASK (0xFFFFFF << I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_PESRQCNT(_i) (0x000C4500 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PESRQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_PESRQMAX 0x000C2028
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT 0
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_MASK (0xFFFF << I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT)
+#define I40E_GLHMC_PESRQOBJSZ 0x000C2024
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_MASK (0xF << I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT)
+#define I40E_GLHMC_PESRQOBJSZ_RSVD_SHIFT 4
+#define I40E_GLHMC_PESRQOBJSZ_RSVD_MASK (0xFFFFFFF << I40E_GLHMC_PESRQOBJSZ_RSVD_SHIFT)
+#define I40E_GLHMC_PETIMERBASE(_i) (0x000C5A00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PETIMERBASE_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_MASK (0xFFFFFF << I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_PETIMERCNT(_i) (0x000C5B00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PETIMERCNT_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_PETIMERMAX 0x000C2084
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT 0
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_MASK (0x1FFFFFFF << I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT)
+#define I40E_GLHMC_PETIMEROBJSZ 0x000C2080
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT 0
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_MASK (0xF << I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT)
+#define I40E_GLHMC_PEXFBASE(_i) (0x000C4E00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEXFBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_MASK (0xFFFFFF << I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_PEXFCNT(_i) (0x000C4F00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEXFCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_PEXFFLBASE(_i) (0x000C5000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEXFFLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_PEXFFLCNT(_i) (0x000C5100 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEXFFLCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_SHIFT 0
+#define I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_SHIFT)
+#define I40E_GLHMC_PEXFFLMAX 0x000C204C
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK (0x1FFFFFF << I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
+#define I40E_GLHMC_PEXFMAX 0x000C2048
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK (0x3FFFFFF << I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT)
+#define I40E_GLHMC_PEXFOBJSZ 0x000C2044
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_MASK (0xF << I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT)
+#define I40E_GLHMC_PEXFOBJSZ_RSVD_SHIFT 4
+#define I40E_GLHMC_PEXFOBJSZ_RSVD_MASK (0xFFFFFFF << I40E_GLHMC_PEXFOBJSZ_RSVD_SHIFT)
+#define I40E_GLHMC_PFASSIGN(_i) (0x000C0C00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PFASSIGN_MAX_INDEX 15
+#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT 0
+#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_MASK (0xF << I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT)
+#define I40E_GLHMC_SDPART(_i) (0x000C0800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_SDPART_MAX_INDEX 15
+#define I40E_GLHMC_SDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_SDPART_PMSDBASE_MASK (0xFFF << I40E_GLHMC_SDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_SDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_SDPART_PMSDSIZE_MASK (0x1FFF << I40E_GLHMC_SDPART_PMSDSIZE_SHIFT)
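+/*
+ * Usage sketch (illustrative only; the i40e_example_* name is a
+ * placeholder, and rd32()/struct i40e_hw are the driver's usual MMIO
+ * accessors): every field defined above is read by masking first, then
+ * shifting down.
+ */
+static inline u32 i40e_example_pf_sd_size(struct i40e_hw *hw, u8 pf_id)
+{
+	u32 val = rd32(hw, I40E_GLHMC_SDPART(pf_id));
+
+	/* number of segment descriptors assigned to this function */
+	return (val & I40E_GLHMC_SDPART_PMSDSIZE_MASK) >>
+		I40E_GLHMC_SDPART_PMSDSIZE_SHIFT;
+}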
+#define I40E_GLHMC_VFAPBVTINUSEBASE(_i) (0x000CCA00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFAPBVTINUSEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_MASK (0xFFFFFF << I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART(_i) (0x00132240 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFCEQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_MASK (0xFF << I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_MASK (0x1FF << I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART(_i) (0x00132140 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFDBCQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_MASK (0x3FFF << I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_MASK (0x7FFF << I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART(_i) (0x00138E00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFDBQPPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_MASK (0x3FFF << I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_MASK (0x7FFF << I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_VFFSIAVBASE(_i) (0x000CD600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFFSIAVBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT 0
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_MASK (0xFFFFFF << I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_VFFSIAVCNT(_i) (0x000CD700 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFFSIAVCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT 0
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_VFFSIAVCNT_RSVD_SHIFT 29
+#define I40E_GLHMC_VFFSIAVCNT_RSVD_MASK (0x7 << I40E_GLHMC_VFFSIAVCNT_RSVD_SHIFT)
+#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPDINV_MAX_INDEX 31
+#define I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT 0
+#define I40E_GLHMC_VFPDINV_PMSDIDX_MASK (0xFFF << I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT)
+#define I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT 16
+#define I40E_GLHMC_VFPDINV_PMPDIDX_MASK (0x1FF << I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT)
+#define I40E_GLHMC_VFPEARPBASE(_i) (0x000CC800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEARPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_VFPEARPCNT(_i) (0x000CC900 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEARPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_VFPECQBASE(_i) (0x000CC200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPECQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_VFPECQCNT(_i) (0x000CC300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPECQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTCNT(_i) (0x000CC700 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEHTCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTEBASE(_i) (0x000CC600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEHTEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRBASE(_i) (0x000CCC00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEMRBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRCNT(_i) (0x000CCD00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEMRCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_VFPEPBLBASE(_i) (0x000CD800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEPBLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_VFPEPBLCNT(_i) (0x000CD900 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEPBLCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1BASE(_i) (0x000CD200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQ1BASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_VFPEQ1CNT(_i) (0x000CD300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQ1CNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1FLBASE(_i) (0x000CD400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQ1FLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_VFPEQ1FLCNT(_i) (0x000CD500 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQ1FLCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_SHIFT)
+#define I40E_GLHMC_VFPEQPBASE(_i) (0x000CC000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_VFPEQPCNT(_i) (0x000CC100 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_VFPESRQBASE(_i) (0x000CC400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPESRQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_VFPESRQCNT(_i) (0x000CC500 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPESRQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_VFPETIMERBASE(_i) (0x000CDA00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPETIMERBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_VFPETIMERCNT(_i) (0x000CDB00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPETIMERCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFBASE(_i) (0x000CCE00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEXFBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_VFPEXFCNT(_i) (0x000CCF00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEXFCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFFLBASE(_i) (0x000CD000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEXFFLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_VFPEXFFLCNT(_i) (0x000CD100 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEXFFLCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_SHIFT)
+#define I40E_GLHMC_VFSDPART(_i) (0x000C8800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFSDPART_MAX_INDEX 31
+#define I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_VFSDPART_PMSDBASE_MASK (0xFFF << I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_MASK (0x1FFF << I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT)
+#define I40E_PFHMC_ERRORDATA 0x000C0500
+#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT 0
+#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_MASK (0x3FFFFFFF << I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT)
+#define I40E_PFHMC_ERRORINFO 0x000C0400
+#define I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT 0
+#define I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK (0x1F << I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT)
+#define I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT 7
+#define I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK (0x1 << I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT)
+#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT 8
+#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK (0xF << I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT)
+#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT 16
+#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK (0x1F << I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT)
+#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT 31
+#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK (0x1 << I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT)
+#define I40E_PFHMC_PDINV 0x000C0300
+#define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0
+#define I40E_PFHMC_PDINV_PMSDIDX_MASK (0xFFF << I40E_PFHMC_PDINV_PMSDIDX_SHIFT)
+#define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16
+#define I40E_PFHMC_PDINV_PMPDIDX_MASK (0x1FF << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD 0x000C0000
+#define I40E_PFHMC_SDCMD_PMSDIDX_SHIFT 0
+#define I40E_PFHMC_SDCMD_PMSDIDX_MASK (0xFFF << I40E_PFHMC_SDCMD_PMSDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31
+#define I40E_PFHMC_SDCMD_PMSDWR_MASK (0x1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT)
+#define I40E_PFHMC_SDDATAHIGH 0x000C0200
+#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT 0
+#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_MASK (0xFFFFFFFF << I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT)
+#define I40E_PFHMC_SDDATALOW 0x000C0100
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK (0x1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK (0x1 << I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK (0x3FF << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT 12
+#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK (0xFFFFF << I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT)
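+/*
+ * Sketch of the data-then-command idiom the three PFHMC_SD* registers
+ * above imply (assumed flow, illustrative names; pa must be page aligned
+ * so its low bits do not collide with the VALID/TYPE/BPCOUNT fields of
+ * SDDATALOW).
+ */
+static inline void i40e_example_set_sd(struct i40e_hw *hw, u32 sd_index,
+				       u64 pa, u32 bp_count)
+{
+	/* stage the 64-bit segment descriptor */
+	wr32(hw, I40E_PFHMC_SDDATAHIGH, (u32)(pa >> 32));
+	wr32(hw, I40E_PFHMC_SDDATALOW, (u32)pa |
+	     (bp_count << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
+	     I40E_PFHMC_SDDATALOW_PMSDVALID_MASK);
+	/* commit it: descriptor index plus the write strobe */
+	wr32(hw, I40E_PFHMC_SDCMD, I40E_PFHMC_SDCMD_PMSDWR_MASK |
+	     (sd_index & I40E_PFHMC_SDCMD_PMSDIDX_MASK));
+}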
+#define I40E_GL_UFUSE 0x00094008
+#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT 1
+#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_MASK (0x1 << I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT)
+#define I40E_GL_UFUSE_NIC_ID_SHIFT 2
+#define I40E_GL_UFUSE_NIC_ID_MASK (0x1 << I40E_GL_UFUSE_NIC_ID_SHIFT)
+#define I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT 10
+#define I40E_GL_UFUSE_ULT_LOCKOUT_MASK (0x1 << I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT)
+#define I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT 11
+#define I40E_GL_UFUSE_CLS_LOCKOUT_MASK (0x1 << I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT)
+#define I40E_EMPINT_GPIO_ENA 0x00088188
+#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
+#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
+#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
+#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
+#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
+#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
+#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
+#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
+#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
+#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
+#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
+#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
+#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
+#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
+#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
+#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
+#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
+#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
+#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
+#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
+#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
+#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
+#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
+#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
+#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
+#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
+#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
+#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
+#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
+#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
+#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT)
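+/*
+ * Sketch (illustrative): each GPIO pin has its own enable bit, so a cause
+ * is enabled with a plain read-modify-write of the register above.
+ */
+static inline void i40e_example_emp_gpio_enable(struct i40e_hw *hw, u32 pin)
+{
+	u32 val = rd32(hw, I40E_EMPINT_GPIO_ENA);
+
+	wr32(hw, I40E_EMPINT_GPIO_ENA, val | BIT(pin));
+}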
+#define I40E_PFGEN_PORTMDIO_NUM 0x0003F100
+#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT 0
+#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_MASK (0x3 << I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT)
+#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT 4
+#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK (0x1 << I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT)
+#define I40E_PFINT_AEQCTL 0x00038700
+#define I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_PFINT_AEQCTL_MSIX_INDX_MASK (0xFF << I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_ITR_INDX_SHIFT 11
+#define I40E_PFINT_AEQCTL_ITR_INDX_MASK (0x3 << I40E_PFINT_AEQCTL_ITR_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_PFINT_AEQCTL_MSIX0_INDX_MASK (0x7 << I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK (0x1 << I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_PFINT_AEQCTL_INTEVENT_SHIFT 31
+#define I40E_PFINT_AEQCTL_INTEVENT_MASK (0x1 << I40E_PFINT_AEQCTL_INTEVENT_SHIFT)
+#define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _INTPF=0...511 */
+#define I40E_PFINT_CEQCTL_MAX_INDEX 511
+#define I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_PFINT_CEQCTL_MSIX_INDX_MASK (0xFF << I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_ITR_INDX_SHIFT 11
+#define I40E_PFINT_CEQCTL_ITR_INDX_MASK (0x3 << I40E_PFINT_CEQCTL_ITR_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK (0x7 << I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK (0x1 << I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31
+#define I40E_PFINT_CEQCTL_INTEVENT_MASK (0x1 << I40E_PFINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_PFINT_DYN_CTL0 0x00038480
+#define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0
+#define I40E_PFINT_DYN_CTL0_INTENA_MASK (0x1 << I40E_PFINT_DYN_CTL0_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT 1
+#define I40E_PFINT_DYN_CTL0_CLEARPBA_MASK (0x1 << I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
+#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK (0x1 << I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
+#define I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT 3
+#define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT 5
+#define I40E_PFINT_DYN_CTL0_INTERVAL_MASK (0xFFF << I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK (0x1 << I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
+#define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK (0x1 << I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT)
+#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _INTPF=0...511 */
+#define I40E_PFINT_DYN_CTLN_MAX_INDEX 511
+#define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0
+#define I40E_PFINT_DYN_CTLN_INTENA_MASK (0x1 << I40E_PFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK (0x1 << I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
+#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK (0x1 << I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5
+#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK (0xFFF << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK (0x1 << I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
+#define I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK (0x1 << I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT)
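+/*
+ * Sketch (illustrative): re-arming a data-path vector.  Setting INTENA
+ * together with CLEARPBA acknowledges the pending-bit array; an ITR_INDX
+ * of 0x3 is the "no ITR selected" encoding.  MSI-X vector 1 maps to
+ * entry 0 here; vector 0 uses PFINT_DYN_CTL0 instead.
+ */
+static inline void i40e_example_irq_rearm(struct i40e_hw *hw, int vector)
+{
+	u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+		  I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+		  (0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+
+	wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
+}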
+#define I40E_PFINT_GPIO_ENA 0x00088080
+#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
+#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
+#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
+#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
+#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
+#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
+#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
+#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
+#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
+#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
+#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
+#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
+#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
+#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
+#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
+#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
+#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
+#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
+#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
+#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
+#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
+#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
+#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
+#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
+#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
+#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
+#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
+#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
+#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
+#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
+#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT)
+#define I40E_PFINT_ICR0 0x00038780
+#define I40E_PFINT_ICR0_INTEVENT_SHIFT 0
+#define I40E_PFINT_ICR0_INTEVENT_MASK (0x1 << I40E_PFINT_ICR0_INTEVENT_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_0_SHIFT 1
+#define I40E_PFINT_ICR0_QUEUE_0_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_0_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_1_SHIFT 2
+#define I40E_PFINT_ICR0_QUEUE_1_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_1_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_2_SHIFT 3
+#define I40E_PFINT_ICR0_QUEUE_2_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_2_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_3_SHIFT 4
+#define I40E_PFINT_ICR0_QUEUE_3_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_3_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_4_SHIFT 5
+#define I40E_PFINT_ICR0_QUEUE_4_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_4_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_5_SHIFT 6
+#define I40E_PFINT_ICR0_QUEUE_5_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_5_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_6_SHIFT 7
+#define I40E_PFINT_ICR0_QUEUE_6_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_6_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_7_SHIFT 8
+#define I40E_PFINT_ICR0_QUEUE_7_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_7_SHIFT)
+#define I40E_PFINT_ICR0_ECC_ERR_SHIFT 16
+#define I40E_PFINT_ICR0_ECC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ECC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_MAL_DETECT_SHIFT 19
+#define I40E_PFINT_ICR0_MAL_DETECT_MASK (0x1 << I40E_PFINT_ICR0_MAL_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_GRST_SHIFT 20
+#define I40E_PFINT_ICR0_GRST_MASK (0x1 << I40E_PFINT_ICR0_GRST_SHIFT)
+#define I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT 21
+#define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK (0x1 << I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT)
+#define I40E_PFINT_ICR0_GPIO_SHIFT 22
+#define I40E_PFINT_ICR0_GPIO_MASK (0x1 << I40E_PFINT_ICR0_GPIO_SHIFT)
+#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
+#define I40E_PFINT_ICR0_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24
+#define I40E_PFINT_ICR0_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_STORM_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
+#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
+#define I40E_PFINT_ICR0_HMC_ERR_MASK (0x1 << I40E_PFINT_ICR0_HMC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_PE_CRITERR_SHIFT 28
+#define I40E_PFINT_ICR0_PE_CRITERR_MASK (0x1 << I40E_PFINT_ICR0_PE_CRITERR_SHIFT)
+#define I40E_PFINT_ICR0_VFLR_SHIFT 29
+#define I40E_PFINT_ICR0_VFLR_MASK (0x1 << I40E_PFINT_ICR0_VFLR_SHIFT)
+#define I40E_PFINT_ICR0_ADMINQ_SHIFT 30
+#define I40E_PFINT_ICR0_ADMINQ_MASK (0x1 << I40E_PFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_PFINT_ICR0_SWINT_SHIFT 31
+#define I40E_PFINT_ICR0_SWINT_MASK (0x1 << I40E_PFINT_ICR0_SWINT_SHIFT)
+#define I40E_PFINT_ICR0_ENA 0x00038800
+#define I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT 16
+#define I40E_PFINT_ICR0_ENA_ECC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT 19
+#define I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK (0x1 << I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_ENA_GRST_SHIFT 20
+#define I40E_PFINT_ICR0_ENA_GRST_MASK (0x1 << I40E_PFINT_ICR0_ENA_GRST_SHIFT)
+#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT 21
+#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK (0x1 << I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT)
+#define I40E_PFINT_ICR0_ENA_GPIO_SHIFT 22
+#define I40E_PFINT_ICR0_ENA_GPIO_MASK (0x1 << I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
+#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
+#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24
+#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
+#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
+#define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT 28
+#define I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_VFLR_SHIFT 29
+#define I40E_PFINT_ICR0_ENA_VFLR_MASK (0x1 << I40E_PFINT_ICR0_ENA_VFLR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT 30
+#define I40E_PFINT_ICR0_ENA_ADMINQ_MASK (0x1 << I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT)
+#define I40E_PFINT_ICR0_ENA_RSVD_SHIFT 31
+#define I40E_PFINT_ICR0_ENA_RSVD_MASK (0x1 << I40E_PFINT_ICR0_ENA_RSVD_SHIFT)
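+/*
+ * Sketch (illustrative): causes only latch in PFINT_ICR0 when the matching
+ * bit in PFINT_ICR0_ENA is set; the "other causes" handler reads ICR0 once
+ * and dispatches on its bits.
+ */
+static inline bool i40e_example_adminq_cause(struct i40e_hw *hw)
+{
+	u32 icr0 = rd32(hw, I40E_PFINT_ICR0);
+
+	return !!(icr0 & I40E_PFINT_ICR0_ADMINQ_MASK);
+}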
+#define I40E_PFINT_ITR0(_i) (0x00038000 + ((_i) * 128)) /* _i=0...2 */
+#define I40E_PFINT_ITR0_MAX_INDEX 2
+#define I40E_PFINT_ITR0_INTERVAL_SHIFT 0
+#define I40E_PFINT_ITR0_INTERVAL_MASK (0xFFF << I40E_PFINT_ITR0_INTERVAL_SHIFT)
+#define I40E_PFINT_ITRN(_i, _INTPF) (0x00030000 + ((_i) * 2048 + (_INTPF) * 4)) /* _i=0...2, _INTPF=0...511 */
+#define I40E_PFINT_ITRN_MAX_INDEX 2
+#define I40E_PFINT_ITRN_INTERVAL_SHIFT 0
+#define I40E_PFINT_ITRN_INTERVAL_MASK (0xFFF << I40E_PFINT_ITRN_INTERVAL_SHIFT)
+#define I40E_PFINT_LNKLST0 0x00038500
+#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
+#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK (0x7FF << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT)
+#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
+#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_MASK (0x3 << I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
+#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _INTPF=0...511 */
+#define I40E_PFINT_LNKLSTN_MAX_INDEX 511
+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK (0x7FF << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
+#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK (0x3 << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+#define I40E_PFINT_RATE0 0x00038580
+#define I40E_PFINT_RATE0_INTERVAL_SHIFT 0
+#define I40E_PFINT_RATE0_INTERVAL_MASK (0x3F << I40E_PFINT_RATE0_INTERVAL_SHIFT)
+#define I40E_PFINT_RATE0_INTRL_ENA_SHIFT 6
+#define I40E_PFINT_RATE0_INTRL_ENA_MASK (0x1 << I40E_PFINT_RATE0_INTRL_ENA_SHIFT)
+#define I40E_PFINT_RATEN(_INTPF) (0x00035800 + ((_INTPF) * 4)) /* _INTPF=0...511 */
+#define I40E_PFINT_RATEN_MAX_INDEX 511
+#define I40E_PFINT_RATEN_INTERVAL_SHIFT 0
+#define I40E_PFINT_RATEN_INTERVAL_MASK (0x3F << I40E_PFINT_RATEN_INTERVAL_SHIFT)
+#define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6
+#define I40E_PFINT_RATEN_INTRL_ENA_MASK (0x1 << I40E_PFINT_RATEN_INTRL_ENA_SHIFT)
+#define I40E_PFINT_STAT_CTL0 0x00038400
+#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
+#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK (0x3 << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
+#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _Q=0...1535 */
+#define I40E_QINT_RQCTL_MAX_INDEX 1535
+#define I40E_QINT_RQCTL_MSIX_INDX_SHIFT 0
+#define I40E_QINT_RQCTL_MSIX_INDX_MASK (0xFF << I40E_QINT_RQCTL_MSIX_INDX_SHIFT)
+#define I40E_QINT_RQCTL_ITR_INDX_SHIFT 11
+#define I40E_QINT_RQCTL_ITR_INDX_MASK (0x3 << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
+#define I40E_QINT_RQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_QINT_RQCTL_MSIX0_INDX_MASK (0x7 << I40E_QINT_RQCTL_MSIX0_INDX_SHIFT)
+#define I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_QINT_RQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_QINT_RQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_QINT_RQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_QINT_RQCTL_CAUSE_ENA_MASK (0x1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT)
+#define I40E_QINT_RQCTL_INTEVENT_SHIFT 31
+#define I40E_QINT_RQCTL_INTEVENT_MASK (0x1 << I40E_QINT_RQCTL_INTEVENT_SHIFT)
+#define I40E_QINT_TQCTL(_Q) (0x0003C000 + ((_Q) * 4)) /* _Q=0...1535 */
+#define I40E_QINT_TQCTL_MAX_INDEX 1535
+#define I40E_QINT_TQCTL_MSIX_INDX_SHIFT 0
+#define I40E_QINT_TQCTL_MSIX_INDX_MASK (0xFF << I40E_QINT_TQCTL_MSIX_INDX_SHIFT)
+#define I40E_QINT_TQCTL_ITR_INDX_SHIFT 11
+#define I40E_QINT_TQCTL_ITR_INDX_MASK (0x3 << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
+#define I40E_QINT_TQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_QINT_TQCTL_MSIX0_INDX_MASK (0x7 << I40E_QINT_TQCTL_MSIX0_INDX_SHIFT)
+#define I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_QINT_TQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_QINT_TQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_QINT_TQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_QINT_TQCTL_CAUSE_ENA_MASK (0x1 << I40E_QINT_TQCTL_CAUSE_ENA_SHIFT)
+#define I40E_QINT_TQCTL_INTEVENT_SHIFT 31
+#define I40E_QINT_TQCTL_INTEVENT_MASK (0x1 << I40E_QINT_TQCTL_INTEVENT_SHIFT)
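+/*
+ * Sketch (illustrative): each QINT_[RT]QCTL entry names the MSI-X vector
+ * for its queue and the next queue in that vector's cause chain; writing
+ * the all-ones index (the NEXTQ_INDX mask value) conventionally
+ * terminates the chain.
+ */
+static inline void i40e_example_map_rx_queue(struct i40e_hw *hw, u32 q,
+					     u32 vector)
+{
+	u32 val = (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
+		  I40E_QINT_RQCTL_NEXTQ_INDX_MASK |	/* end of list */
+		  I40E_QINT_RQCTL_CAUSE_ENA_MASK;
+
+	wr32(hw, I40E_QINT_RQCTL(q), val);
+}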
+#define I40E_VFINT_DYN_CTL0(_VF) (0x0002A400 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VFINT_DYN_CTL0_MAX_INDEX 127
+#define I40E_VFINT_DYN_CTL0_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTL0_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTL0_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTL0_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTL0_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTL0_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT)
+#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _INTVF=0...511 */
+#define I40E_VFINT_DYN_CTLN_MAX_INDEX 511
+#define I40E_VFINT_DYN_CTLN_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTLN_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTLN_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTLN_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTLN_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT)
+#define I40E_VFINT_ICR0(_VF) (0x0002BC00 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VFINT_ICR0_MAX_INDEX 127
+#define I40E_VFINT_ICR0_INTEVENT_SHIFT 0
+#define I40E_VFINT_ICR0_INTEVENT_MASK (0x1 << I40E_VFINT_ICR0_INTEVENT_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_0_SHIFT 1
+#define I40E_VFINT_ICR0_QUEUE_0_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_0_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_1_SHIFT 2
+#define I40E_VFINT_ICR0_QUEUE_1_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_1_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_2_SHIFT 3
+#define I40E_VFINT_ICR0_QUEUE_2_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_2_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_3_SHIFT 4
+#define I40E_VFINT_ICR0_QUEUE_3_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_3_SHIFT)
+#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_SWINT_SHIFT 31
+#define I40E_VFINT_ICR0_SWINT_MASK (0x1 << I40E_VFINT_ICR0_SWINT_SHIFT)
+#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VFINT_ICR0_ENA_MAX_INDEX 127
+#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ENA_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA_RSVD_SHIFT 31
+#define I40E_VFINT_ICR0_ENA_RSVD_MASK (0x1 << I40E_VFINT_ICR0_ENA_RSVD_SHIFT)
+#define I40E_VFINT_ITR0(_i, _VF) (0x00028000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */
+#define I40E_VFINT_ITR0_MAX_INDEX 2
+#define I40E_VFINT_ITR0_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITR0_INTERVAL_MASK (0xFFF << I40E_VFINT_ITR0_INTERVAL_SHIFT)
+#define I40E_VFINT_ITRN(_i, _INTVF) (0x00020000 + ((_i) * 2048 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...511 */
+#define I40E_VFINT_ITRN_MAX_INDEX 2
+#define I40E_VFINT_ITRN_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITRN_INTERVAL_MASK (0xFFF << I40E_VFINT_ITRN_INTERVAL_SHIFT)
+#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VFINT_STAT_CTL0_MAX_INDEX 127
+#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
+#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK (0x3 << I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VPINT_AEQCTL_MAX_INDEX 127
+#define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_VPINT_AEQCTL_MSIX_INDX_MASK (0xFF << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11
+#define I40E_VPINT_AEQCTL_ITR_INDX_MASK (0x3 << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_VPINT_AEQCTL_MSIX0_INDX_MASK (0x7 << I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK (0x1 << I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_VPINT_AEQCTL_INTEVENT_SHIFT 31
+#define I40E_VPINT_AEQCTL_INTEVENT_MASK (0x1 << I40E_VPINT_AEQCTL_INTEVENT_SHIFT)
+#define I40E_VPINT_CEQCTL(_INTVF) (0x00026800 + ((_INTVF) * 4)) /* _INTVF=0...511 */
+#define I40E_VPINT_CEQCTL_MAX_INDEX 511
+#define I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_VPINT_CEQCTL_MSIX_INDX_MASK (0xFF << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_ITR_INDX_SHIFT 11
+#define I40E_VPINT_CEQCTL_ITR_INDX_MASK (0x3 << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_VPINT_CEQCTL_MSIX0_INDX_MASK (0x7 << I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK (0x1 << I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_VPINT_CEQCTL_INTEVENT_SHIFT 31
+#define I40E_VPINT_CEQCTL_INTEVENT_MASK (0x1 << I40E_VPINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_VPINT_LNKLST0(_VF) (0x0002A800 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VPINT_LNKLST0_MAX_INDEX 127
+#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
+#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK (0x7FF << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT)
+#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
+#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_MASK (0x3 << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
+#define I40E_VPINT_LNKLSTN(_INTVF) (0x00025000 + ((_INTVF) * 4)) /* _INTVF=0...511 */
+#define I40E_VPINT_LNKLSTN_MAX_INDEX 511
+#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
+#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK (0x7FF << I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
+#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK (0x3 << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+#define I40E_VPINT_RATE0(_VF) (0x0002AC00 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VPINT_RATE0_MAX_INDEX 127
+#define I40E_VPINT_RATE0_INTERVAL_SHIFT 0
+#define I40E_VPINT_RATE0_INTERVAL_MASK (0x3F << I40E_VPINT_RATE0_INTERVAL_SHIFT)
+#define I40E_VPINT_RATE0_INTRL_ENA_SHIFT 6
+#define I40E_VPINT_RATE0_INTRL_ENA_MASK (0x1 << I40E_VPINT_RATE0_INTRL_ENA_SHIFT)
+#define I40E_VPINT_RATEN(_INTVF) (0x00025800 + ((_INTVF) * 4)) /* _INTVF=0...511 */
+#define I40E_VPINT_RATEN_MAX_INDEX 511
+#define I40E_VPINT_RATEN_INTERVAL_SHIFT 0
+#define I40E_VPINT_RATEN_INTERVAL_MASK (0x3F << I40E_VPINT_RATEN_INTERVAL_SHIFT)
+#define I40E_VPINT_RATEN_INTRL_ENA_SHIFT 6
+#define I40E_VPINT_RATEN_INTRL_ENA_MASK (0x1 << I40E_VPINT_RATEN_INTRL_ENA_SHIFT)
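+/*
+ * Sketch (illustrative; the INTERVAL granularity is hardware defined):
+ * throttling a VF's vector 0 with the rate-limit registers above.
+ */
+static inline void i40e_example_vf_rate_limit(struct i40e_hw *hw, int vf,
+					      u32 interval)
+{
+	wr32(hw, I40E_VPINT_RATE0(vf),
+	     (interval & I40E_VPINT_RATE0_INTERVAL_MASK) |
+	     I40E_VPINT_RATE0_INTRL_ENA_MASK);
+}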
+#define I40E_GL_RDPU_CNTRL 0x00051060
+#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT 0
+#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_MASK (0x1 << I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT)
+#define I40E_GL_RDPU_CNTRL_ECO_SHIFT 1
+#define I40E_GL_RDPU_CNTRL_ECO_MASK (0x7FFFFFFF << I40E_GL_RDPU_CNTRL_ECO_SHIFT)
+#define I40E_GLLAN_RCTL_0 0x0012A500
+#define I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT 0
+#define I40E_GLLAN_RCTL_0_PXE_MODE_MASK (0x1 << I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT)
+#define I40E_GLLAN_TSOMSK_F 0x000442D8
+#define I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT 0
+#define I40E_GLLAN_TSOMSK_F_TCPMSKF_MASK (0xFFF << I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT)
+#define I40E_GLLAN_TSOMSK_L 0x000442E0
+#define I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT 0
+#define I40E_GLLAN_TSOMSK_L_TCPMSKL_MASK (0xFFF << I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT)
+#define I40E_GLLAN_TSOMSK_M 0x000442DC
+#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0
+#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK (0xFFF << I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
+#define I40E_PFLAN_QALLOC 0x001C0400
+#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
+#define I40E_PFLAN_QALLOC_FIRSTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16
+#define I40E_PFLAN_QALLOC_LASTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_LASTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_VALID_SHIFT 31
+#define I40E_PFLAN_QALLOC_VALID_MASK (0x1 << I40E_PFLAN_QALLOC_VALID_SHIFT)
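+
+/*
+ * Example: decoding a field through its SHIFT/MASK pair, here the first
+ * LAN queue owned by this PF. Illustrative sketch only; it assumes the
+ * driver's struct i40e_hw and rd32() MMIO accessor, which live
+ * elsewhere, so it is guarded out of the build.
+ */
+#if 0
+static inline u32 i40e_example_first_queue(struct i40e_hw *hw)
+{
+	u32 val = rd32(hw, I40E_PFLAN_QALLOC);
+
+	/* Mask first, then shift down to get the raw field value. */
+	return (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
+	       I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
+}
+#endif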
+#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QRX_ENA_MAX_INDEX 1535
+#define I40E_QRX_ENA_QENA_REQ_SHIFT 0
+#define I40E_QRX_ENA_QENA_REQ_MASK (0x1 << I40E_QRX_ENA_QENA_REQ_SHIFT)
+#define I40E_QRX_ENA_FAST_QDIS_SHIFT 1
+#define I40E_QRX_ENA_FAST_QDIS_MASK (0x1 << I40E_QRX_ENA_FAST_QDIS_SHIFT)
+#define I40E_QRX_ENA_QENA_STAT_SHIFT 2
+#define I40E_QRX_ENA_QENA_STAT_MASK (0x1 << I40E_QRX_ENA_QENA_STAT_SHIFT)
+#define I40E_QRX_TAIL(_Q) (0x00128000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QRX_TAIL_MAX_INDEX 1535
+#define I40E_QRX_TAIL_TAIL_SHIFT 0
+#define I40E_QRX_TAIL_TAIL_MASK (0x1FFF << I40E_QRX_TAIL_TAIL_SHIFT)
+#define I40E_QTX_CTL(_Q) (0x00104000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QTX_CTL_MAX_INDEX 1535
+#define I40E_QTX_CTL_PFVF_Q_SHIFT 0
+#define I40E_QTX_CTL_PFVF_Q_MASK (0x3 << I40E_QTX_CTL_PFVF_Q_SHIFT)
+#define I40E_QTX_CTL_PF_INDX_SHIFT 2
+#define I40E_QTX_CTL_PF_INDX_MASK (0xF << I40E_QTX_CTL_PF_INDX_SHIFT)
+#define I40E_QTX_CTL_VFVM_INDX_SHIFT 7
+#define I40E_QTX_CTL_VFVM_INDX_MASK (0x1FF << I40E_QTX_CTL_VFVM_INDX_SHIFT)
+#define I40E_QTX_ENA(_Q) (0x00100000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QTX_ENA_MAX_INDEX 1535
+#define I40E_QTX_ENA_QENA_REQ_SHIFT 0
+#define I40E_QTX_ENA_QENA_REQ_MASK (0x1 << I40E_QTX_ENA_QENA_REQ_SHIFT)
+#define I40E_QTX_ENA_FAST_QDIS_SHIFT 1
+#define I40E_QTX_ENA_FAST_QDIS_MASK (0x1 << I40E_QTX_ENA_FAST_QDIS_SHIFT)
+#define I40E_QTX_ENA_QENA_STAT_SHIFT 2
+#define I40E_QTX_ENA_QENA_STAT_MASK (0x1 << I40E_QTX_ENA_QENA_STAT_SHIFT)
+#define I40E_QTX_HEAD(_Q) (0x000E4000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QTX_HEAD_MAX_INDEX 1535
+#define I40E_QTX_HEAD_HEAD_SHIFT 0
+#define I40E_QTX_HEAD_HEAD_MASK (0x1FFF << I40E_QTX_HEAD_HEAD_SHIFT)
+#define I40E_QTX_HEAD_RS_PENDING_SHIFT 16
+#define I40E_QTX_HEAD_RS_PENDING_MASK (0x1 << I40E_QTX_HEAD_RS_PENDING_SHIFT)
+#define I40E_QTX_TAIL(_Q) (0x00108000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QTX_TAIL_MAX_INDEX 1535
+#define I40E_QTX_TAIL_TAIL_SHIFT 0
+#define I40E_QTX_TAIL_TAIL_MASK (0x1FFF << I40E_QTX_TAIL_TAIL_SHIFT)
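+
+/*
+ * Example: bumping a TX queue tail. The tail is a descriptor index
+ * bounded by I40E_QTX_TAIL_TAIL_MASK (shift is 0, so the mask can be
+ * applied directly). Illustrative sketch only, assuming the driver's
+ * wr32() MMIO accessor; guarded out of the build.
+ */
+#if 0
+static inline void i40e_example_bump_tx_tail(struct i40e_hw *hw,
+					     u32 queue, u32 next_to_use)
+{
+	/* Tell hardware how far the driver has filled the TX ring. */
+	wr32(hw, I40E_QTX_TAIL(queue),
+	     next_to_use & I40E_QTX_TAIL_TAIL_MASK);
+}
+#endif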
+#define I40E_VPLAN_MAPENA(_VF) (0x00074000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPLAN_MAPENA_MAX_INDEX 127
+#define I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT 0
+#define I40E_VPLAN_MAPENA_TXRX_ENA_MASK (0x1 << I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT)
+#define I40E_VPLAN_QTABLE(_i, _VF) (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */
+#define I40E_VPLAN_QTABLE_MAX_INDEX 15
+#define I40E_VPLAN_QTABLE_QINDEX_SHIFT 0
+#define I40E_VPLAN_QTABLE_QINDEX_MASK (0x7FF << I40E_VPLAN_QTABLE_QINDEX_SHIFT)
+#define I40E_VSILAN_QBASE(_VSI) (0x0020C800 + ((_VSI) * 4)) /* _i=0...383 */
+#define I40E_VSILAN_QBASE_MAX_INDEX 383
+#define I40E_VSILAN_QBASE_VSIBASE_SHIFT 0
+#define I40E_VSILAN_QBASE_VSIBASE_MASK (0x7FF << I40E_VSILAN_QBASE_VSIBASE_SHIFT)
+#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11
+#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK (0x1 << I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT)
+#define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4))
+#define I40E_VSILAN_QTABLE_MAX_INDEX 7
+#define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0
+#define I40E_VSILAN_QTABLE_QINDEX_0_MASK (0x7FF << I40E_VSILAN_QTABLE_QINDEX_0_SHIFT)
+#define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16
+#define I40E_VSILAN_QTABLE_QINDEX_1_MASK (0x7FF << I40E_VSILAN_QTABLE_QINDEX_1_SHIFT)
+#define I40E_PRTGL_SAH 0x001E2140
+#define I40E_PRTGL_SAH_FC_SAH_SHIFT 0
+#define I40E_PRTGL_SAH_FC_SAH_MASK (0xFFFF << I40E_PRTGL_SAH_FC_SAH_SHIFT)
+#define I40E_PRTGL_SAH_MFS_SHIFT 16
+#define I40E_PRTGL_SAH_MFS_MASK (0xFFFF << I40E_PRTGL_SAH_MFS_SHIFT)
+#define I40E_PRTGL_SAL 0x001E2120
+#define I40E_PRTGL_SAL_FC_SAL_SHIFT 0
+#define I40E_PRTGL_SAL_FC_SAL_MASK (0xFFFFFFFF << I40E_PRTGL_SAL_FC_SAL_SHIFT)
+#define I40E_PRTMAC_HLCTLA 0x001E4760
+#define I40E_PRTMAC_HLCTLA_DROP_US_PKTS_SHIFT 0
+#define I40E_PRTMAC_HLCTLA_DROP_US_PKTS_MASK (0x1 << I40E_PRTMAC_HLCTLA_DROP_US_PKTS_SHIFT)
+#define I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_SHIFT 1
+#define I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_MASK (0x1 << I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_SHIFT)
+#define I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_SHIFT 2
+#define I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_MASK (0x1 << I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_SHIFT)
+#define I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_SHIFT 4
+#define I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_MASK (0x7 << I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_SHIFT)
+#define I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_SHIFT 7
+#define I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_MASK (0x1 << I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP 0x001E3130
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP 0x001E3290
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP 0x001E3310
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP 0x001E3100
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP 0x001E3280
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP 0x001E3300
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E30E0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E3360
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3110
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3120
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_MASK (0x1FF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3140
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E3150
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT)
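+
+/*
+ * The PART1/PART2 pairs above split a 48-bit MAC address across two
+ * registers (32 + 16 mask bits). Which register carries the low half
+ * is an assumption in this illustrative, build-guarded sketch: PART1
+ * is taken as bits 31:0 and PART2 as bits 47:32.
+ */
+#if 0
+static inline u64 i40e_example_rx_pause_sa(struct i40e_hw *hw)
+{
+	u64 lo = rd32(hw, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1);
+	u64 hi = rd32(hw, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2);
+
+	/* Assumed ordering: PART2 supplies the top 16 bits. */
+	return ((hi & 0xFFFF) << 32) | lo;
+}
+#endif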
+#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE 0x001E3000
+#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_MASK (0x1FF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E3370 + ((_i) * 16))
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16))
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E34B0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E34C0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT)
+#define I40E_PRTMAC_HSECTL1 0x001E3560
+#define I40E_PRTMAC_HSECTL1_DROP_US_PKTS_SHIFT 0
+#define I40E_PRTMAC_HSECTL1_DROP_US_PKTS_MASK (0x1 << I40E_PRTMAC_HSECTL1_DROP_US_PKTS_SHIFT)
+#define I40E_PRTMAC_HSECTL1_PAD_US_PKT_SHIFT 3
+#define I40E_PRTMAC_HSECTL1_PAD_US_PKT_MASK (0x1 << I40E_PRTMAC_HSECTL1_PAD_US_PKT_SHIFT)
+#define I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_SHIFT 4
+#define I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_MASK (0x7 << I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_SHIFT)
+#define I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_SHIFT 7
+#define I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_MASK (0x1 << I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_SHIFT)
+#define I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_SHIFT 30
+#define I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_MASK (0x1 << I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_SHIFT)
+#define I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_SHIFT 31
+#define I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_MASK (0x1 << I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A 0x0008C480
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT 0
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT 2
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT 4
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT 6
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT 8
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT 10
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT 12
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT 14
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B 0x0008C484
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT 0
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT 2
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT 4
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT 6
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT 8
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT 10
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT 12
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT)
+#define I40E_GL_MNG_FWSM 0x000B6134
+#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 1
+#define I40E_GL_MNG_FWSM_FW_MODES_MASK (0x7 << I40E_GL_MNG_FWSM_FW_MODES_SHIFT)
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 6
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK (0x1 << I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT)
+#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11
+#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK (0xF << I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT)
+#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15
+#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK (0x1 << I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT)
+#define I40E_GL_MNG_FWSM_RESET_CNT_SHIFT 16
+#define I40E_GL_MNG_FWSM_RESET_CNT_MASK (0x7 << I40E_GL_MNG_FWSM_RESET_CNT_SHIFT)
+#define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19
+#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK (0x3F << I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT)
+#define I40E_GL_MNG_FWSM_RSVD_SHIFT 25
+#define I40E_GL_MNG_FWSM_RSVD_MASK (0x1 << I40E_GL_MNG_FWSM_RSVD_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26
+#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27
+#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT 28
+#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT 29
+#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_HWARB_CTRL 0x000B6130
+#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT 0
+#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_MASK (0x1 << I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT)
+#define I40E_PRT_MNG_FTFT_DATA(_i) (0x000852A0 + ((_i) * 32)) /* _i=0...31 */
+#define I40E_PRT_MNG_FTFT_DATA_MAX_INDEX 31
+#define I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT 0
+#define I40E_PRT_MNG_FTFT_DATA_DWORD_MASK (0xFFFFFFFF << I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT)
+#define I40E_PRT_MNG_FTFT_LENGTH 0x00085260
+#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT 0
+#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_MASK (0xFF << I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT)
+#define I40E_PRT_MNG_FTFT_MASK(_i) (0x00085160 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRT_MNG_FTFT_MASK_MAX_INDEX 7
+#define I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT 0
+#define I40E_PRT_MNG_FTFT_MASK_MASK_MASK (0xFFFF << I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT)
+#define I40E_PRT_MNG_MANC 0x00256A20
+#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT 0
+#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_MASK (0x1 << I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT)
+#define I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT 1
+#define I40E_PRT_MNG_MANC_NCSI_DISCARD_MASK (0x1 << I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT)
+#define I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT 17
+#define I40E_PRT_MNG_MANC_RCV_TCO_EN_MASK (0x1 << I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT)
+#define I40E_PRT_MNG_MANC_RCV_ALL_SHIFT 19
+#define I40E_PRT_MNG_MANC_RCV_ALL_MASK (0x1 << I40E_PRT_MNG_MANC_RCV_ALL_SHIFT)
+#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT 25
+#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_MASK (0x1 << I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT)
+#define I40E_PRT_MNG_MANC_NET_TYPE_SHIFT 26
+#define I40E_PRT_MNG_MANC_NET_TYPE_MASK (0x1 << I40E_PRT_MNG_MANC_NET_TYPE_SHIFT)
+#define I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT 28
+#define I40E_PRT_MNG_MANC_EN_BMC2OS_MASK (0x1 << I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT)
+#define I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT 29
+#define I40E_PRT_MNG_MANC_EN_BMC2NET_MASK (0x1 << I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT)
+#define I40E_PRT_MNG_MAVTV(_i) (0x00255900 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRT_MNG_MAVTV_MAX_INDEX 7
+#define I40E_PRT_MNG_MAVTV_VID_SHIFT 0
+#define I40E_PRT_MNG_MAVTV_VID_MASK (0xFFF << I40E_PRT_MNG_MAVTV_VID_SHIFT)
+#define I40E_PRT_MNG_MDEF(_i) (0x00255D00 + ((_i) * 32))
+#define I40E_PRT_MNG_MDEF_MAX_INDEX 7
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT 0
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_MASK (0xF << I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT 4
+#define I40E_PRT_MNG_MDEF_BROADCAST_AND_MASK (0x1 << I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT 5
+#define I40E_PRT_MNG_MDEF_VLAN_AND_MASK (0xFF << I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT 13
+#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_MASK (0xF << I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT 17
+#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_MASK (0xF << I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT 21
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_MASK (0xF << I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT 25
+#define I40E_PRT_MNG_MDEF_BROADCAST_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT 26
+#define I40E_PRT_MNG_MDEF_MULTICAST_AND_MASK (0x1 << I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT 27
+#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT 28
+#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT 29
+#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT 30
+#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT 31
+#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT(_i) (0x00255F00 + ((_i) * 32))
+#define I40E_PRT_MNG_MDEF_EXT_MAX_INDEX 7
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT 0
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK (0xF << I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT 4
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_MASK (0xF << I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT 8
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_MASK (0xFFFF << I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT 24
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT 25
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT 26
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT 27
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT 28
+#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT 29
+#define I40E_PRT_MNG_MDEF_EXT_MLD_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT 30
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT 31
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT)
+#define I40E_PRT_MNG_MDEFVSI(_i) (0x00256580 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MDEFVSI_MAX_INDEX 3
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT 0
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_MASK (0xFFFF << I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT)
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT 16
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_MASK (0xFFFF << I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT)
+#define I40E_PRT_MNG_METF(_i) (0x00256780 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_METF_MAX_INDEX 3
+#define I40E_PRT_MNG_METF_ETYPE_SHIFT 0
+#define I40E_PRT_MNG_METF_ETYPE_MASK (0xFFFF << I40E_PRT_MNG_METF_ETYPE_SHIFT)
+#define I40E_PRT_MNG_METF_POLARITY_SHIFT 30
+#define I40E_PRT_MNG_METF_POLARITY_MASK (0x1 << I40E_PRT_MNG_METF_POLARITY_SHIFT)
+#define I40E_PRT_MNG_MFUTP(_i) (0x00254E00 + ((_i) * 32)) /* _i=0...15 */
+#define I40E_PRT_MNG_MFUTP_MAX_INDEX 15
+#define I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT 0
+#define I40E_PRT_MNG_MFUTP_MFUTP_N_MASK (0xFFFF << I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT)
+#define I40E_PRT_MNG_MFUTP_UDP_SHIFT 16
+#define I40E_PRT_MNG_MFUTP_UDP_MASK (0x1 << I40E_PRT_MNG_MFUTP_UDP_SHIFT)
+#define I40E_PRT_MNG_MFUTP_TCP_SHIFT 17
+#define I40E_PRT_MNG_MFUTP_TCP_MASK (0x1 << I40E_PRT_MNG_MFUTP_TCP_SHIFT)
+#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT 18
+#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_MASK (0x1 << I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT)
+#define I40E_PRT_MNG_MIPAF4(_i) (0x00256280 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MIPAF4_MAX_INDEX 3
+#define I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT 0
+#define I40E_PRT_MNG_MIPAF4_MIPAF_MASK (0xFFFFFFFF << I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT)
+#define I40E_PRT_MNG_MIPAF6(_i) (0x00254200 + ((_i) * 32)) /* _i=0...15 */
+#define I40E_PRT_MNG_MIPAF6_MAX_INDEX 15
+#define I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT 0
+#define I40E_PRT_MNG_MIPAF6_MIPAF_MASK (0xFFFFFFFF << I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT)
+#define I40E_PRT_MNG_MMAH(_i) (0x00256380 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MMAH_MAX_INDEX 3
+#define I40E_PRT_MNG_MMAH_MMAH_SHIFT 0
+#define I40E_PRT_MNG_MMAH_MMAH_MASK (0xFFFF << I40E_PRT_MNG_MMAH_MMAH_SHIFT)
+#define I40E_PRT_MNG_MMAL(_i) (0x00256480 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MMAL_MAX_INDEX 3
+#define I40E_PRT_MNG_MMAL_MMAL_SHIFT 0
+#define I40E_PRT_MNG_MMAL_MMAL_MASK (0xFFFFFFFF << I40E_PRT_MNG_MMAL_MMAL_SHIFT)
+#define I40E_PRT_MNG_MNGONLY 0x00256A60
+#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT 0
+#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_MASK (0xFF << I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT)
+#define I40E_PRT_MNG_MSFM 0x00256AA0
+#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT 0
+#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT 1
+#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT 2
+#define I40E_PRT_MNG_MSFM_PORT_298_UDP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT 3
+#define I40E_PRT_MNG_MSFM_PORT_298_TCP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT 4
+#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT 5
+#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT 6
+#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT 7
+#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT)
+#define I40E_MSIX_PBA(_i) (0x00004900 + ((_i) * 4)) /* _i=0...5 */
+#define I40E_MSIX_PBA_MAX_INDEX 5
+#define I40E_MSIX_PBA_PENBIT_SHIFT 0
+#define I40E_MSIX_PBA_PENBIT_MASK (0xFFFFFFFF << I40E_MSIX_PBA_PENBIT_SHIFT)
+#define I40E_MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TADD_MAX_INDEX 128
+#define I40E_MSIX_TADD_MSIXTADD10_SHIFT 0
+#define I40E_MSIX_TADD_MSIXTADD10_MASK (0x3 << I40E_MSIX_TADD_MSIXTADD10_SHIFT)
+#define I40E_MSIX_TADD_MSIXTADD_SHIFT 2
+#define I40E_MSIX_TADD_MSIXTADD_MASK (0x3FFFFFFF << I40E_MSIX_TADD_MSIXTADD_SHIFT)
+#define I40E_MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TMSG_MAX_INDEX 128
+#define I40E_MSIX_TMSG_MSIXTMSG_SHIFT 0
+#define I40E_MSIX_TMSG_MSIXTMSG_MASK (0xFFFFFFFF << I40E_MSIX_TMSG_MSIXTMSG_SHIFT)
+#define I40E_MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TUADD_MAX_INDEX 128
+#define I40E_MSIX_TUADD_MSIXTUADD_SHIFT 0
+#define I40E_MSIX_TUADD_MSIXTUADD_MASK (0xFFFFFFFF << I40E_MSIX_TUADD_MSIXTUADD_SHIFT)
+#define I40E_MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TVCTRL_MAX_INDEX 128
+#define I40E_MSIX_TVCTRL_MASK_SHIFT 0
+#define I40E_MSIX_TVCTRL_MASK_MASK (0x1 << I40E_MSIX_TVCTRL_MASK_SHIFT)
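+
+/*
+ * The four MSIX_T* register arrays above, at a 16-byte stride, mirror
+ * the standard PCI MSI-X table entry: message address low/high,
+ * message data, and vector control (bit 0 = mask). Illustrative
+ * layout sketch only, guarded out of the build.
+ */
+#if 0
+struct i40e_example_msix_entry {
+	u32 addr_lo;	/* I40E_MSIX_TADD(i)   */
+	u32 addr_hi;	/* I40E_MSIX_TUADD(i)  */
+	u32 msg_data;	/* I40E_MSIX_TMSG(i)   */
+	u32 vctrl;	/* I40E_MSIX_TVCTRL(i) */
+};
+#endif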
+#define I40E_VFMSIX_PBA1(_i) (0x00004944 + ((_i) * 4)) /* _i=0...19 */
+#define I40E_VFMSIX_PBA1_MAX_INDEX 19
+#define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0
+#define I40E_VFMSIX_PBA1_PENBIT_MASK (0xFFFFFFFF << I40E_VFMSIX_PBA1_PENBIT_SHIFT)
+#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TADD1_MAX_INDEX 639
+#define I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT 0
+#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK (0x3 << I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT)
+#define I40E_VFMSIX_TADD1_MSIXTADD_SHIFT 2
+#define I40E_VFMSIX_TADD1_MSIXTADD_MASK (0x3FFFFFFF << I40E_VFMSIX_TADD1_MSIXTADD_SHIFT)
+#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TMSG1_MAX_INDEX 639
+#define I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT 0
+#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK (0xFFFFFFFF << I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT)
+#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TUADD1_MAX_INDEX 639
+#define I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT 0
+#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK (0xFFFFFFFF << I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT)
+#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639
+#define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0
+#define I40E_VFMSIX_TVCTRL1_MASK_MASK (0x1 << I40E_VFMSIX_TVCTRL1_MASK_SHIFT)
+#define I40E_GLNVM_FLA 0x000B6108
+#define I40E_GLNVM_FLA_FL_SCK_SHIFT 0
+#define I40E_GLNVM_FLA_FL_SCK_MASK (0x1 << I40E_GLNVM_FLA_FL_SCK_SHIFT)
+#define I40E_GLNVM_FLA_FL_CE_SHIFT 1
+#define I40E_GLNVM_FLA_FL_CE_MASK (0x1 << I40E_GLNVM_FLA_FL_CE_SHIFT)
+#define I40E_GLNVM_FLA_FL_SI_SHIFT 2
+#define I40E_GLNVM_FLA_FL_SI_MASK (0x1 << I40E_GLNVM_FLA_FL_SI_SHIFT)
+#define I40E_GLNVM_FLA_FL_SO_SHIFT 3
+#define I40E_GLNVM_FLA_FL_SO_MASK (0x1 << I40E_GLNVM_FLA_FL_SO_SHIFT)
+#define I40E_GLNVM_FLA_FL_REQ_SHIFT 4
+#define I40E_GLNVM_FLA_FL_REQ_MASK (0x1 << I40E_GLNVM_FLA_FL_REQ_SHIFT)
+#define I40E_GLNVM_FLA_FL_GNT_SHIFT 5
+#define I40E_GLNVM_FLA_FL_GNT_MASK (0x1 << I40E_GLNVM_FLA_FL_GNT_SHIFT)
+#define I40E_GLNVM_FLA_LOCKED_SHIFT 6
+#define I40E_GLNVM_FLA_LOCKED_MASK (0x1 << I40E_GLNVM_FLA_LOCKED_SHIFT)
+#define I40E_GLNVM_FLA_FL_SADDR_SHIFT 18
+#define I40E_GLNVM_FLA_FL_SADDR_MASK (0x7FF << I40E_GLNVM_FLA_FL_SADDR_SHIFT)
+#define I40E_GLNVM_FLA_FL_BUSY_SHIFT 30
+#define I40E_GLNVM_FLA_FL_BUSY_MASK (0x1 << I40E_GLNVM_FLA_FL_BUSY_SHIFT)
+#define I40E_GLNVM_FLA_FL_DER_SHIFT 31
+#define I40E_GLNVM_FLA_FL_DER_MASK (0x1 << I40E_GLNVM_FLA_FL_DER_SHIFT)
+#define I40E_GLNVM_FLASHID 0x000B6104
+#define I40E_GLNVM_FLASHID_FLASHID_SHIFT 0
+#define I40E_GLNVM_FLASHID_FLASHID_MASK (0xFFFFFF << I40E_GLNVM_FLASHID_FLASHID_SHIFT)
+#define I40E_GLNVM_GENS 0x000B6100
+#define I40E_GLNVM_GENS_NVM_PRES_SHIFT 0
+#define I40E_GLNVM_GENS_NVM_PRES_MASK (0x1 << I40E_GLNVM_GENS_NVM_PRES_SHIFT)
+#define I40E_GLNVM_GENS_SR_SIZE_SHIFT 5
+#define I40E_GLNVM_GENS_SR_SIZE_MASK (0x7 << I40E_GLNVM_GENS_SR_SIZE_SHIFT)
+#define I40E_GLNVM_GENS_BANK1VAL_SHIFT 8
+#define I40E_GLNVM_GENS_BANK1VAL_MASK (0x1 << I40E_GLNVM_GENS_BANK1VAL_SHIFT)
+#define I40E_GLNVM_GENS_ALT_PRST_SHIFT 23
+#define I40E_GLNVM_GENS_ALT_PRST_MASK (0x1 << I40E_GLNVM_GENS_ALT_PRST_SHIFT)
+#define I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT 25
+#define I40E_GLNVM_GENS_FL_AUTO_RD_MASK (0x1 << I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT)
+#define I40E_GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */
+#define I40E_GLNVM_PROTCSR_MAX_INDEX 59
+#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT 0
+#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_MASK (0xFFFFFF << I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT)
+#define I40E_GLNVM_SRCTL 0x000B6110
+#define I40E_GLNVM_SRCTL_SRBUSY_SHIFT 0
+#define I40E_GLNVM_SRCTL_SRBUSY_MASK (0x1 << I40E_GLNVM_SRCTL_SRBUSY_SHIFT)
+#define I40E_GLNVM_SRCTL_ADDR_SHIFT 14
+#define I40E_GLNVM_SRCTL_ADDR_MASK (0x7FFF << I40E_GLNVM_SRCTL_ADDR_SHIFT)
+#define I40E_GLNVM_SRCTL_WRITE_SHIFT 29
+#define I40E_GLNVM_SRCTL_WRITE_MASK (0x1 << I40E_GLNVM_SRCTL_WRITE_SHIFT)
+#define I40E_GLNVM_SRCTL_START_SHIFT 30
+#define I40E_GLNVM_SRCTL_START_MASK (0x1 << I40E_GLNVM_SRCTL_START_SHIFT)
+#define I40E_GLNVM_SRCTL_DONE_SHIFT 31
+#define I40E_GLNVM_SRCTL_DONE_MASK (0x1 << I40E_GLNVM_SRCTL_DONE_SHIFT)
+#define I40E_GLNVM_SRDATA 0x000B6114
+#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0
+#define I40E_GLNVM_SRDATA_WRDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_WRDATA_SHIFT)
+#define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16
+#define I40E_GLNVM_SRDATA_RDDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_RDDATA_SHIFT)
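+
+/*
+ * Example: reading one shadow-RAM word through GLNVM_SRCTL/SRDATA:
+ * program the word offset, set START, poll DONE, then pull the result
+ * out of SRDATA.RDDATA. Illustrative, build-guarded sketch only; it
+ * assumes the driver's rd32()/wr32() accessors and omits the bounded
+ * timeout a real implementation needs.
+ */
+#if 0
+static inline u16 i40e_example_read_sr_word(struct i40e_hw *hw, u32 offset)
+{
+	u32 srctl = (offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
+		    (1 << I40E_GLNVM_SRCTL_START_SHIFT);
+
+	wr32(hw, I40E_GLNVM_SRCTL, srctl);
+
+	/* Busy-wait for hardware to latch the word (no timeout here). */
+	while (!(rd32(hw, I40E_GLNVM_SRCTL) & I40E_GLNVM_SRCTL_DONE_MASK))
+		cpu_relax();
+
+	return (rd32(hw, I40E_GLNVM_SRDATA) &
+		I40E_GLNVM_SRDATA_RDDATA_MASK) >>
+	       I40E_GLNVM_SRDATA_RDDATA_SHIFT;
+}
+#endif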
+#define I40E_GLNVM_ULD 0x000B6008
+#define I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT 0
+#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT 1
+#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT 2
+#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT 3
+#define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT 4
+#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT 5
+#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT 6
+#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT 7
+#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT 8
+#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT 9
+#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT)
+
+#define I40E_GLPCI_BYTCTH 0x0009C484
+#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0
+#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK (0xFFFFFFFF << I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT)
+#define I40E_GLPCI_BYTCTL 0x0009C488
+#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT 0
+#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_MASK (0xFFFFFFFF << I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT)
+#define I40E_GLPCI_CAPCTRL 0x000BE4A4
+#define I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT 0
+#define I40E_GLPCI_CAPCTRL_VPD_EN_MASK (0x1 << I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP 0x000BE4A8
+#define I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT 0
+#define I40E_GLPCI_CAPSUP_PCIE_VER_MASK (0x1 << I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT)
+#define I40E_GLPCI_CAPSUP_LTR_EN_SHIFT 2
+#define I40E_GLPCI_CAPSUP_LTR_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_LTR_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_TPH_EN_SHIFT 3
+#define I40E_GLPCI_CAPSUP_TPH_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_TPH_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ARI_EN_SHIFT 4
+#define I40E_GLPCI_CAPSUP_ARI_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ARI_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_IOV_EN_SHIFT 5
+#define I40E_GLPCI_CAPSUP_IOV_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_IOV_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ACS_EN_SHIFT 6
+#define I40E_GLPCI_CAPSUP_ACS_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ACS_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_SEC_EN_SHIFT 7
+#define I40E_GLPCI_CAPSUP_SEC_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_SEC_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT 16
+#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT 17
+#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_IDO_EN_SHIFT 18
+#define I40E_GLPCI_CAPSUP_IDO_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_IDO_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT 19
+#define I40E_GLPCI_CAPSUP_MSI_MASK_MASK (0x1 << I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT)
+#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT 20
+#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT 30
+#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_MASK (0x1 << I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT)
+#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT 31
+#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_MASK (0x1 << I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT)
+#define I40E_GLPCI_CNF 0x000BE4C0
+#define I40E_GLPCI_CNF_FLEX10_SHIFT 1
+#define I40E_GLPCI_CNF_FLEX10_MASK (0x1 << I40E_GLPCI_CNF_FLEX10_SHIFT)
+#define I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT 2
+#define I40E_GLPCI_CNF_WAKE_PIN_EN_MASK (0x1 << I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT)
+#define I40E_GLPCI_CNF2 0x000BE494
+#define I40E_GLPCI_CNF2_RO_DIS_SHIFT 0
+#define I40E_GLPCI_CNF2_RO_DIS_MASK (0x1 << I40E_GLPCI_CNF2_RO_DIS_SHIFT)
+#define I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT 1
+#define I40E_GLPCI_CNF2_CACHELINE_SIZE_MASK (0x1 << I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT)
+#define I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT 2
+#define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK (0x7FF << I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT)
+#define I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT 13
+#define I40E_GLPCI_CNF2_MSI_X_VF_N_MASK (0x7FF << I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT)
+#define I40E_GLPCI_DREVID 0x0009C480
+#define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0
+#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK (0xFF << I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT)
+#define I40E_GLPCI_GSCL_1 0x0009C48C
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT 0
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT 1
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT 2
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT 3
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT 4
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT 5
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT 6
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT 7
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT 8
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT 9
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_MASK (0x1F << I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT 14
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT 15
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_MASK (0x1F << I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT 28
+#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT 29
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT 30
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT 31
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT)
+#define I40E_GLPCI_GSCL_2 0x0009C490
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT 0
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT 8
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT 16
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT 24
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT)
+#define I40E_GLPCI_GSCL_5_8(_i) (0x0009C494 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLPCI_GSCL_5_8_MAX_INDEX 3
+#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT 0
+#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_MASK (0xFFFF << I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT)
+#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT 16
+#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_MASK (0xFFFF << I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT)
+#define I40E_GLPCI_GSCN_0_3(_i) (0x0009C4A4 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3
+#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0
+#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK (0xFFFFFFFF << I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT)
+#define I40E_GLPCI_LATCT 0x0009C4B4
+#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT 0
+#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_MASK (0xFFFFFFFF << I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT)
+#define I40E_GLPCI_LBARCTRL 0x000BE484
+#define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0
+#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK (0x1 << I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT)
+#define I40E_GLPCI_LBARCTRL_BAR32_SHIFT 1
+#define I40E_GLPCI_LBARCTRL_BAR32_MASK (0x1 << I40E_GLPCI_LBARCTRL_BAR32_SHIFT)
+#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT 3
+#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_MASK (0x1 << I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK (0x3 << I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT 6
+#define I40E_GLPCI_LBARCTRL_FL_SIZE_MASK (0x7 << I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT 10
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_MASK (0x1 << I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT 11
+#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_MASK (0x7 << I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT)
+#define I40E_GLPCI_LINKCAP 0x000BE4AC
+#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT 0
+#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_MASK (0x3F << I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT)
+#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT 6
+#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_MASK (0x7 << I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT)
+#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT 9
+#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK (0xF << I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT)
+#define I40E_GLPCI_PCIERR 0x000BE4FC
+#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0
+#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK (0xFFFFFFFF << I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT)
+#define I40E_GLPCI_PCITEST2 0x000BE4BC
+#define I40E_GLPCI_PCITEST2_IOV_TEST_MODE_SHIFT 0
+#define I40E_GLPCI_PCITEST2_IOV_TEST_MODE_MASK (0x1 << I40E_GLPCI_PCITEST2_IOV_TEST_MODE_SHIFT)
+#define I40E_GLPCI_PCITEST2_TAG_ALLOC_SHIFT 1
+#define I40E_GLPCI_PCITEST2_TAG_ALLOC_MASK (0x1 << I40E_GLPCI_PCITEST2_TAG_ALLOC_SHIFT)
+
+#define I40E_GLPCI_PKTCT 0x0009C4BC
+#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0
+#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK (0xFFFFFFFF << I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT)
+#define I40E_GLPCI_PMSUP 0x000BE4B0
+#define I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT 0
+#define I40E_GLPCI_PMSUP_ASPM_SUP_MASK (0x3 << I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT)
+#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT 2
+#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT 5
+#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT 8
+#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT 11
+#define I40E_GLPCI_PMSUP_L1_ACC_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT 14
+#define I40E_GLPCI_PMSUP_SLOT_CLK_MASK (0x1 << I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT)
+#define I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT 15
+#define I40E_GLPCI_PMSUP_OBFF_SUP_MASK (0x3 << I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT)
+#define I40E_GLPCI_PWRDATA 0x000BE490
+#define I40E_GLPCI_PWRDATA_D0_POWER_SHIFT 0
+#define I40E_GLPCI_PWRDATA_D0_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_D0_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT 8
+#define I40E_GLPCI_PWRDATA_COMM_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_D3_POWER_SHIFT 16
+#define I40E_GLPCI_PWRDATA_D3_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_D3_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT 24
+#define I40E_GLPCI_PWRDATA_DATA_SCALE_MASK (0x3 << I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT)
+#define I40E_GLPCI_REVID 0x000BE4B4
+#define I40E_GLPCI_REVID_NVM_REVID_SHIFT 0
+#define I40E_GLPCI_REVID_NVM_REVID_MASK (0xFF << I40E_GLPCI_REVID_NVM_REVID_SHIFT)
+#define I40E_GLPCI_SERH 0x000BE49C
+#define I40E_GLPCI_SERH_SER_NUM_H_SHIFT 0
+#define I40E_GLPCI_SERH_SER_NUM_H_MASK (0xFFFF << I40E_GLPCI_SERH_SER_NUM_H_SHIFT)
+#define I40E_GLPCI_SERL 0x000BE498
+#define I40E_GLPCI_SERL_SER_NUM_L_SHIFT 0
+#define I40E_GLPCI_SERL_SER_NUM_L_MASK (0xFFFFFFFF << I40E_GLPCI_SERL_SER_NUM_L_SHIFT)
+#define I40E_GLPCI_SUBSYSID 0x000BE48C
+#define I40E_GLPCI_SUBSYSID_SUB_VEN_ID_SHIFT 0
+#define I40E_GLPCI_SUBSYSID_SUB_VEN_ID_MASK (0xFFFF << I40E_GLPCI_SUBSYSID_SUB_VEN_ID_SHIFT)
+#define I40E_GLPCI_SUBSYSID_SUB_ID_SHIFT 16
+#define I40E_GLPCI_SUBSYSID_SUB_ID_MASK (0xFFFF << I40E_GLPCI_SUBSYSID_SUB_ID_SHIFT)
+#define I40E_GLPCI_UPADD 0x000BE4F8
+#define I40E_GLPCI_UPADD_ADDRESS_SHIFT 1
+#define I40E_GLPCI_UPADD_ADDRESS_MASK (0x7FFFFFFF << I40E_GLPCI_UPADD_ADDRESS_SHIFT)
+#define I40E_GLPCI_VFSUP 0x000BE4B8
+#define I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT 0
+#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK (0x1 << I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT)
+#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1
+#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK (0x1 << I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT)
+#define I40E_PF_FUNC_RID 0x0009C000
+#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0
+#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK (0x7 << I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT)
+#define I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT 3
+#define I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK (0x1F << I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT)
+#define I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT 8
+#define I40E_PF_FUNC_RID_BUS_NUMBER_MASK (0xFF << I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT)
+#define I40E_PF_PCI_CIAA 0x0009C080
+#define I40E_PF_PCI_CIAA_ADDRESS_SHIFT 0
+#define I40E_PF_PCI_CIAA_ADDRESS_MASK (0xFFF << I40E_PF_PCI_CIAA_ADDRESS_SHIFT)
+#define I40E_PF_PCI_CIAA_VF_NUM_SHIFT 12
+#define I40E_PF_PCI_CIAA_VF_NUM_MASK (0x7F << I40E_PF_PCI_CIAA_VF_NUM_SHIFT)
+#define I40E_PF_PCI_CIAD 0x0009C100
+#define I40E_PF_PCI_CIAD_DATA_SHIFT 0
+#define I40E_PF_PCI_CIAD_DATA_MASK (0xFFFFFFFF << I40E_PF_PCI_CIAD_DATA_SHIFT)
+#define I40E_PFPCI_CLASS 0x000BE400
+#define I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT 0
+#define I40E_PFPCI_CLASS_STORAGE_CLASS_MASK (0x1 << I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT)
+#define I40E_PFPCI_CNF 0x000BE000
+#define I40E_PFPCI_CNF_MSI_EN_SHIFT 2
+#define I40E_PFPCI_CNF_MSI_EN_MASK (0x1 << I40E_PFPCI_CNF_MSI_EN_SHIFT)
+#define I40E_PFPCI_CNF_EXROM_DIS_SHIFT 3
+#define I40E_PFPCI_CNF_EXROM_DIS_MASK (0x1 << I40E_PFPCI_CNF_EXROM_DIS_SHIFT)
+#define I40E_PFPCI_CNF_IO_BAR_SHIFT 4
+#define I40E_PFPCI_CNF_IO_BAR_MASK (0x1 << I40E_PFPCI_CNF_IO_BAR_SHIFT)
+#define I40E_PFPCI_CNF_INT_PIN_SHIFT 5
+#define I40E_PFPCI_CNF_INT_PIN_MASK (0x3 << I40E_PFPCI_CNF_INT_PIN_SHIFT)
+#define I40E_PFPCI_FACTPS 0x0009C180
+#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT 0
+#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_MASK (0x3 << I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT)
+#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT 3
+#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_MASK (0x1 << I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT)
+#define I40E_PFPCI_FUNC 0x000BE200
+#define I40E_PFPCI_FUNC_FUNC_DIS_SHIFT 0
+#define I40E_PFPCI_FUNC_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT 1
+#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT 2
+#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_MASK (0x1 << I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT)
+#define I40E_PFPCI_FUNC2 0x000BE180
+#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT 0
+#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_ICAUSE 0x0009C200
+#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT 0
+#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_MASK (0xFFFFFFFF << I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT)
+#define I40E_PFPCI_IENA 0x0009C280
+#define I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT 0
+#define I40E_PFPCI_IENA_PCIE_ERR_EN_MASK (0xFFFFFFFF << I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT)
+#define I40E_PFPCI_PFDEVID 0x000BE080
+#define I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_SHIFT 0
+#define I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_MASK (0xFFFF << I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_SHIFT)
+#define I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_SHIFT 16
+#define I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_MASK (0xFFFF << I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_SHIFT)
+#define I40E_PFPCI_PM 0x000BE300
+#define I40E_PFPCI_PM_PME_EN_SHIFT 0
+#define I40E_PFPCI_PM_PME_EN_MASK (0x1 << I40E_PFPCI_PM_PME_EN_SHIFT)
+#define I40E_PFPCI_STATUS1 0x000BE280
+#define I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT 0
+#define I40E_PFPCI_STATUS1_FUNC_VALID_MASK (0x1 << I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT)
+#define I40E_PFPCI_VFDEVID 0x000BE100
+#define I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_SHIFT 0
+#define I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_MASK (0xFFFF << I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_SHIFT)
+#define I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_SHIFT 16
+#define I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_MASK (0xFFFF << I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_SHIFT)
+#define I40E_PFPCI_VMINDEX 0x0009C300
+#define I40E_PFPCI_VMINDEX_VMINDEX_SHIFT 0
+#define I40E_PFPCI_VMINDEX_VMINDEX_MASK (0x1FF << I40E_PFPCI_VMINDEX_VMINDEX_SHIFT)
+#define I40E_PFPCI_VMPEND 0x0009C380
+#define I40E_PFPCI_VMPEND_PENDING_SHIFT 0
+#define I40E_PFPCI_VMPEND_PENDING_MASK (0x1 << I40E_PFPCI_VMPEND_PENDING_SHIFT)
+#define I40E_GLPE_CPUSTATUS0 0x0000D040
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT)
+#define I40E_GLPE_CPUSTATUS1 0x0000D044
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT)
+#define I40E_GLPE_CPUSTATUS2 0x0000D048
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT)
+#define I40E_GLPE_PFFLMOBJCTRL(_i) (0x0000D480 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPE_PFFLMOBJCTRL_MAX_INDEX 15
+#define I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
+#define I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK (0x7 << I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
+#define I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_MASK (0x7 << I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK (0x7 << I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK (0x7 << I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK (0x1 << I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK (0x3FFFF << I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK (0x1 << I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_AEQALLOC 0x00131180
+#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_PFPE_AEQALLOC_AECOUNT_MASK (0xFFFFFFFF << I40E_PFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_PFPE_CCQPHIGH 0x00008200
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_PFPE_CCQPLOW 0x00008180
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK (0xFFFFFFFF << I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_PFPE_CCQPSTATUS 0x00008100
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1 << I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1 << I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
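+
+/*
+ * Example: checking control-QP completion status via PFPE_CCQPSTATUS,
+ * which reports DONE in bit 0 and ERR in bit 31. Illustrative,
+ * build-guarded sketch only, assuming the driver's rd32() accessor;
+ * a real caller would bound the wait.
+ */
+#if 0
+static inline int i40e_example_ccqp_done(struct i40e_hw *hw)
+{
+	u32 status = rd32(hw, I40E_PFPE_CCQPSTATUS);
+
+	if (status & I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK)
+		return -EIO;	/* hardware flagged the operation */
+
+	/* 1 = done, 0 = still pending */
+	return !!(status & I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK);
+}
+#endif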
+#define I40E_PFPE_CQACK 0x00131100
+#define I40E_PFPE_CQACK_PECQID_SHIFT 0
+#define I40E_PFPE_CQACK_PECQID_MASK (0x1FFFF << I40E_PFPE_CQACK_PECQID_SHIFT)
+#define I40E_PFPE_CQARM 0x00131080
+#define I40E_PFPE_CQARM_PECQID_SHIFT 0
+#define I40E_PFPE_CQARM_PECQID_MASK (0x1FFFF << I40E_PFPE_CQARM_PECQID_SHIFT)
+#define I40E_PFPE_CQPDB 0x00008000
+#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_PFPE_CQPDB_WQHEAD_MASK (0x7FF << I40E_PFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_PFPE_CQPERRCODES 0x00008880
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK (0xFFFF << I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_PFPE_CQPTAIL 0x00008080
+#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_PFPE_CQPTAIL_WQTAIL_MASK (0x7FF << I40E_PFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK (0x1 << I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_FLMXMITALLOCERR 0x00008900
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_IPCONFIG0 0x00008280
+#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_PFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+
+#define I40E_PFPE_MRTEIDXMASK 0x00008600
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_PFPE_TCPNOWTIMER 0x00008580
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK (0xFFFFFFFF << I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_PFPE_UDACTRL 0x00008700
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_MASK (0x1 << I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN 0x00008780
+#define I40E_PFPE_UDAUCFBQPN_QPN_SHIFT 0
+#define I40E_PFPE_UDAUCFBQPN_QPN_MASK (0x3FFFF << I40E_PFPE_UDAUCFBQPN_QPN_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN_VALID_SHIFT 31
+#define I40E_PFPE_UDAUCFBQPN_VALID_MASK (0x1 << I40E_PFPE_UDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_WQEALLOC 0x00138C00
+#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_PFPE_WQEALLOC_PEQPID_MASK (0x3FFFF << I40E_PFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK (0xFFF << I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
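/*
 * Illustrative sketch: a protocol-engine doorbell write composes both
 * WQEALLOC fields in a single 32-bit value, since PEQPID (bits 17:0) and
 * WQE_DESC_INDEX (bits 31:20) live in the same register.  wr32() is the
 * assumed MMIO write accessor.
 */
static inline void i40e_pfpe_ring_wqe_db(struct i40e_hw *hw, u32 qpid,
					 u32 wqe_idx)
{
	u32 db = ((qpid << I40E_PFPE_WQEALLOC_PEQPID_SHIFT) &
		  I40E_PFPE_WQEALLOC_PEQPID_MASK) |
		 ((wqe_idx << I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT) &
		  I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK);

	wr32(hw, I40E_PFPE_WQEALLOC, db);
}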
+#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_AEQALLOC_MAX_INDEX 127
+#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC_AECOUNT_MASK (0xFFFFFFFF << I40E_VFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CCQPLOW_MAX_INDEX 127
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK (0xFFFFFFFF << I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1 << I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1 << I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQACK_MAX_INDEX 127
+#define I40E_VFPE_CQACK_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK_PECQID_MASK (0x1FFFF << I40E_VFPE_CQACK_PECQID_SHIFT)
+#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQARM_MAX_INDEX 127
+#define I40E_VFPE_CQARM_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM_PECQID_MASK (0x1FFFF << I40E_VFPE_CQARM_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQPDB_MAX_INDEX 127
+#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB_WQHEAD_MASK (0x7FF << I40E_VFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQPTAIL_MAX_INDEX 127
+#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL_WQTAIL_MASK (0x7FF << I40E_VFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK (0x1 << I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127
+#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK (0xFFFFFFFF << I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_WQEALLOC_MAX_INDEX 127
+#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC_PEQPID_MASK (0x3FFFF << I40E_VFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK (0xFFF << I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
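/*
 * Illustrative sketch: each I40E_VFPE_* register above is a 128-entry array
 * indexed by absolute VF id with a 4-byte stride, so the per-VF variant of an
 * operation differs only in the address computation.  Here, arming one VF's
 * completion-queue doorbell (wr32() assumed as above):
 */
static inline void i40e_vfpe_cq_arm(struct i40e_hw *hw, u16 abs_vf_id,
				    u32 cq_id)
{
	u32 val = (cq_id << I40E_VFPE_CQARM_PECQID_SHIFT) &
		  I40E_VFPE_CQARM_PECQID_MASK;

	/* resolves to 0x00130400 + abs_vf_id * 4, with abs_vf_id <= 127 */
	wr32(hw, I40E_VFPE_CQARM(abs_vf_id), val);
}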
+#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
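/*
 * Illustrative sketch: these GLPES statistics are 48-bit counters split
 * across a LO/HI register pair -- LO carries the low 32 bits, HI the top 16
 * (its mask is 0xFFFF).  If the hardware does not latch the pair on the LO
 * read (an assumption here), re-reading HI guards against a carry landing
 * between the two reads.
 */
static inline u64 i40e_read_pes_stat48(struct i40e_hw *hw, u32 lo_reg,
				       u32 hi_reg)
{
	u32 lo, hi, hi2;

	do {
		hi = rd32(hw, hi_reg);
		lo = rd32(hw, lo_reg);
		hi2 = rd32(hw, hi_reg);
	} while (hi != hi2);		/* high half carried mid-read: retry */

	return ((u64)(hi & 0xFFFF) << 32) | lo;
}

/*
 * Example: IPv4 packets received on PF 'pf_id':
 *
 *	pkts = i40e_read_pes_stat48(hw, I40E_GLPES_PFIP4RXPKTSLO(pf_id),
 *				    I40E_GLPES_PFIP4RXPKTSHI(pf_id));
 */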
+#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK (0xFFFFFF << I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK (0xFFFFFF << I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK (0xFFFFFF << I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK (0xFFFF << I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK (0xFFFF << I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK (0xFFFFFF << I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK (0xFFFFFF << I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT)
+#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT)
+#define I40E_GLPES_RDMARXUNALIGN 0x0001E000
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXUNEXPERR 0x0001E008
+#define I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_SHIFT 0
+#define I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_MASK (0xFFFFFF << I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK (0xFFFFFF << I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK (0xFFFFFF << I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK (0xFFFFFF << I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK (0xFFFF << I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK (0xFFFF << I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_PRTPM_EEE_STAT 0x001E4320
+#define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29
+#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK (0x1 << I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT)
+#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT 30
+#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK (0x1 << I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT)
+#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31
+#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK (0x1 << I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT)
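/*
 * Illustrative sketch: PRTPM_EEE_STAT packs the negotiation result and both
 * LPI indications into single bits, so each status check is one read plus a
 * mask test (rd32() assumed as above).
 */
static inline bool i40e_eee_negotiated(struct i40e_hw *hw)
{
	return rd32(hw, I40E_PRTPM_EEE_STAT) &
	       I40E_PRTPM_EEE_STAT_EEE_NEG_MASK;	/* bit 29 */
}

static inline bool i40e_tx_lpi_active(struct i40e_hw *hw)
{
	return rd32(hw, I40E_PRTPM_EEE_STAT) &
	       I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK;	/* bit 31 */
}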
+#define I40E_PRTPM_EEEC 0x001E4380
+#define I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT 16
+#define I40E_PRTPM_EEEC_TW_WAKE_MIN_MASK (0x3F << I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT)
+#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT 24
+#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_MASK (0x3 << I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT)
+#define I40E_PRTPM_EEEC_TEEE_DLY_SHIFT 26
+#define I40E_PRTPM_EEEC_TEEE_DLY_MASK (0x3F << I40E_PRTPM_EEEC_TEEE_DLY_SHIFT)
+#define I40E_PRTPM_EEEFWD 0x001E4400
+#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT 31
+#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_MASK (0x1 << I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT)
+#define I40E_PRTPM_EEER 0x001E4360
+#define I40E_PRTPM_EEER_TW_SYSTEM_SHIFT 0
+#define I40E_PRTPM_EEER_TW_SYSTEM_MASK (0xFFFF << I40E_PRTPM_EEER_TW_SYSTEM_SHIFT)
+#define I40E_PRTPM_EEER_TX_LPI_EN_SHIFT 16
+#define I40E_PRTPM_EEER_TX_LPI_EN_MASK (0x1 << I40E_PRTPM_EEER_TX_LPI_EN_SHIFT)
+#define I40E_PRTPM_EEETXC 0x001E43E0
+#define I40E_PRTPM_EEETXC_TW_PHY_SHIFT 0
+#define I40E_PRTPM_EEETXC_TW_PHY_MASK (0xFFFF << I40E_PRTPM_EEETXC_TW_PHY_SHIFT)
+#define I40E_PRTPM_GC 0x000B8140
+#define I40E_PRTPM_GC_EMP_LINK_ON_SHIFT 0
+#define I40E_PRTPM_GC_EMP_LINK_ON_MASK (0x1 << I40E_PRTPM_GC_EMP_LINK_ON_SHIFT)
+#define I40E_PRTPM_GC_MNG_VETO_SHIFT 1
+#define I40E_PRTPM_GC_MNG_VETO_MASK (0x1 << I40E_PRTPM_GC_MNG_VETO_SHIFT)
+#define I40E_PRTPM_GC_RATD_SHIFT 2
+#define I40E_PRTPM_GC_RATD_MASK (0x1 << I40E_PRTPM_GC_RATD_SHIFT)
+#define I40E_PRTPM_GC_LCDMP_SHIFT 3
+#define I40E_PRTPM_GC_LCDMP_MASK (0x1 << I40E_PRTPM_GC_LCDMP_SHIFT)
+#define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31
+#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK (0x1 << I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT)
+#define I40E_PRTPM_RLPIC 0x001E43A0
+#define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0
+#define I40E_PRTPM_RLPIC_ERLPIC_MASK (0xFFFFFFFF << I40E_PRTPM_RLPIC_ERLPIC_SHIFT)
+#define I40E_PRTPM_TLPIC 0x001E43C0
+#define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0
+#define I40E_PRTPM_TLPIC_ETLPIC_MASK (0xFFFFFFFF << I40E_PRTPM_TLPIC_ETLPIC_SHIFT)
+#define I40E_GLRPB_DPSS 0x000AC828
+#define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0
+#define I40E_GLRPB_DPSS_DPS_TCN_MASK (0xFFFFF << I40E_GLRPB_DPSS_DPS_TCN_SHIFT)
+#define I40E_GLRPB_GHW 0x000AC830
+#define I40E_GLRPB_GHW_GHW_SHIFT 0
+#define I40E_GLRPB_GHW_GHW_MASK (0xFFFFF << I40E_GLRPB_GHW_GHW_SHIFT)
+#define I40E_GLRPB_GLW 0x000AC834
+#define I40E_GLRPB_GLW_GLW_SHIFT 0
+#define I40E_GLRPB_GLW_GLW_MASK (0xFFFFF << I40E_GLRPB_GLW_GLW_SHIFT)
+#define I40E_GLRPB_PHW 0x000AC844
+#define I40E_GLRPB_PHW_PHW_SHIFT 0
+#define I40E_GLRPB_PHW_PHW_MASK (0xFFFFF << I40E_GLRPB_PHW_PHW_SHIFT)
+#define I40E_GLRPB_PLW 0x000AC848
+#define I40E_GLRPB_PLW_PLW_SHIFT 0
+#define I40E_GLRPB_PLW_PLW_MASK (0xFFFFF << I40E_GLRPB_PLW_PLW_SHIFT)
+#define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_DHW_MAX_INDEX 7
+#define I40E_PRTRPB_DHW_DHW_TCN_SHIFT 0
+#define I40E_PRTRPB_DHW_DHW_TCN_MASK (0xFFFFF << I40E_PRTRPB_DHW_DHW_TCN_SHIFT)
+#define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_DLW_MAX_INDEX 7
+#define I40E_PRTRPB_DLW_DLW_TCN_SHIFT 0
+#define I40E_PRTRPB_DLW_DLW_TCN_MASK (0xFFFFF << I40E_PRTRPB_DLW_DLW_TCN_SHIFT)
+#define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_DPS_MAX_INDEX 7
+#define I40E_PRTRPB_DPS_DPS_TCN_SHIFT 0
+#define I40E_PRTRPB_DPS_DPS_TCN_MASK (0xFFFFF << I40E_PRTRPB_DPS_DPS_TCN_SHIFT)
+#define I40E_PRTRPB_SHT(_i) (0x000AC480 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_SHT_MAX_INDEX 7
+#define I40E_PRTRPB_SHT_SHT_TCN_SHIFT 0
+#define I40E_PRTRPB_SHT_SHT_TCN_MASK (0xFFFFF << I40E_PRTRPB_SHT_SHT_TCN_SHIFT)
+#define I40E_PRTRPB_SHW 0x000AC580
+#define I40E_PRTRPB_SHW_SHW_SHIFT 0
+#define I40E_PRTRPB_SHW_SHW_MASK (0xFFFFF << I40E_PRTRPB_SHW_SHW_SHIFT)
+#define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_SLT_MAX_INDEX 7
+#define I40E_PRTRPB_SLT_SLT_TCN_SHIFT 0
+#define I40E_PRTRPB_SLT_SLT_TCN_MASK (0xFFFFF << I40E_PRTRPB_SLT_SLT_TCN_SHIFT)
+#define I40E_PRTRPB_SLW 0x000AC6A0
+#define I40E_PRTRPB_SLW_SLW_SHIFT 0
+#define I40E_PRTRPB_SLW_SLW_MASK (0xFFFFF << I40E_PRTRPB_SLW_SLW_SHIFT)
+#define I40E_PRTRPB_SPS 0x000AC7C0
+#define I40E_PRTRPB_SPS_SPS_SHIFT 0
+#define I40E_PRTRPB_SPS_SPS_MASK (0xFFFFF << I40E_PRTRPB_SPS_SPS_SHIFT)
+#define I40E_GLQF_APBVT(_i) (0x00260000 + ((_i) * 4)) /* _i=0...2047 */
+#define I40E_GLQF_APBVT_MAX_INDEX 2047
+#define I40E_GLQF_APBVT_APBVT_SHIFT 0
+#define I40E_GLQF_APBVT_APBVT_MASK (0xFFFFFFFF << I40E_GLQF_APBVT_APBVT_SHIFT)
+#define I40E_GLQF_CTL 0x00269BA4
+#define I40E_GLQF_CTL_HTOEP_SHIFT 1
+#define I40E_GLQF_CTL_HTOEP_MASK (0x1 << I40E_GLQF_CTL_HTOEP_SHIFT)
+#define I40E_GLQF_CTL_HTOEP_FCOE_SHIFT 2
+#define I40E_GLQF_CTL_HTOEP_FCOE_MASK (0x1 << I40E_GLQF_CTL_HTOEP_FCOE_SHIFT)
+#define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3
+#define I40E_GLQF_CTL_PCNT_ALLOC_MASK (0x7 << I40E_GLQF_CTL_PCNT_ALLOC_SHIFT)
+#define I40E_GLQF_CTL_RSVD_SHIFT 7
+#define I40E_GLQF_CTL_RSVD_MASK (0x1 << I40E_GLQF_CTL_RSVD_SHIFT)
+#define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8
+#define I40E_GLQF_CTL_MAXPEBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXPEBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11
+#define I40E_GLQF_CTL_MAXFCBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXFCBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXFDBLEN_SHIFT 14
+#define I40E_GLQF_CTL_MAXFDBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXFDBLEN_SHIFT)
+#define I40E_GLQF_CTL_FDBEST_SHIFT 17
+#define I40E_GLQF_CTL_FDBEST_MASK (0xFF << I40E_GLQF_CTL_FDBEST_SHIFT)
+#define I40E_GLQF_CTL_PROGPRIO_SHIFT 25
+#define I40E_GLQF_CTL_PROGPRIO_MASK (0x1 << I40E_GLQF_CTL_PROGPRIO_SHIFT)
+#define I40E_GLQF_CTL_INVALPRIO_SHIFT 26
+#define I40E_GLQF_CTL_INVALPRIO_MASK (0x1 << I40E_GLQF_CTL_INVALPRIO_SHIFT)
+#define I40E_GLQF_CTL_IGNORE_IP_SHIFT 27
+#define I40E_GLQF_CTL_IGNORE_IP_MASK (0x1 << I40E_GLQF_CTL_IGNORE_IP_SHIFT)
+#define I40E_GLQF_FDCNT_0 0x00269BAC
+#define I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT 0
+#define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK (0x1FFF << I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT)
+#define I40E_GLQF_FDCNT_0_BESTCNT_SHIFT 13
+#define I40E_GLQF_FDCNT_0_BESTCNT_MASK (0x1FFF << I40E_GLQF_FDCNT_0_BESTCNT_SHIFT)
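
Multi-bit fields pair each MASK with its SHIFT and are extracted as (reg & MASK) >> SHIFT. A minimal sketch reading both flow-director usage counters from one GLQF_FDCNT_0 access, again assuming the driver's rd32() helper:

static void i40e_fd_usage_example(struct i40e_hw *hw, u32 *guar, u32 *best)
{
	u32 val = rd32(hw, I40E_GLQF_FDCNT_0);

	/* filters in use from the guaranteed and best-effort pools */
	*guar = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) >>
		I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT;
	*best = (val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
		I40E_GLQF_FDCNT_0_BESTCNT_SHIFT;
}
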
+#define I40E_GLQF_HSYM(_i) (0x00269D00 + ((_i) * 4)) /* _i=0...63 */
+#define I40E_GLQF_HSYM_MAX_INDEX 63
+#define I40E_GLQF_HSYM_SYMH_ENA_SHIFT 0
+#define I40E_GLQF_HSYM_SYMH_ENA_MASK (0x1 << I40E_GLQF_HSYM_SYMH_ENA_SHIFT)
+#define I40E_GLQF_PCNT(_i) (0x00266800 + ((_i) * 4)) /* _i=0...511 */
+#define I40E_GLQF_PCNT_MAX_INDEX 511
+#define I40E_GLQF_PCNT_PCNT_SHIFT 0
+#define I40E_GLQF_PCNT_PCNT_MASK (0xFFFFFFFF << I40E_GLQF_PCNT_PCNT_SHIFT)
+#define I40E_GLQF_SWAP(_i, _j) (0x00267E00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */
+#define I40E_GLQF_SWAP_MAX_INDEX 1
+#define I40E_GLQF_SWAP_OFF0_SRC0_SHIFT 0
+#define I40E_GLQF_SWAP_OFF0_SRC0_MASK (0x3F << I40E_GLQF_SWAP_OFF0_SRC0_SHIFT)
+#define I40E_GLQF_SWAP_OFF0_SRC1_SHIFT 6
+#define I40E_GLQF_SWAP_OFF0_SRC1_MASK (0x3F << I40E_GLQF_SWAP_OFF0_SRC1_SHIFT)
+#define I40E_GLQF_SWAP_FLEN0_SHIFT 12
+#define I40E_GLQF_SWAP_FLEN0_MASK (0xF << I40E_GLQF_SWAP_FLEN0_SHIFT)
+#define I40E_GLQF_SWAP_OFF1_SRC0_SHIFT 16
+#define I40E_GLQF_SWAP_OFF1_SRC0_MASK (0x3F << I40E_GLQF_SWAP_OFF1_SRC0_SHIFT)
+#define I40E_GLQF_SWAP_OFF1_SRC1_SHIFT 22
+#define I40E_GLQF_SWAP_OFF1_SRC1_MASK (0x3F << I40E_GLQF_SWAP_OFF1_SRC1_SHIFT)
+#define I40E_GLQF_SWAP_FLEN1_SHIFT 28
+#define I40E_GLQF_SWAP_FLEN1_MASK (0xF << I40E_GLQF_SWAP_FLEN1_SHIFT)
+#define I40E_PFQF_CTL_0 0x001C0AC0
+#define I40E_PFQF_CTL_0_PEHSIZE_SHIFT 0
+#define I40E_PFQF_CTL_0_PEHSIZE_MASK (0x1F << I40E_PFQF_CTL_0_PEHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PEDSIZE_SHIFT 5
+#define I40E_PFQF_CTL_0_PEDSIZE_MASK (0x1F << I40E_PFQF_CTL_0_PEDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT 10
+#define I40E_PFQF_CTL_0_PFFCHSIZE_MASK (0xF << I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT 14
+#define I40E_PFQF_CTL_0_PFFCDSIZE_MASK (0x3 << I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT 16
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_MASK (0x1 << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_FD_ENA_SHIFT 17
+#define I40E_PFQF_CTL_0_FD_ENA_MASK (0x1 << I40E_PFQF_CTL_0_FD_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT 18
+#define I40E_PFQF_CTL_0_ETYPE_ENA_MASK (0x1 << I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT 19
+#define I40E_PFQF_CTL_0_MACVLAN_ENA_MASK (0x1 << I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT 20
+#define I40E_PFQF_CTL_0_VFFCHSIZE_MASK (0xF << I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT 24
+#define I40E_PFQF_CTL_0_VFFCDSIZE_MASK (0x3 << I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_1 0x00245D80
+#define I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT 0
+#define I40E_PFQF_CTL_1_CLEARFDTABLE_MASK (0x1 << I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT)
+#define I40E_PFQF_FDALLOC 0x00246280
+#define I40E_PFQF_FDALLOC_FDALLOC_SHIFT 0
+#define I40E_PFQF_FDALLOC_FDALLOC_MASK (0xFF << I40E_PFQF_FDALLOC_FDALLOC_SHIFT)
+#define I40E_PFQF_FDALLOC_FDBEST_SHIFT 8
+#define I40E_PFQF_FDALLOC_FDBEST_MASK (0xFF << I40E_PFQF_FDALLOC_FDBEST_SHIFT)
+#define I40E_PFQF_FDSTAT 0x00246380
+#define I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT 0
+#define I40E_PFQF_FDSTAT_GUARANT_CNT_MASK (0x1FFF << I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT)
+#define I40E_PFQF_FDSTAT_BEST_CNT_SHIFT 16
+#define I40E_PFQF_FDSTAT_BEST_CNT_MASK (0x1FFF << I40E_PFQF_FDSTAT_BEST_CNT_SHIFT)
+#define I40E_PFQF_HENA(_i) (0x00245900 + ((_i) * 128)) /* _i=0...1 */
+#define I40E_PFQF_HENA_MAX_INDEX 1
+#define I40E_PFQF_HENA_PTYPE_ENA_SHIFT 0
+#define I40E_PFQF_HENA_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_PFQF_HENA_PTYPE_ENA_SHIFT)
+#define I40E_PFQF_HKEY(_i) (0x00244800 + ((_i) * 128)) /* _i=0...12 */
+#define I40E_PFQF_HKEY_MAX_INDEX 12
+#define I40E_PFQF_HKEY_KEY_0_SHIFT 0
+#define I40E_PFQF_HKEY_KEY_0_MASK (0xFF << I40E_PFQF_HKEY_KEY_0_SHIFT)
+#define I40E_PFQF_HKEY_KEY_1_SHIFT 8
+#define I40E_PFQF_HKEY_KEY_1_MASK (0xFF << I40E_PFQF_HKEY_KEY_1_SHIFT)
+#define I40E_PFQF_HKEY_KEY_2_SHIFT 16
+#define I40E_PFQF_HKEY_KEY_2_MASK (0xFF << I40E_PFQF_HKEY_KEY_2_SHIFT)
+#define I40E_PFQF_HKEY_KEY_3_SHIFT 24
+#define I40E_PFQF_HKEY_KEY_3_MASK (0xFF << I40E_PFQF_HKEY_KEY_3_SHIFT)
+#define I40E_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */
+#define I40E_PFQF_HLUT_MAX_INDEX 127
+#define I40E_PFQF_HLUT_LUT0_SHIFT 0
+#define I40E_PFQF_HLUT_LUT0_MASK (0x3F << I40E_PFQF_HLUT_LUT0_SHIFT)
+#define I40E_PFQF_HLUT_LUT1_SHIFT 8
+#define I40E_PFQF_HLUT_LUT1_MASK (0x3F << I40E_PFQF_HLUT_LUT1_SHIFT)
+#define I40E_PFQF_HLUT_LUT2_SHIFT 16
+#define I40E_PFQF_HLUT_LUT2_MASK (0x3F << I40E_PFQF_HLUT_LUT2_SHIFT)
+#define I40E_PFQF_HLUT_LUT3_SHIFT 24
+#define I40E_PFQF_HLUT_LUT3_MASK (0x3F << I40E_PFQF_HLUT_LUT3_SHIFT)
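
PFQF_HKEY(_i) spans a 13-word RSS hash key and PFQF_HLUT(_i) a 512-entry lookup table, four 6-bit entries per 32-bit register. A minimal sketch of loading both, assuming the driver's wr32() helper; the key buffer (13 words) and lut buffer (512 bytes) are hypothetical caller state:

static void i40e_rss_load_example(struct i40e_hw *hw, const u32 *key,
				  const u8 *lut)
{
	int i;

	/* 13 key words, I40E_PFQF_HKEY(0..12) */
	for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
		wr32(hw, I40E_PFQF_HKEY(i), key[i]);

	/* 128 LUT words, four 6-bit queue indices packed in each */
	for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
		wr32(hw, I40E_PFQF_HLUT(i),
		     (((u32)lut[4 * i] << I40E_PFQF_HLUT_LUT0_SHIFT) &
		      I40E_PFQF_HLUT_LUT0_MASK) |
		     (((u32)lut[4 * i + 1] << I40E_PFQF_HLUT_LUT1_SHIFT) &
		      I40E_PFQF_HLUT_LUT1_MASK) |
		     (((u32)lut[4 * i + 2] << I40E_PFQF_HLUT_LUT2_SHIFT) &
		      I40E_PFQF_HLUT_LUT2_MASK) |
		     (((u32)lut[4 * i + 3] << I40E_PFQF_HLUT_LUT3_SHIFT) &
		      I40E_PFQF_HLUT_LUT3_MASK));
}
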
+#define I40E_PFQF_HREGION(_i) (0x00245400 + ((_i) * 128)) /* _i=0...7 */
+#define I40E_PFQF_HREGION_MAX_INDEX 7
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_PFQF_HREGION_REGION_0_SHIFT 1
+#define I40E_PFQF_HREGION_REGION_0_MASK (0x7 << I40E_PFQF_HREGION_REGION_0_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_PFQF_HREGION_REGION_1_SHIFT 5
+#define I40E_PFQF_HREGION_REGION_1_MASK (0x7 << I40E_PFQF_HREGION_REGION_1_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_PFQF_HREGION_REGION_2_SHIFT 9
+#define I40E_PFQF_HREGION_REGION_2_MASK (0x7 << I40E_PFQF_HREGION_REGION_2_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_PFQF_HREGION_REGION_3_SHIFT 13
+#define I40E_PFQF_HREGION_REGION_3_MASK (0x7 << I40E_PFQF_HREGION_REGION_3_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_PFQF_HREGION_REGION_4_SHIFT 17
+#define I40E_PFQF_HREGION_REGION_4_MASK (0x7 << I40E_PFQF_HREGION_REGION_4_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_PFQF_HREGION_REGION_5_SHIFT 21
+#define I40E_PFQF_HREGION_REGION_5_MASK (0x7 << I40E_PFQF_HREGION_REGION_5_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_PFQF_HREGION_REGION_6_SHIFT 25
+#define I40E_PFQF_HREGION_REGION_6_MASK (0x7 << I40E_PFQF_HREGION_REGION_6_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_PFQF_HREGION_REGION_7_SHIFT 29
+#define I40E_PFQF_HREGION_REGION_7_MASK (0x7 << I40E_PFQF_HREGION_REGION_7_SHIFT)
+#define I40E_PRTQF_CTL_0 0x00256E60
+#define I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT 0
+#define I40E_PRTQF_CTL_0_HSYM_ENA_MASK (0x1 << I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT)
+#define I40E_PRTQF_FD_FLXINSET(_i) (0x00253800 + ((_i) * 32)) /* _i=0...63 */
+#define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63
+#define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0
+#define I40E_PRTQF_FD_FLXINSET_INSET_MASK (0xFF << I40E_PRTQF_FD_FLXINSET_INSET_SHIFT)
+#define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */
+#define I40E_PRTQF_FD_MSK_MAX_INDEX 63
+#define I40E_PRTQF_FD_MSK_MASK_SHIFT 0
+#define I40E_PRTQF_FD_MSK_MASK_MASK (0xFFFF << I40E_PRTQF_FD_MSK_MASK_SHIFT)
+#define I40E_PRTQF_FD_MSK_OFFSET_SHIFT 16
+#define I40E_PRTQF_FD_MSK_OFFSET_MASK (0x3F << I40E_PRTQF_FD_MSK_OFFSET_SHIFT)
+#define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */
+#define I40E_PRTQF_FLX_PIT_MAX_INDEX 8
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK (0x1F << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
+#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 5
+#define I40E_PRTQF_FLX_PIT_FSIZE_MASK (0x1F << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
+#define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10
+#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK (0x3F << I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
+#define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...1, _VF=0...127 */
+#define I40E_VFQF_HENA1_MAX_INDEX 1
+#define I40E_VFQF_HENA1_PTYPE_ENA_SHIFT 0
+#define I40E_VFQF_HENA1_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_VFQF_HENA1_PTYPE_ENA_SHIFT)
+#define I40E_VFQF_HKEY1(_i, _VF) (0x00228000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...12, _VF=0...127 */
+#define I40E_VFQF_HKEY1_MAX_INDEX 12
+#define I40E_VFQF_HKEY1_KEY_0_SHIFT 0
+#define I40E_VFQF_HKEY1_KEY_0_MASK (0xFF << I40E_VFQF_HKEY1_KEY_0_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_1_SHIFT 8
+#define I40E_VFQF_HKEY1_KEY_1_MASK (0xFF << I40E_VFQF_HKEY1_KEY_1_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_2_SHIFT 16
+#define I40E_VFQF_HKEY1_KEY_2_MASK (0xFF << I40E_VFQF_HKEY1_KEY_2_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_3_SHIFT 24
+#define I40E_VFQF_HKEY1_KEY_3_MASK (0xFF << I40E_VFQF_HKEY1_KEY_3_SHIFT)
+#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */
+#define I40E_VFQF_HLUT1_MAX_INDEX 15
+#define I40E_VFQF_HLUT1_LUT0_SHIFT 0
+#define I40E_VFQF_HLUT1_LUT0_MASK (0xF << I40E_VFQF_HLUT1_LUT0_SHIFT)
+#define I40E_VFQF_HLUT1_LUT1_SHIFT 8
+#define I40E_VFQF_HLUT1_LUT1_MASK (0xF << I40E_VFQF_HLUT1_LUT1_SHIFT)
+#define I40E_VFQF_HLUT1_LUT2_SHIFT 16
+#define I40E_VFQF_HLUT1_LUT2_MASK (0xF << I40E_VFQF_HLUT1_LUT2_SHIFT)
+#define I40E_VFQF_HLUT1_LUT3_SHIFT 24
+#define I40E_VFQF_HLUT1_LUT3_MASK (0xF << I40E_VFQF_HLUT1_LUT3_SHIFT)
+#define I40E_VFQF_HREGION1(_i, _VF) (0x0022E000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...7, _VF=0...127 */
+#define I40E_VFQF_HREGION1_MAX_INDEX 7
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_0_SHIFT 1
+#define I40E_VFQF_HREGION1_REGION_0_MASK (0x7 << I40E_VFQF_HREGION1_REGION_0_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_1_SHIFT 5
+#define I40E_VFQF_HREGION1_REGION_1_MASK (0x7 << I40E_VFQF_HREGION1_REGION_1_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_2_SHIFT 9
+#define I40E_VFQF_HREGION1_REGION_2_MASK (0x7 << I40E_VFQF_HREGION1_REGION_2_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_3_SHIFT 13
+#define I40E_VFQF_HREGION1_REGION_3_MASK (0x7 << I40E_VFQF_HREGION1_REGION_3_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_4_SHIFT 17
+#define I40E_VFQF_HREGION1_REGION_4_MASK (0x7 << I40E_VFQF_HREGION1_REGION_4_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_5_SHIFT 21
+#define I40E_VFQF_HREGION1_REGION_5_MASK (0x7 << I40E_VFQF_HREGION1_REGION_5_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_6_SHIFT 25
+#define I40E_VFQF_HREGION1_REGION_6_MASK (0x7 << I40E_VFQF_HREGION1_REGION_6_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_7_SHIFT 29
+#define I40E_VFQF_HREGION1_REGION_7_MASK (0x7 << I40E_VFQF_HREGION1_REGION_7_SHIFT)
+#define I40E_VPQF_CTL(_VF) (0x001C0000 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VPQF_CTL_MAX_INDEX 127
+#define I40E_VPQF_CTL_PEHSIZE_SHIFT 0
+#define I40E_VPQF_CTL_PEHSIZE_MASK (0x1F << I40E_VPQF_CTL_PEHSIZE_SHIFT)
+#define I40E_VPQF_CTL_PEDSIZE_SHIFT 5
+#define I40E_VPQF_CTL_PEDSIZE_MASK (0x1F << I40E_VPQF_CTL_PEDSIZE_SHIFT)
+#define I40E_VPQF_CTL_FCHSIZE_SHIFT 10
+#define I40E_VPQF_CTL_FCHSIZE_MASK (0xF << I40E_VPQF_CTL_FCHSIZE_SHIFT)
+#define I40E_VPQF_CTL_FCDSIZE_SHIFT 14
+#define I40E_VPQF_CTL_FCDSIZE_MASK (0x3 << I40E_VPQF_CTL_FCDSIZE_SHIFT)
+#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4)) /* _VSI=0...383 */
+#define I40E_VSIQF_CTL_MAX_INDEX 383
+#define I40E_VSIQF_CTL_FCOE_ENA_SHIFT 0
+#define I40E_VSIQF_CTL_FCOE_ENA_MASK (0x1 << I40E_VSIQF_CTL_FCOE_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PETCP_ENA_SHIFT 1
+#define I40E_VSIQF_CTL_PETCP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PETCP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT 2
+#define I40E_VSIQF_CTL_PEUUDP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT 3
+#define I40E_VSIQF_CTL_PEMUDP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT 4
+#define I40E_VSIQF_CTL_PEUFRAG_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5
+#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT)
+#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...3, _VSI=0...383 */
+#define I40E_VSIQF_TCREGION_MAX_INDEX 3
+#define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0
+#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK (0x1FF << I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9
+#define I40E_VSIQF_TCREGION_TC_SIZE_MASK (0x7 << I40E_VSIQF_TCREGION_TC_SIZE_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT 16
+#define I40E_VSIQF_TCREGION_TC_OFFSET2_MASK (0x1FF << I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT 25
+#define I40E_VSIQF_TCREGION_TC_SIZE2_MASK (0x7 << I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT)
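
Writes compose fields with the mirror idiom: shift the value into place, then clamp it with the mask. A minimal sketch packing the two region slots of a VSIQF_TCREGION value; the hardware meaning of the 3-bit size encoding is not asserted here:

static u32 i40e_pack_tcregion_example(u32 off1, u32 size1, u32 off2, u32 size2)
{
	return ((off1 << I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT) &
		I40E_VSIQF_TCREGION_TC_OFFSET_MASK) |
	       ((size1 << I40E_VSIQF_TCREGION_TC_SIZE_SHIFT) &
		I40E_VSIQF_TCREGION_TC_SIZE_MASK) |
	       ((off2 << I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT) &
		I40E_VSIQF_TCREGION_TC_OFFSET2_MASK) |
	       ((size2 << I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT) &
		I40E_VSIQF_TCREGION_TC_SIZE2_MASK);
}
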
+#define I40E_GL_FCOECRC(_i) (0x00314d80 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOECRC_MAX_INDEX 143
+#define I40E_GL_FCOECRC_FCOECRC_SHIFT 0
+#define I40E_GL_FCOECRC_FCOECRC_MASK (0xFFFFFFFF << I40E_GL_FCOECRC_FCOECRC_SHIFT)
+#define I40E_GL_FCOEDDPC(_i) (0x00314480 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDDPC_MAX_INDEX 143
+#define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0
+#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK (0xFFFFFFFF << I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT)
+#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIFEC_MAX_INDEX 143
+#define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0
+#define I40E_GL_FCOEDIFEC_FCOEDIFRC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT)
+#define I40E_GL_FCOEDIFRC(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIFRC_MAX_INDEX 143
+#define I40E_GL_FCOEDIFRC_FCOEDIFRC_SHIFT 0
+#define I40E_GL_FCOEDIFRC_FCOEDIFRC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFRC_FCOEDIFRC_SHIFT)
+#define I40E_GL_FCOEDIFTCL(_i) (0x00354000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIFTCL_MAX_INDEX 143
+#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT 0
+#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT)
+#define I40E_GL_FCOEDIXAC(_i) (0x0031c000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIXAC_MAX_INDEX 143
+#define I40E_GL_FCOEDIXAC_FCOEDIXAC_SHIFT 0
+#define I40E_GL_FCOEDIXAC_FCOEDIXAC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXAC_FCOEDIXAC_SHIFT)
+#define I40E_GL_FCOEDIXEC(_i) (0x0034c000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIXEC_MAX_INDEX 143
+#define I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT 0
+#define I40E_GL_FCOEDIXEC_FCOEDIXEC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT)
+#define I40E_GL_FCOEDIXVC(_i) (0x00350000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIXVC_MAX_INDEX 143
+#define I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT 0
+#define I40E_GL_FCOEDIXVC_FCOEDIXVC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT)
+#define I40E_GL_FCOEDWRCH(_i) (0x00320004 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWRCH_MAX_INDEX 143
+#define I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT 0
+#define I40E_GL_FCOEDWRCH_FCOEDWRCH_MASK (0xFFFF << I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT)
+#define I40E_GL_FCOEDWRCL(_i) (0x00320000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWRCL_MAX_INDEX 143
+#define I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT 0
+#define I40E_GL_FCOEDWRCL_FCOEDWRCL_MASK (0xFFFFFFFF << I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT)
+#define I40E_GL_FCOEDWTCH(_i) (0x00348084 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWTCH_MAX_INDEX 143
+#define I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT 0
+#define I40E_GL_FCOEDWTCH_FCOEDWTCH_MASK (0xFFFF << I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT)
+#define I40E_GL_FCOEDWTCL(_i) (0x00348080 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWTCL_MAX_INDEX 143
+#define I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT 0
+#define I40E_GL_FCOEDWTCL_FCOEDWTCL_MASK (0xFFFFFFFF << I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT)
+#define I40E_GL_FCOELAST(_i) (0x00314000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOELAST_MAX_INDEX 143
+#define I40E_GL_FCOELAST_FCOELAST_SHIFT 0
+#define I40E_GL_FCOELAST_FCOELAST_MASK (0xFFFFFFFF << I40E_GL_FCOELAST_FCOELAST_SHIFT)
+#define I40E_GL_FCOEPRC(_i) (0x00315200 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEPRC_MAX_INDEX 143
+#define I40E_GL_FCOEPRC_FCOEPRC_SHIFT 0
+#define I40E_GL_FCOEPRC_FCOEPRC_MASK (0xFFFFFFFF << I40E_GL_FCOEPRC_FCOEPRC_SHIFT)
+#define I40E_GL_FCOEPTC(_i) (0x00344C00 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEPTC_MAX_INDEX 143
+#define I40E_GL_FCOEPTC_FCOEPTC_SHIFT 0
+#define I40E_GL_FCOEPTC_FCOEPTC_MASK (0xFFFFFFFF << I40E_GL_FCOEPTC_FCOEPTC_SHIFT)
+#define I40E_GL_FCOERPDC(_i) (0x00324000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOERPDC_MAX_INDEX 143
+#define I40E_GL_FCOERPDC_FCOERPDC_SHIFT 0
+#define I40E_GL_FCOERPDC_FCOERPDC_MASK (0xFFFFFFFF << I40E_GL_FCOERPDC_FCOERPDC_SHIFT)
+#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPRCH_MAX_INDEX 3
+#define I40E_GLPRT_BPRCH_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPRCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_BPRCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPRCL_MAX_INDEX 3
+#define I40E_GLPRT_BPRCL_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPRCL_UPRCH_MASK (0xFFFFFFFF << I40E_GLPRT_BPRCL_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPTCH_MAX_INDEX 3
+#define I40E_GLPRT_BPTCH_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPTCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_BPTCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPTCL_MAX_INDEX 3
+#define I40E_GLPRT_BPTCL_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPTCL_UPRCH_MASK (0xFFFFFFFF << I40E_GLPRT_BPTCL_UPRCH_SHIFT)
+#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_CRCERRS_MAX_INDEX 3
+#define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0
+#define I40E_GLPRT_CRCERRS_CRCERRS_MASK (0xFFFFFFFF << I40E_GLPRT_CRCERRS_CRCERRS_SHIFT)
+#define I40E_GLPRT_GORCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GORCH_MAX_INDEX 3
+#define I40E_GLPRT_GORCH_GORCH_SHIFT 0
+#define I40E_GLPRT_GORCH_GORCH_MASK (0xFFFF << I40E_GLPRT_GORCH_GORCH_SHIFT)
+#define I40E_GLPRT_GORCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GORCL_MAX_INDEX 3
+#define I40E_GLPRT_GORCL_GORCL_SHIFT 0
+#define I40E_GLPRT_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLPRT_GORCL_GORCL_SHIFT)
+#define I40E_GLPRT_GOTCH(_i) (0x00300684 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GOTCH_MAX_INDEX 3
+#define I40E_GLPRT_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLPRT_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLPRT_GOTCH_GOTCH_SHIFT)
+#define I40E_GLPRT_GOTCL(_i) (0x00300680 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GOTCL_MAX_INDEX 3
+#define I40E_GLPRT_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLPRT_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLPRT_GOTCL_GOTCL_SHIFT)
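
The GLPRT octet counters are split L/H pairs: bits 0-31 live in the L register and the top 16 bits of a 48-bit rolling counter in the H register. A minimal sketch reassembling good octets received for one port, assuming rd32(); consumers computing deltas must additionally wrap modulo 2^48:

static u64 i40e_read_gorc_example(struct i40e_hw *hw, int port)
{
	u64 lo = rd32(hw, I40E_GLPRT_GORCL(port));
	u64 hi = rd32(hw, I40E_GLPRT_GORCH(port)) &
		 I40E_GLPRT_GORCH_GORCH_MASK;

	return (hi << 32) | lo;
}
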
+#define I40E_GLPRT_ILLERRC(_i) (0x003000E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_ILLERRC_MAX_INDEX 3
+#define I40E_GLPRT_ILLERRC_ILLERRC_SHIFT 0
+#define I40E_GLPRT_ILLERRC_ILLERRC_MASK (0xFFFFFFFF << I40E_GLPRT_ILLERRC_ILLERRC_SHIFT)
+#define I40E_GLPRT_LDPC(_i) (0x00300620 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LDPC_MAX_INDEX 3
+#define I40E_GLPRT_LDPC_LDPC_SHIFT 0
+#define I40E_GLPRT_LDPC_LDPC_MASK (0xFFFFFFFF << I40E_GLPRT_LDPC_LDPC_SHIFT)
+#define I40E_GLPRT_LXOFFRXC(_i) (0x00300160 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXOFFRXC_MAX_INDEX 3
+#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT 0
+#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT)
+#define I40E_GLPRT_LXOFFTXC(_i) (0x003009A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXOFFTXC_MAX_INDEX 3
+#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT 0
+#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_MASK (0xFFFFFFFF << I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT)
+#define I40E_GLPRT_LXONRXC(_i) (0x00300140 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXONRXC_MAX_INDEX 3
+#define I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT 0
+#define I40E_GLPRT_LXONRXC_LXONRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT)
+#define I40E_GLPRT_LXONTXC(_i) (0x00300980 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXONTXC_MAX_INDEX 3
+#define I40E_GLPRT_LXONTXC_LXONTXC_SHIFT 0
+#define I40E_GLPRT_LXONTXC_LXONTXC_MASK (0xFFFFFFFF << I40E_GLPRT_LXONTXC_LXONTXC_SHIFT)
+#define I40E_GLPRT_MLFC(_i) (0x00300020 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MLFC_MAX_INDEX 3
+#define I40E_GLPRT_MLFC_MLFC_SHIFT 0
+#define I40E_GLPRT_MLFC_MLFC_MASK (0xFFFFFFFF << I40E_GLPRT_MLFC_MLFC_SHIFT)
+#define I40E_GLPRT_MPRCH(_i) (0x003005C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPRCH_MAX_INDEX 3
+#define I40E_GLPRT_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLPRT_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLPRT_MPRCH_MPRCH_SHIFT)
+#define I40E_GLPRT_MPRCL(_i) (0x003005C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPRCL_MAX_INDEX 3
+#define I40E_GLPRT_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLPRT_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLPRT_MPRCL_MPRCL_SHIFT)
+#define I40E_GLPRT_MPTCH(_i) (0x003009E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPTCH_MAX_INDEX 3
+#define I40E_GLPRT_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLPRT_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLPRT_MPTCH_MPTCH_SHIFT)
+#define I40E_GLPRT_MPTCL(_i) (0x003009E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPTCL_MAX_INDEX 3
+#define I40E_GLPRT_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLPRT_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLPRT_MPTCL_MPTCL_SHIFT)
+#define I40E_GLPRT_MRFC(_i) (0x00300040 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MRFC_MAX_INDEX 3
+#define I40E_GLPRT_MRFC_MRFC_SHIFT 0
+#define I40E_GLPRT_MRFC_MRFC_MASK (0xFFFFFFFF << I40E_GLPRT_MRFC_MRFC_SHIFT)
+#define I40E_GLPRT_PRC1023H(_i) (0x00300504 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1023H_MAX_INDEX 3
+#define I40E_GLPRT_PRC1023H_PRC1023H_SHIFT 0
+#define I40E_GLPRT_PRC1023H_PRC1023H_MASK (0xFFFF << I40E_GLPRT_PRC1023H_PRC1023H_SHIFT)
+#define I40E_GLPRT_PRC1023L(_i) (0x00300500 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1023L_MAX_INDEX 3
+#define I40E_GLPRT_PRC1023L_PRC1023L_SHIFT 0
+#define I40E_GLPRT_PRC1023L_PRC1023L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC1023L_PRC1023L_SHIFT)
+#define I40E_GLPRT_PRC127H(_i) (0x003004A4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC127H_MAX_INDEX 3
+#define I40E_GLPRT_PRC127H_PRC127H_SHIFT 0
+#define I40E_GLPRT_PRC127H_PRC127H_MASK (0xFFFF << I40E_GLPRT_PRC127H_PRC127H_SHIFT)
+#define I40E_GLPRT_PRC127L(_i) (0x003004A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC127L_MAX_INDEX 3
+#define I40E_GLPRT_PRC127L_PRC127L_SHIFT 0
+#define I40E_GLPRT_PRC127L_PRC127L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC127L_PRC127L_SHIFT)
+#define I40E_GLPRT_PRC1522H(_i) (0x00300524 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1522H_MAX_INDEX 3
+#define I40E_GLPRT_PRC1522H_PRC1522H_SHIFT 0
+#define I40E_GLPRT_PRC1522H_PRC1522H_MASK (0xFFFF << I40E_GLPRT_PRC1522H_PRC1522H_SHIFT)
+#define I40E_GLPRT_PRC1522L(_i) (0x00300520 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1522L_MAX_INDEX 3
+#define I40E_GLPRT_PRC1522L_PRC1522L_SHIFT 0
+#define I40E_GLPRT_PRC1522L_PRC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC1522L_PRC1522L_SHIFT)
+#define I40E_GLPRT_PRC255H(_i) (0x003004C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC255H_MAX_INDEX 3
+#define I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT 0
+#define I40E_GLPRT_PRC255H_PRTPRC255H_MASK (0xFFFF << I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT)
+#define I40E_GLPRT_PRC255L(_i) (0x003004C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC255L_MAX_INDEX 3
+#define I40E_GLPRT_PRC255L_PRC255L_SHIFT 0
+#define I40E_GLPRT_PRC255L_PRC255L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC255L_PRC255L_SHIFT)
+#define I40E_GLPRT_PRC511H(_i) (0x003004E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC511H_MAX_INDEX 3
+#define I40E_GLPRT_PRC511H_PRC511H_SHIFT 0
+#define I40E_GLPRT_PRC511H_PRC511H_MASK (0xFFFF << I40E_GLPRT_PRC511H_PRC511H_SHIFT)
+#define I40E_GLPRT_PRC511L(_i) (0x003004E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC511L_MAX_INDEX 3
+#define I40E_GLPRT_PRC511L_PRC511L_SHIFT 0
+#define I40E_GLPRT_PRC511L_PRC511L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC511L_PRC511L_SHIFT)
+#define I40E_GLPRT_PRC64H(_i) (0x00300484 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC64H_MAX_INDEX 3
+#define I40E_GLPRT_PRC64H_PRC64H_SHIFT 0
+#define I40E_GLPRT_PRC64H_PRC64H_MASK (0xFFFF << I40E_GLPRT_PRC64H_PRC64H_SHIFT)
+#define I40E_GLPRT_PRC64L(_i) (0x00300480 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC64L_MAX_INDEX 3
+#define I40E_GLPRT_PRC64L_PRC64L_SHIFT 0
+#define I40E_GLPRT_PRC64L_PRC64L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC64L_PRC64L_SHIFT)
+#define I40E_GLPRT_PRC9522H(_i) (0x00300544 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC9522H_MAX_INDEX 3
+#define I40E_GLPRT_PRC9522H_PRC1522H_SHIFT 0
+#define I40E_GLPRT_PRC9522H_PRC1522H_MASK (0xFFFF << I40E_GLPRT_PRC9522H_PRC1522H_SHIFT)
+#define I40E_GLPRT_PRC9522L(_i) (0x00300540 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC9522L_MAX_INDEX 3
+#define I40E_GLPRT_PRC9522L_PRC1522L_SHIFT 0
+#define I40E_GLPRT_PRC9522L_PRC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC9522L_PRC1522L_SHIFT)
+#define I40E_GLPRT_PTC1023H(_i) (0x00300724 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1023H_MAX_INDEX 3
+#define I40E_GLPRT_PTC1023H_PTC1023H_SHIFT 0
+#define I40E_GLPRT_PTC1023H_PTC1023H_MASK (0xFFFF << I40E_GLPRT_PTC1023H_PTC1023H_SHIFT)
+#define I40E_GLPRT_PTC1023L(_i) (0x00300720 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1023L_MAX_INDEX 3
+#define I40E_GLPRT_PTC1023L_PTC1023L_SHIFT 0
+#define I40E_GLPRT_PTC1023L_PTC1023L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC1023L_PTC1023L_SHIFT)
+#define I40E_GLPRT_PTC127H(_i) (0x003006C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC127H_MAX_INDEX 3
+#define I40E_GLPRT_PTC127H_PTC127H_SHIFT 0
+#define I40E_GLPRT_PTC127H_PTC127H_MASK (0xFFFF << I40E_GLPRT_PTC127H_PTC127H_SHIFT)
+#define I40E_GLPRT_PTC127L(_i) (0x003006C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC127L_MAX_INDEX 3
+#define I40E_GLPRT_PTC127L_PTC127L_SHIFT 0
+#define I40E_GLPRT_PTC127L_PTC127L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC127L_PTC127L_SHIFT)
+#define I40E_GLPRT_PTC1522H(_i) (0x00300744 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1522H_MAX_INDEX 3
+#define I40E_GLPRT_PTC1522H_PTC1522H_SHIFT 0
+#define I40E_GLPRT_PTC1522H_PTC1522H_MASK (0xFFFF << I40E_GLPRT_PTC1522H_PTC1522H_SHIFT)
+#define I40E_GLPRT_PTC1522L(_i) (0x00300740 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1522L_MAX_INDEX 3
+#define I40E_GLPRT_PTC1522L_PTC1522L_SHIFT 0
+#define I40E_GLPRT_PTC1522L_PTC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC1522L_PTC1522L_SHIFT)
+#define I40E_GLPRT_PTC255H(_i) (0x003006E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC255H_MAX_INDEX 3
+#define I40E_GLPRT_PTC255H_PTC255H_SHIFT 0
+#define I40E_GLPRT_PTC255H_PTC255H_MASK (0xFFFF << I40E_GLPRT_PTC255H_PTC255H_SHIFT)
+#define I40E_GLPRT_PTC255L(_i) (0x003006E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC255L_MAX_INDEX 3
+#define I40E_GLPRT_PTC255L_PTC255L_SHIFT 0
+#define I40E_GLPRT_PTC255L_PTC255L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC255L_PTC255L_SHIFT)
+#define I40E_GLPRT_PTC511H(_i) (0x00300704 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC511H_MAX_INDEX 3
+#define I40E_GLPRT_PTC511H_PTC511H_SHIFT 0
+#define I40E_GLPRT_PTC511H_PTC511H_MASK (0xFFFF << I40E_GLPRT_PTC511H_PTC511H_SHIFT)
+#define I40E_GLPRT_PTC511L(_i) (0x00300700 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC511L_MAX_INDEX 3
+#define I40E_GLPRT_PTC511L_PTC511L_SHIFT 0
+#define I40E_GLPRT_PTC511L_PTC511L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC511L_PTC511L_SHIFT)
+#define I40E_GLPRT_PTC64H(_i) (0x003006A4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC64H_MAX_INDEX 3
+#define I40E_GLPRT_PTC64H_PTC64H_SHIFT 0
+#define I40E_GLPRT_PTC64H_PTC64H_MASK (0xFFFF << I40E_GLPRT_PTC64H_PTC64H_SHIFT)
+#define I40E_GLPRT_PTC64L(_i) (0x003006A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC64L_MAX_INDEX 3
+#define I40E_GLPRT_PTC64L_PTC64L_SHIFT 0
+#define I40E_GLPRT_PTC64L_PTC64L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC64L_PTC64L_SHIFT)
+#define I40E_GLPRT_PTC9522H(_i) (0x00300764 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC9522H_MAX_INDEX 3
+#define I40E_GLPRT_PTC9522H_PTC9522H_SHIFT 0
+#define I40E_GLPRT_PTC9522H_PTC9522H_MASK (0xFFFF << I40E_GLPRT_PTC9522H_PTC9522H_SHIFT)
+#define I40E_GLPRT_PTC9522L(_i) (0x00300760 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC9522L_MAX_INDEX 3
+#define I40E_GLPRT_PTC9522L_PTC9522L_SHIFT 0
+#define I40E_GLPRT_PTC9522L_PTC9522L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC9522L_PTC9522L_SHIFT)
+#define I40E_GLPRT_PXOFFRXC(_i, _j) (0x00300280 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */
+#define I40E_GLPRT_PXOFFRXC_MAX_INDEX 3
+#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT 0
+#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT)
+#define I40E_GLPRT_PXOFFTXC(_i, _j) (0x00300880 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */
+#define I40E_GLPRT_PXOFFTXC_MAX_INDEX 3
+#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT 0
+#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT)
+#define I40E_GLPRT_PXONRXC(_i, _j) (0x00300180 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */
+#define I40E_GLPRT_PXONRXC_MAX_INDEX 3
+#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT 0
+#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT)
+#define I40E_GLPRT_PXONTXC(_i, _j) (0x00300780 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */
+#define I40E_GLPRT_PXONTXC_MAX_INDEX 3
+#define I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT 0
+#define I40E_GLPRT_PXONTXC_PRPXONTXC_MASK (0xFFFFFFFF << I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT)
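
The two-index GLPRT macros step 8 bytes per _i (port) and 32 bytes per _j; the 256-byte spacing between the neighbouring counter blocks implies eight _j slots, presumably one per PFC priority. A minimal sketch walking one port's XOFF receive counters under that assumption, with rd32() and pr_info() taken from the surrounding driver and kernel:

static void i40e_dump_pxoffrxc_example(struct i40e_hw *hw, int port)
{
	int prio;

	for (prio = 0; prio <= 7; prio++)	/* assumed _j range */
		pr_info("port %d prio %d xoff_rx %u\n", port, prio,
			rd32(hw, I40E_GLPRT_PXOFFRXC(port, prio)));
}
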
+#define I40E_GLPRT_RDPC(_i) (0x00300600 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RDPC_MAX_INDEX 3
+#define I40E_GLPRT_RDPC_RDPC_SHIFT 0
+#define I40E_GLPRT_RDPC_RDPC_MASK (0xFFFFFFFF << I40E_GLPRT_RDPC_RDPC_SHIFT)
+#define I40E_GLPRT_RFC(_i) (0x00300560 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RFC_MAX_INDEX 3
+#define I40E_GLPRT_RFC_RFC_SHIFT 0
+#define I40E_GLPRT_RFC_RFC_MASK (0xFFFFFFFF << I40E_GLPRT_RFC_RFC_SHIFT)
+#define I40E_GLPRT_RJC(_i) (0x00300580 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RJC_MAX_INDEX 3
+#define I40E_GLPRT_RJC_RJC_SHIFT 0
+#define I40E_GLPRT_RJC_RJC_MASK (0xFFFFFFFF << I40E_GLPRT_RJC_RJC_SHIFT)
+#define I40E_GLPRT_RLEC(_i) (0x003000A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RLEC_MAX_INDEX 3
+#define I40E_GLPRT_RLEC_RLEC_SHIFT 0
+#define I40E_GLPRT_RLEC_RLEC_MASK (0xFFFFFFFF << I40E_GLPRT_RLEC_RLEC_SHIFT)
+#define I40E_GLPRT_ROC(_i) (0x00300120 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_ROC_MAX_INDEX 3
+#define I40E_GLPRT_ROC_ROC_SHIFT 0
+#define I40E_GLPRT_ROC_ROC_MASK (0xFFFFFFFF << I40E_GLPRT_ROC_ROC_SHIFT)
+#define I40E_GLPRT_RUC(_i) (0x00300100 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RUC_MAX_INDEX 3
+#define I40E_GLPRT_RUC_RUC_SHIFT 0
+#define I40E_GLPRT_RUC_RUC_MASK (0xFFFFFFFF << I40E_GLPRT_RUC_RUC_SHIFT)
+#define I40E_GLPRT_RUPP(_i) (0x00300660 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RUPP_MAX_INDEX 3
+#define I40E_GLPRT_RUPP_RUPP_SHIFT 0
+#define I40E_GLPRT_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLPRT_RUPP_RUPP_SHIFT)
+#define I40E_GLPRT_RXON2OFFCNT(_i, _j) (0x00300380 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */
+#define I40E_GLPRT_RXON2OFFCNT_MAX_INDEX 3
+#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT 0
+#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_MASK (0xFFFFFFFF << I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT)
+#define I40E_GLPRT_STDC(_i) (0x00300640 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_STDC_MAX_INDEX 3
+#define I40E_GLPRT_STDC_STDC_SHIFT 0
+#define I40E_GLPRT_STDC_STDC_MASK (0xFFFFFFFF << I40E_GLPRT_STDC_STDC_SHIFT)
+#define I40E_GLPRT_TDOLD(_i) (0x00300A20 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_TDOLD_MAX_INDEX 3
+#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0
+#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK (0xFFFFFFFF << I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT)
+#define I40E_GLPRT_TDPC(_i) (0x00375400 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_TDPC_MAX_INDEX 3
+#define I40E_GLPRT_TDPC_TDPC_SHIFT 0
+#define I40E_GLPRT_TDPC_TDPC_MASK (0xFFFFFFFF << I40E_GLPRT_TDPC_TDPC_SHIFT)
+#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPRCH_MAX_INDEX 3
+#define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLPRT_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_UPRCH_UPRCH_SHIFT)
+#define I40E_GLPRT_UPRCL(_i) (0x003005A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPRCL_MAX_INDEX 3
+#define I40E_GLPRT_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLPRT_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLPRT_UPRCL_UPRCL_SHIFT)
+#define I40E_GLPRT_UPTCH(_i) (0x003009C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPTCH_MAX_INDEX 3
+#define I40E_GLPRT_UPTCH_UPTCH_SHIFT 0
+#define I40E_GLPRT_UPTCH_UPTCH_MASK (0xFFFF << I40E_GLPRT_UPTCH_UPTCH_SHIFT)
+#define I40E_GLPRT_UPTCL(_i) (0x003009C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPTCL_MAX_INDEX 3
+#define I40E_GLPRT_UPTCL_VUPTCH_SHIFT 0
+#define I40E_GLPRT_UPTCL_VUPTCH_MASK (0xFFFFFFFF << I40E_GLPRT_UPTCL_VUPTCH_SHIFT)
+#define I40E_GLSW_BPRCH(_i) (0x00370104 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPRCH_MAX_INDEX 15
+#define I40E_GLSW_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLSW_BPRCH_BPRCH_MASK (0xFFFF << I40E_GLSW_BPRCH_BPRCH_SHIFT)
+#define I40E_GLSW_BPRCL(_i) (0x00370100 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPRCL_MAX_INDEX 15
+#define I40E_GLSW_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLSW_BPRCL_BPRCL_MASK (0xFFFFFFFF << I40E_GLSW_BPRCL_BPRCL_SHIFT)
+#define I40E_GLSW_BPTCH(_i) (0x00340104 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPTCH_MAX_INDEX 15
+#define I40E_GLSW_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLSW_BPTCH_BPTCH_MASK (0xFFFF << I40E_GLSW_BPTCH_BPTCH_SHIFT)
+#define I40E_GLSW_BPTCL(_i) (0x00340100 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPTCL_MAX_INDEX 15
+#define I40E_GLSW_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLSW_BPTCL_BPTCL_MASK (0xFFFFFFFF << I40E_GLSW_BPTCL_BPTCL_SHIFT)
+#define I40E_GLSW_GORCH(_i) (0x0035C004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GORCH_MAX_INDEX 15
+#define I40E_GLSW_GORCH_GORCH_SHIFT 0
+#define I40E_GLSW_GORCH_GORCH_MASK (0xFFFF << I40E_GLSW_GORCH_GORCH_SHIFT)
+#define I40E_GLSW_GORCL(_i) (0x0035c000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GORCL_MAX_INDEX 15
+#define I40E_GLSW_GORCL_GORCL_SHIFT 0
+#define I40E_GLSW_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLSW_GORCL_GORCL_SHIFT)
+#define I40E_GLSW_GOTCH(_i) (0x0032C004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GOTCH_MAX_INDEX 15
+#define I40E_GLSW_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLSW_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLSW_GOTCH_GOTCH_SHIFT)
+#define I40E_GLSW_GOTCL(_i) (0x0032c000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GOTCL_MAX_INDEX 15
+#define I40E_GLSW_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLSW_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLSW_GOTCL_GOTCL_SHIFT)
+#define I40E_GLSW_MPRCH(_i) (0x00370084 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPRCH_MAX_INDEX 15
+#define I40E_GLSW_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLSW_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLSW_MPRCH_MPRCH_SHIFT)
+#define I40E_GLSW_MPRCL(_i) (0x00370080 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPRCL_MAX_INDEX 15
+#define I40E_GLSW_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLSW_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLSW_MPRCL_MPRCL_SHIFT)
+#define I40E_GLSW_MPTCH(_i) (0x00340084 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPTCH_MAX_INDEX 15
+#define I40E_GLSW_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLSW_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLSW_MPTCH_MPTCH_SHIFT)
+#define I40E_GLSW_MPTCL(_i) (0x00340080 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPTCL_MAX_INDEX 15
+#define I40E_GLSW_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLSW_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLSW_MPTCL_MPTCL_SHIFT)
+#define I40E_GLSW_RUPP(_i) (0x00370180 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_RUPP_MAX_INDEX 15
+#define I40E_GLSW_RUPP_RUPP_SHIFT 0
+#define I40E_GLSW_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLSW_RUPP_RUPP_SHIFT)
+#define I40E_GLSW_TDPC(_i) (0x00348000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_TDPC_MAX_INDEX 15
+#define I40E_GLSW_TDPC_TDPC_SHIFT 0
+#define I40E_GLSW_TDPC_TDPC_MASK (0xFFFFFFFF << I40E_GLSW_TDPC_TDPC_SHIFT)
+#define I40E_GLSW_UPRCH(_i) (0x00370004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPRCH_MAX_INDEX 15
+#define I40E_GLSW_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLSW_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLSW_UPRCH_UPRCH_SHIFT)
+#define I40E_GLSW_UPRCL(_i) (0x00370000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPRCL_MAX_INDEX 15
+#define I40E_GLSW_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLSW_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLSW_UPRCL_UPRCL_SHIFT)
+#define I40E_GLSW_UPTCH(_i) (0x00340004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPTCH_MAX_INDEX 15
+#define I40E_GLSW_UPTCH_UPTCH_SHIFT 0
+#define I40E_GLSW_UPTCH_UPTCH_MASK (0xFFFF << I40E_GLSW_UPTCH_UPTCH_SHIFT)
+#define I40E_GLSW_UPTCL(_i) (0x00340000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPTCL_MAX_INDEX 15
+#define I40E_GLSW_UPTCL_UPTCL_SHIFT 0
+#define I40E_GLSW_UPTCL_UPTCL_MASK (0xFFFFFFFF << I40E_GLSW_UPTCL_UPTCL_SHIFT)
+#define I40E_GLV_BPRCH(_i) (0x0036D804 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPRCH_MAX_INDEX 383
+#define I40E_GLV_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLV_BPRCH_BPRCH_MASK (0xFFFF << I40E_GLV_BPRCH_BPRCH_SHIFT)
+#define I40E_GLV_BPRCL(_i) (0x0036d800 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPRCL_MAX_INDEX 383
+#define I40E_GLV_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLV_BPRCL_BPRCL_MASK (0xFFFFFFFF << I40E_GLV_BPRCL_BPRCL_SHIFT)
+#define I40E_GLV_BPTCH(_i) (0x0033D804 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPTCH_MAX_INDEX 383
+#define I40E_GLV_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLV_BPTCH_BPTCH_MASK (0xFFFF << I40E_GLV_BPTCH_BPTCH_SHIFT)
+#define I40E_GLV_BPTCL(_i) (0x0033d800 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPTCL_MAX_INDEX 383
+#define I40E_GLV_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLV_BPTCL_BPTCL_MASK (0xFFFFFFFF << I40E_GLV_BPTCL_BPTCL_SHIFT)
+#define I40E_GLV_GORCH(_i) (0x00358004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GORCH_MAX_INDEX 383
+#define I40E_GLV_GORCH_GORCH_SHIFT 0
+#define I40E_GLV_GORCH_GORCH_MASK (0xFFFF << I40E_GLV_GORCH_GORCH_SHIFT)
+#define I40E_GLV_GORCL(_i) (0x00358000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GORCL_MAX_INDEX 383
+#define I40E_GLV_GORCL_GORCL_SHIFT 0
+#define I40E_GLV_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLV_GORCL_GORCL_SHIFT)
+#define I40E_GLV_GOTCH(_i) (0x00328004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GOTCH_MAX_INDEX 383
+#define I40E_GLV_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLV_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLV_GOTCH_GOTCH_SHIFT)
+#define I40E_GLV_GOTCL(_i) (0x00328000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GOTCL_MAX_INDEX 383
+#define I40E_GLV_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLV_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLV_GOTCL_GOTCL_SHIFT)
+#define I40E_GLV_MPRCH(_i) (0x0036CC04 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPRCH_MAX_INDEX 383
+#define I40E_GLV_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLV_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLV_MPRCH_MPRCH_SHIFT)
+#define I40E_GLV_MPRCL(_i) (0x0036cc00 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPRCL_MAX_INDEX 383
+#define I40E_GLV_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLV_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLV_MPRCL_MPRCL_SHIFT)
+#define I40E_GLV_MPTCH(_i) (0x0033CC04 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPTCH_MAX_INDEX 383
+#define I40E_GLV_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLV_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLV_MPTCH_MPTCH_SHIFT)
+#define I40E_GLV_MPTCL(_i) (0x0033cc00 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPTCL_MAX_INDEX 383
+#define I40E_GLV_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLV_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLV_MPTCL_MPTCL_SHIFT)
+#define I40E_GLV_RDPC(_i) (0x00310000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_RDPC_MAX_INDEX 383
+#define I40E_GLV_RDPC_RDPC_SHIFT 0
+#define I40E_GLV_RDPC_RDPC_MASK (0xFFFFFFFF << I40E_GLV_RDPC_RDPC_SHIFT)
+#define I40E_GLV_RUPP(_i) (0x0036E400 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_RUPP_MAX_INDEX 383
+#define I40E_GLV_RUPP_RUPP_SHIFT 0
+#define I40E_GLV_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLV_RUPP_RUPP_SHIFT)
+#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 8)) /* _VSI=0...383 */
+#define I40E_GLV_TEPC_MAX_INDEX 383
+#define I40E_GLV_TEPC_TEPC_SHIFT 0
+#define I40E_GLV_TEPC_TEPC_MASK (0xFFFFFFFF << I40E_GLV_TEPC_TEPC_SHIFT)
+#define I40E_GLV_UPRCH(_i) (0x0036C004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPRCH_MAX_INDEX 383
+#define I40E_GLV_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLV_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLV_UPRCH_UPRCH_SHIFT)
+#define I40E_GLV_UPRCL(_i) (0x0036c000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPRCL_MAX_INDEX 383
+#define I40E_GLV_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLV_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLV_UPRCL_UPRCL_SHIFT)
+#define I40E_GLV_UPTCH(_i) (0x0033C004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPTCH_MAX_INDEX 383
+#define I40E_GLV_UPTCH_GLVUPTCH_SHIFT 0
+#define I40E_GLV_UPTCH_GLVUPTCH_MASK (0xFFFF << I40E_GLV_UPTCH_GLVUPTCH_SHIFT)
+#define I40E_GLV_UPTCL(_i) (0x0033c000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPTCL_MAX_INDEX 383
+#define I40E_GLV_UPTCL_UPTCL_SHIFT 0
+#define I40E_GLV_UPTCL_UPTCL_MASK (0xFFFFFFFF << I40E_GLV_UPTCL_UPTCL_SHIFT)
+#define I40E_GLVEBTC_RBCH(_i, _j) (0x00364004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RBCH_MAX_INDEX 7
+#define I40E_GLVEBTC_RBCH_TCBCH_SHIFT 0
+#define I40E_GLVEBTC_RBCH_TCBCH_MASK (0xFFFF << I40E_GLVEBTC_RBCH_TCBCH_SHIFT)
+#define I40E_GLVEBTC_RBCL(_i, _j) (0x00364000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RBCL_MAX_INDEX 7
+#define I40E_GLVEBTC_RBCL_TCBCL_SHIFT 0
+#define I40E_GLVEBTC_RBCL_TCBCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_RBCL_TCBCL_SHIFT)
+#define I40E_GLVEBTC_RPCH(_i, _j) (0x00368004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RPCH_MAX_INDEX 7
+#define I40E_GLVEBTC_RPCH_TCPCH_SHIFT 0
+#define I40E_GLVEBTC_RPCH_TCPCH_MASK (0xFFFF << I40E_GLVEBTC_RPCH_TCPCH_SHIFT)
+#define I40E_GLVEBTC_RPCL(_i, _j) (0x00368000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RPCL_MAX_INDEX 7
+#define I40E_GLVEBTC_RPCL_TCPCL_SHIFT 0
+#define I40E_GLVEBTC_RPCL_TCPCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_RPCL_TCPCL_SHIFT)
+#define I40E_GLVEBTC_TBCH(_i, _j) (0x00334004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TBCH_MAX_INDEX 7
+#define I40E_GLVEBTC_TBCH_TCBCH_SHIFT 0
+#define I40E_GLVEBTC_TBCH_TCBCH_MASK (0xFFFF << I40E_GLVEBTC_TBCH_TCBCH_SHIFT)
+#define I40E_GLVEBTC_TBCL(_i, _j) (0x00334000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TBCL_MAX_INDEX 7
+#define I40E_GLVEBTC_TBCL_TCBCL_SHIFT 0
+#define I40E_GLVEBTC_TBCL_TCBCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_TBCL_TCBCL_SHIFT)
+#define I40E_GLVEBTC_TPCH(_i, _j) (0x00338004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TPCH_MAX_INDEX 7
+#define I40E_GLVEBTC_TPCH_TCPCH_SHIFT 0
+#define I40E_GLVEBTC_TPCH_TCPCH_MASK (0xFFFF << I40E_GLVEBTC_TPCH_TCPCH_SHIFT)
+#define I40E_GLVEBTC_TPCL(_i, _j) (0x00338000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TPCL_MAX_INDEX 7
+#define I40E_GLVEBTC_TPCL_TCPCL_SHIFT 0
+#define I40E_GLVEBTC_TPCL_TCPCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_TPCL_TCPCL_SHIFT)
+#define I40E_GLVEBVL_BPCH(_i) (0x00374804 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_BPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_BPCH_VLBPCH_SHIFT 0
+#define I40E_GLVEBVL_BPCH_VLBPCH_MASK (0xFFFF << I40E_GLVEBVL_BPCH_VLBPCH_SHIFT)
+#define I40E_GLVEBVL_BPCL(_i) (0x00374800 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_BPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_BPCL_VLBPCL_SHIFT 0
+#define I40E_GLVEBVL_BPCL_VLBPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_BPCL_VLBPCL_SHIFT)
+#define I40E_GLVEBVL_GORCH(_i) (0x00360004 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GORCH_MAX_INDEX 127
+#define I40E_GLVEBVL_GORCH_VLBCH_SHIFT 0
+#define I40E_GLVEBVL_GORCH_VLBCH_MASK (0xFFFF << I40E_GLVEBVL_GORCH_VLBCH_SHIFT)
+#define I40E_GLVEBVL_GORCL(_i) (0x00360000 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GORCL_MAX_INDEX 127
+#define I40E_GLVEBVL_GORCL_VLBCL_SHIFT 0
+#define I40E_GLVEBVL_GORCL_VLBCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_GORCL_VLBCL_SHIFT)
+#define I40E_GLVEBVL_GOTCH(_i) (0x00330004 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GOTCH_MAX_INDEX 127
+#define I40E_GLVEBVL_GOTCH_VLBCH_SHIFT 0
+#define I40E_GLVEBVL_GOTCH_VLBCH_MASK (0xFFFF << I40E_GLVEBVL_GOTCH_VLBCH_SHIFT)
+#define I40E_GLVEBVL_GOTCL(_i) (0x00330000 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GOTCL_MAX_INDEX 127
+#define I40E_GLVEBVL_GOTCL_VLBCL_SHIFT 0
+#define I40E_GLVEBVL_GOTCL_VLBCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_GOTCL_VLBCL_SHIFT)
+#define I40E_GLVEBVL_MPCH(_i) (0x00374404 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_MPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_MPCH_VLMPCH_SHIFT 0
+#define I40E_GLVEBVL_MPCH_VLMPCH_MASK (0xFFFF << I40E_GLVEBVL_MPCH_VLMPCH_SHIFT)
+#define I40E_GLVEBVL_MPCL(_i) (0x00374400 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_MPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_MPCL_VLMPCL_SHIFT 0
+#define I40E_GLVEBVL_MPCL_VLMPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_MPCL_VLMPCL_SHIFT)
+#define I40E_GLVEBVL_UPCH(_i) (0x00374004 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_UPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_UPCH_VLUPCH_SHIFT 0
+#define I40E_GLVEBVL_UPCH_VLUPCH_MASK (0xFFFF << I40E_GLVEBVL_UPCH_VLUPCH_SHIFT)
+#define I40E_GLVEBVL_UPCL(_i) (0x00374000 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_UPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_UPCL_VLUPCL_SHIFT 0
+#define I40E_GLVEBVL_UPCL_VLUPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_UPCL_VLUPCL_SHIFT)
+#define I40E_GL_MTG_FLU_MSK_H 0x00269F4C
+#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT 0
+#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_MASK (0xFFFF << I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT)
+#define I40E_GL_MTG_FLU_MSK_L 0x00269F44
+#define I40E_GL_MTG_FLU_MSK_L_MASK_LOW_SHIFT 0
+#define I40E_GL_MTG_FLU_MSK_L_MASK_LOW_MASK (0xFFFFFFFF << I40E_GL_MTG_FLU_MSK_L_MASK_LOW_SHIFT)
+#define I40E_GL_SWR_DEF_ACT(_i) (0x0026CF00 + ((_i) * 4)) /* _i=0...25 */
+#define I40E_GL_SWR_DEF_ACT_MAX_INDEX 25
+#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT 0
+#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_MASK (0xFFFFFFFF << I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT)
+#define I40E_GL_SWR_DEF_ACT_EN 0x0026CF84
+#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT 0
+#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_MASK (0xFFFFFFFF << I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT)
+#define I40E_PRT_MSCCNT 0x00256BA0
+#define I40E_PRT_MSCCNT_CCOUNT_SHIFT 0
+#define I40E_PRT_MSCCNT_CCOUNT_MASK (0x1FFFFFF << I40E_PRT_MSCCNT_CCOUNT_SHIFT)
+#define I40E_PRT_SCSTS 0x00256C20
+#define I40E_PRT_SCSTS_BSCA_SHIFT 0
+#define I40E_PRT_SCSTS_BSCA_MASK (0x1 << I40E_PRT_SCSTS_BSCA_SHIFT)
+#define I40E_PRT_SCSTS_BSCAP_SHIFT 1
+#define I40E_PRT_SCSTS_BSCAP_MASK (0x1 << I40E_PRT_SCSTS_BSCAP_SHIFT)
+#define I40E_PRT_SCSTS_MSCA_SHIFT 2
+#define I40E_PRT_SCSTS_MSCA_MASK (0x1 << I40E_PRT_SCSTS_MSCA_SHIFT)
+#define I40E_PRT_SCSTS_MSCAP_SHIFT 3
+#define I40E_PRT_SCSTS_MSCAP_MASK (0x1 << I40E_PRT_SCSTS_MSCAP_SHIFT)
+#define I40E_PRT_SWT_BSCCNT 0x00256C60
+#define I40E_PRT_SWT_BSCCNT_CCOUNT_SHIFT 0
+#define I40E_PRT_SWT_BSCCNT_CCOUNT_MASK (0x1FFFFFF << I40E_PRT_SWT_BSCCNT_CCOUNT_SHIFT)
+#define I40E_PRTTSYN_ADJ 0x001E4280
+#define I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT 0
+#define I40E_PRTTSYN_ADJ_TSYNADJ_MASK (0x7FFFFFFF << I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT)
+#define I40E_PRTTSYN_ADJ_SIGN_SHIFT 31
+#define I40E_PRTTSYN_ADJ_SIGN_MASK (0x1 << I40E_PRTTSYN_ADJ_SIGN_SHIFT)
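
PRTTSYN_ADJ is laid out as sign-and-magnitude rather than two's complement: a 31-bit magnitude in bits 0-30 plus a sign flag in bit 31. A minimal sketch encoding a signed delta into that layout; the unit the hardware expects is not specified by these defines:

static u32 i40e_encode_tsyn_adj_example(s32 delta)
{
	/* negate in a wider type so S32_MIN has a representable magnitude */
	u32 mag = delta < 0 ? (u32)-(s64)delta : (u32)delta;

	return (mag & I40E_PRTTSYN_ADJ_TSYNADJ_MASK) |
	       (delta < 0 ? I40E_PRTTSYN_ADJ_SIGN_MASK : 0);
}
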
+#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_AUX_0_MAX_INDEX 1
+#define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0
+#define I40E_PRTTSYN_AUX_0_OUT_ENA_MASK (0x1 << I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT 1
+#define I40E_PRTTSYN_AUX_0_OUTMOD_MASK (0x3 << I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT 3
+#define I40E_PRTTSYN_AUX_0_OUTLVL_MASK (0x1 << I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT)
+#define I40E_PRTTSYN_AUX_0_PULSEW_SHIFT 8
+#define I40E_PRTTSYN_AUX_0_PULSEW_MASK (0xF << I40E_PRTTSYN_AUX_0_PULSEW_SHIFT)
+#define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16
+#define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK (0x3 << I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT)
+#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_AUX_1_MAX_INDEX 1
+#define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0
+#define I40E_PRTTSYN_AUX_1_INSTNT_MASK (0x1 << I40E_PRTTSYN_AUX_1_INSTNT_SHIFT)
+#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT 1
+#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_MASK (0x1 << I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT)
+#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_CLKO_MAX_INDEX 1
+#define I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT 0
+#define I40E_PRTTSYN_CLKO_TSYNCLKO_MASK (0xFFFFFFFF << I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT)
+#define I40E_PRTTSYN_CTL0 0x001E4200
+#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT 0
+#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_MASK (0x1 << I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT)
+#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT 1
+#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT 2
+#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT 3
+#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8
+#define I40E_PRTTSYN_CTL0_PF_ID_MASK (0xF << I40E_PRTTSYN_CTL0_PF_ID_SHIFT)
+#define I40E_PRTTSYN_CTL0_TSYNACT_SHIFT 12
+#define I40E_PRTTSYN_CTL0_TSYNACT_MASK (0x3 << I40E_PRTTSYN_CTL0_TSYNACT_SHIFT)
+#define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT 31
+#define I40E_PRTTSYN_CTL0_TSYNENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TSYNENA_SHIFT)
+#define I40E_PRTTSYN_CTL1 0x00085020
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT 0
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK (0xFF << I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT)
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT 8
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_MASK (0xFF << I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT)
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT 16
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK (0xF << I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT)
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT 20
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_MASK (0xF << I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT 24
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_MASK (0x3 << I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT 26
+#define I40E_PRTTSYN_CTL1_UDP_ENA_MASK (0x3 << I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNENA_SHIFT 31
+#define I40E_PRTTSYN_CTL1_TSYNENA_MASK (0x1 << I40E_PRTTSYN_CTL1_TSYNENA_SHIFT)
+#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_EVNT_H_MAX_INDEX 1
+#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT 0
+#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT)
+#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_EVNT_L_MAX_INDEX 1
+#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT 0
+#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT)
+#define I40E_PRTTSYN_INC_H 0x001E4060
+#define I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT 0
+#define I40E_PRTTSYN_INC_H_TSYNINC_H_MASK (0x3F << I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT)
+#define I40E_PRTTSYN_INC_L 0x001E4040
+#define I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT 0
+#define I40E_PRTTSYN_INC_L_TSYNINC_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT)
+#define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTTSYN_RXTIME_H_MAX_INDEX 3
+#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT 0
+#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT)
+#define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTTSYN_RXTIME_L_MAX_INDEX 3
+#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT 0
+#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT)
+#define I40E_PRTTSYN_STAT_0 0x001E4220
+#define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0
+#define I40E_PRTTSYN_STAT_0_EVENT0_MASK (0x1 << I40E_PRTTSYN_STAT_0_EVENT0_SHIFT)
+#define I40E_PRTTSYN_STAT_0_EVENT1_SHIFT 1
+#define I40E_PRTTSYN_STAT_0_EVENT1_MASK (0x1 << I40E_PRTTSYN_STAT_0_EVENT1_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TGT0_SHIFT 2
+#define I40E_PRTTSYN_STAT_0_TGT0_MASK (0x1 << I40E_PRTTSYN_STAT_0_TGT0_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TGT1_SHIFT 3
+#define I40E_PRTTSYN_STAT_0_TGT1_MASK (0x1 << I40E_PRTTSYN_STAT_0_TGT1_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4
+#define I40E_PRTTSYN_STAT_0_TXTIME_MASK (0x1 << I40E_PRTTSYN_STAT_0_TXTIME_SHIFT)
+#define I40E_PRTTSYN_STAT_1 0x00085140
+#define I40E_PRTTSYN_STAT_1_RXT0_SHIFT 0
+#define I40E_PRTTSYN_STAT_1_RXT0_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT0_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT1_SHIFT 1
+#define I40E_PRTTSYN_STAT_1_RXT1_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT1_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT2_SHIFT 2
+#define I40E_PRTTSYN_STAT_1_RXT2_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT2_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT3_SHIFT 3
+#define I40E_PRTTSYN_STAT_1_RXT3_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT3_SHIFT)
+#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_TGT_H_MAX_INDEX 1
+#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT 0
+#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT)
+#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_TGT_L_MAX_INDEX 1
+#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT 0
+#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT)
+#define I40E_PRTTSYN_TIME_H 0x001E4120
+#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT 0
+#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT)
+#define I40E_PRTTSYN_TIME_L 0x001E4100
+#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT 0
+#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT)
+#define I40E_PRTTSYN_TXTIME_H 0x001E41E0
+#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT 0
+#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT)
+#define I40E_PRTTSYN_TXTIME_L 0x001E41C0
+#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0
+#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT)
+#define I40E_GLSCD_QUANTA 0x000B2080
+#define I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT 0
+#define I40E_GLSCD_QUANTA_TSCDQUANTA_MASK (0x7 << I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT)
+#define I40E_GL_MDET_RX 0x0012A510
+#define I40E_GL_MDET_RX_FUNCTION_SHIFT 0
+#define I40E_GL_MDET_RX_FUNCTION_MASK (0xFF << I40E_GL_MDET_RX_FUNCTION_SHIFT)
+#define I40E_GL_MDET_RX_EVENT_SHIFT 8
+#define I40E_GL_MDET_RX_EVENT_MASK (0x1FF << I40E_GL_MDET_RX_EVENT_SHIFT)
+#define I40E_GL_MDET_RX_QUEUE_SHIFT 17
+#define I40E_GL_MDET_RX_QUEUE_MASK (0x3FFF << I40E_GL_MDET_RX_QUEUE_SHIFT)
+#define I40E_GL_MDET_RX_VALID_SHIFT 31
+#define I40E_GL_MDET_RX_VALID_MASK (0x1 << I40E_GL_MDET_RX_VALID_SHIFT)
+#define I40E_GL_MDET_TX 0x000E6480
+#define I40E_GL_MDET_TX_FUNCTION_SHIFT 0
+#define I40E_GL_MDET_TX_FUNCTION_MASK (0xFF << I40E_GL_MDET_TX_FUNCTION_SHIFT)
+#define I40E_GL_MDET_TX_EVENT_SHIFT 8
+#define I40E_GL_MDET_TX_EVENT_MASK (0x1FF << I40E_GL_MDET_TX_EVENT_SHIFT)
+#define I40E_GL_MDET_TX_QUEUE_SHIFT 17
+#define I40E_GL_MDET_TX_QUEUE_MASK (0x3FFF << I40E_GL_MDET_TX_QUEUE_SHIFT)
+#define I40E_GL_MDET_TX_VALID_SHIFT 31
+#define I40E_GL_MDET_TX_VALID_MASK (0x1 << I40E_GL_MDET_TX_VALID_SHIFT)
+#define I40E_PF_MDET_RX 0x0012A400
+#define I40E_PF_MDET_RX_VALID_SHIFT 0
+#define I40E_PF_MDET_RX_VALID_MASK (0x1 << I40E_PF_MDET_RX_VALID_SHIFT)
+#define I40E_PF_MDET_TX 0x000E6400
+#define I40E_PF_MDET_TX_VALID_SHIFT 0
+#define I40E_PF_MDET_TX_VALID_MASK (0x1 << I40E_PF_MDET_TX_VALID_SHIFT)
+#define I40E_PF_VT_PFALLOC 0x001C0500
+#define I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT 0
+#define I40E_PF_VT_PFALLOC_FIRSTVF_MASK (0xFF << I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8
+#define I40E_PF_VT_PFALLOC_LASTVF_MASK (0xFF << I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_VALID_SHIFT 31
+#define I40E_PF_VT_PFALLOC_VALID_MASK (0x1 << I40E_PF_VT_PFALLOC_VALID_SHIFT)
+#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VP_MDET_RX_MAX_INDEX 127
+#define I40E_VP_MDET_RX_VALID_SHIFT 0
+#define I40E_VP_MDET_RX_VALID_MASK (0x1 << I40E_VP_MDET_RX_VALID_SHIFT)
+#define I40E_VP_MDET_TX(_VF) (0x000E6000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VP_MDET_TX_MAX_INDEX 127
+#define I40E_VP_MDET_TX_VALID_SHIFT 0
+#define I40E_VP_MDET_TX_VALID_MASK (0x1 << I40E_VP_MDET_TX_VALID_SHIFT)
+#define I40E_GLPM_WUMC 0x0006C800
+#define I40E_GLPM_WUMC_NOTCO_SHIFT 0
+#define I40E_GLPM_WUMC_NOTCO_MASK (0x1 << I40E_GLPM_WUMC_NOTCO_SHIFT)
+#define I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT 1
+#define I40E_GLPM_WUMC_SRST_PIN_VAL_MASK (0x1 << I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT)
+#define I40E_GLPM_WUMC_ROL_MODE_SHIFT 2
+#define I40E_GLPM_WUMC_ROL_MODE_MASK (0x1 << I40E_GLPM_WUMC_ROL_MODE_SHIFT)
+#define I40E_GLPM_WUMC_RESERVED_4_SHIFT 3
+#define I40E_GLPM_WUMC_RESERVED_4_MASK (0x1FFF << I40E_GLPM_WUMC_RESERVED_4_SHIFT)
+#define I40E_GLPM_WUMC_MNG_WU_PF_SHIFT 16
+#define I40E_GLPM_WUMC_MNG_WU_PF_MASK (0xFFFF << I40E_GLPM_WUMC_MNG_WU_PF_SHIFT)
+#define I40E_PFPM_APM 0x000B8080
+#define I40E_PFPM_APM_APME_SHIFT 0
+#define I40E_PFPM_APM_APME_MASK (0x1 << I40E_PFPM_APM_APME_SHIFT)
+#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */
+#define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7
+#define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0
+#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK (0xFF << I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT)
+#define I40E_PFPM_WUC 0x0006B200
+#define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5
+#define I40E_PFPM_WUC_EN_APM_D0_MASK (0x1 << I40E_PFPM_WUC_EN_APM_D0_SHIFT)
+#define I40E_PFPM_WUFC 0x0006B400
+#define I40E_PFPM_WUFC_LNKC_SHIFT 0
+#define I40E_PFPM_WUFC_LNKC_MASK (0x1 << I40E_PFPM_WUFC_LNKC_SHIFT)
+#define I40E_PFPM_WUFC_MAG_SHIFT 1
+#define I40E_PFPM_WUFC_MAG_MASK (0x1 << I40E_PFPM_WUFC_MAG_SHIFT)
+#define I40E_PFPM_WUFC_MNG_SHIFT 3
+#define I40E_PFPM_WUFC_MNG_MASK (0x1 << I40E_PFPM_WUFC_MNG_SHIFT)
+#define I40E_PFPM_WUFC_FLX0_ACT_SHIFT 4
+#define I40E_PFPM_WUFC_FLX0_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX0_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX1_ACT_SHIFT 5
+#define I40E_PFPM_WUFC_FLX1_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX1_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX2_ACT_SHIFT 6
+#define I40E_PFPM_WUFC_FLX2_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX2_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX3_ACT_SHIFT 7
+#define I40E_PFPM_WUFC_FLX3_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX3_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX4_ACT_SHIFT 8
+#define I40E_PFPM_WUFC_FLX4_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX4_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX5_ACT_SHIFT 9
+#define I40E_PFPM_WUFC_FLX5_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX5_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX6_ACT_SHIFT 10
+#define I40E_PFPM_WUFC_FLX6_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX6_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX7_ACT_SHIFT 11
+#define I40E_PFPM_WUFC_FLX7_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX7_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX0_SHIFT 16
+#define I40E_PFPM_WUFC_FLX0_MASK (0x1 << I40E_PFPM_WUFC_FLX0_SHIFT)
+#define I40E_PFPM_WUFC_FLX1_SHIFT 17
+#define I40E_PFPM_WUFC_FLX1_MASK (0x1 << I40E_PFPM_WUFC_FLX1_SHIFT)
+#define I40E_PFPM_WUFC_FLX2_SHIFT 18
+#define I40E_PFPM_WUFC_FLX2_MASK (0x1 << I40E_PFPM_WUFC_FLX2_SHIFT)
+#define I40E_PFPM_WUFC_FLX3_SHIFT 19
+#define I40E_PFPM_WUFC_FLX3_MASK (0x1 << I40E_PFPM_WUFC_FLX3_SHIFT)
+#define I40E_PFPM_WUFC_FLX4_SHIFT 20
+#define I40E_PFPM_WUFC_FLX4_MASK (0x1 << I40E_PFPM_WUFC_FLX4_SHIFT)
+#define I40E_PFPM_WUFC_FLX5_SHIFT 21
+#define I40E_PFPM_WUFC_FLX5_MASK (0x1 << I40E_PFPM_WUFC_FLX5_SHIFT)
+#define I40E_PFPM_WUFC_FLX6_SHIFT 22
+#define I40E_PFPM_WUFC_FLX6_MASK (0x1 << I40E_PFPM_WUFC_FLX6_SHIFT)
+#define I40E_PFPM_WUFC_FLX7_SHIFT 23
+#define I40E_PFPM_WUFC_FLX7_MASK (0x1 << I40E_PFPM_WUFC_FLX7_SHIFT)
+#define I40E_PFPM_WUFC_FW_RST_WK_SHIFT 31
+#define I40E_PFPM_WUFC_FW_RST_WK_MASK (0x1 << I40E_PFPM_WUFC_FW_RST_WK_SHIFT)
+#define I40E_PFPM_WUS 0x0006B600
+#define I40E_PFPM_WUS_LNKC_SHIFT 0
+#define I40E_PFPM_WUS_LNKC_MASK (0x1 << I40E_PFPM_WUS_LNKC_SHIFT)
+#define I40E_PFPM_WUS_MAG_SHIFT 1
+#define I40E_PFPM_WUS_MAG_MASK (0x1 << I40E_PFPM_WUS_MAG_SHIFT)
+#define I40E_PFPM_WUS_PME_STATUS_SHIFT 2
+#define I40E_PFPM_WUS_PME_STATUS_MASK (0x1 << I40E_PFPM_WUS_PME_STATUS_SHIFT)
+#define I40E_PFPM_WUS_MNG_SHIFT 3
+#define I40E_PFPM_WUS_MNG_MASK (0x1 << I40E_PFPM_WUS_MNG_SHIFT)
+#define I40E_PFPM_WUS_FLX0_SHIFT 16
+#define I40E_PFPM_WUS_FLX0_MASK (0x1 << I40E_PFPM_WUS_FLX0_SHIFT)
+#define I40E_PFPM_WUS_FLX1_SHIFT 17
+#define I40E_PFPM_WUS_FLX1_MASK (0x1 << I40E_PFPM_WUS_FLX1_SHIFT)
+#define I40E_PFPM_WUS_FLX2_SHIFT 18
+#define I40E_PFPM_WUS_FLX2_MASK (0x1 << I40E_PFPM_WUS_FLX2_SHIFT)
+#define I40E_PFPM_WUS_FLX3_SHIFT 19
+#define I40E_PFPM_WUS_FLX3_MASK (0x1 << I40E_PFPM_WUS_FLX3_SHIFT)
+#define I40E_PFPM_WUS_FLX4_SHIFT 20
+#define I40E_PFPM_WUS_FLX4_MASK (0x1 << I40E_PFPM_WUS_FLX4_SHIFT)
+#define I40E_PFPM_WUS_FLX5_SHIFT 21
+#define I40E_PFPM_WUS_FLX5_MASK (0x1 << I40E_PFPM_WUS_FLX5_SHIFT)
+#define I40E_PFPM_WUS_FLX6_SHIFT 22
+#define I40E_PFPM_WUS_FLX6_MASK (0x1 << I40E_PFPM_WUS_FLX6_SHIFT)
+#define I40E_PFPM_WUS_FLX7_SHIFT 23
+#define I40E_PFPM_WUS_FLX7_MASK (0x1 << I40E_PFPM_WUS_FLX7_SHIFT)
+#define I40E_PFPM_WUS_FW_RST_WK_SHIFT 31
+#define I40E_PFPM_WUS_FW_RST_WK_MASK (0x1 << I40E_PFPM_WUS_FW_RST_WK_SHIFT)
+#define I40E_PRTPM_FHFHR 0x0006C000
+#define I40E_PRTPM_FHFHR_UNICAST_SHIFT 0
+#define I40E_PRTPM_FHFHR_UNICAST_MASK (0x1 << I40E_PRTPM_FHFHR_UNICAST_SHIFT)
+#define I40E_PRTPM_FHFHR_MULTICAST_SHIFT 1
+#define I40E_PRTPM_FHFHR_MULTICAST_MASK (0x1 << I40E_PRTPM_FHFHR_MULTICAST_SHIFT)
+#define I40E_PRTPM_SAH(_i) (0x001E44C0 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTPM_SAH_MAX_INDEX 3
+#define I40E_PRTPM_SAH_PFPM_SAH_SHIFT 0
+#define I40E_PRTPM_SAH_PFPM_SAH_MASK (0xFFFF << I40E_PRTPM_SAH_PFPM_SAH_SHIFT)
+#define I40E_PRTPM_SAH_PF_NUM_SHIFT 26
+#define I40E_PRTPM_SAH_PF_NUM_MASK (0xF << I40E_PRTPM_SAH_PF_NUM_SHIFT)
+#define I40E_PRTPM_SAH_MC_MAG_EN_SHIFT 30
+#define I40E_PRTPM_SAH_MC_MAG_EN_MASK (0x1 << I40E_PRTPM_SAH_MC_MAG_EN_SHIFT)
+#define I40E_PRTPM_SAH_AV_SHIFT 31
+#define I40E_PRTPM_SAH_AV_MASK (0x1 << I40E_PRTPM_SAH_AV_SHIFT)
+#define I40E_PRTPM_SAL(_i) (0x001E4440 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTPM_SAL_MAX_INDEX 3
+#define I40E_PRTPM_SAL_PFPM_SAL_SHIFT 0
+#define I40E_PRTPM_SAL_PFPM_SAL_MASK (0xFFFFFFFF << I40E_PRTPM_SAL_PFPM_SAL_SHIFT)
+#define I40E_VF_ARQBAH1 0x00006000
+#define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0
+#define I40E_VF_ARQBAH1_ARQBAH_MASK (0xFFFFFFFF << I40E_VF_ARQBAH1_ARQBAH_SHIFT)
+#define I40E_VF_ARQBAL1 0x00006C00
+#define I40E_VF_ARQBAL1_ARQBAL_SHIFT 0
+#define I40E_VF_ARQBAL1_ARQBAL_MASK (0xFFFFFFFF << I40E_VF_ARQBAL1_ARQBAL_SHIFT)
+#define I40E_VF_ARQH1 0x00007400
+#define I40E_VF_ARQH1_ARQH_SHIFT 0
+#define I40E_VF_ARQH1_ARQH_MASK (0x3FF << I40E_VF_ARQH1_ARQH_SHIFT)
+#define I40E_VF_ARQLEN1 0x00008000
+#define I40E_VF_ARQLEN1_ARQLEN_SHIFT 0
+#define I40E_VF_ARQLEN1_ARQLEN_MASK (0x3FF << I40E_VF_ARQLEN1_ARQLEN_SHIFT)
+#define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28
+#define I40E_VF_ARQLEN1_ARQVFE_MASK (0x1 << I40E_VF_ARQLEN1_ARQVFE_SHIFT)
+#define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29
+#define I40E_VF_ARQLEN1_ARQOVFL_MASK (0x1 << I40E_VF_ARQLEN1_ARQOVFL_SHIFT)
+#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30
+#define I40E_VF_ARQLEN1_ARQCRIT_MASK (0x1 << I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
+#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31
+#define I40E_VF_ARQLEN1_ARQENABLE_MASK (0x1 << I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
+#define I40E_VF_ARQT1 0x00007000
+#define I40E_VF_ARQT1_ARQT_SHIFT 0
+#define I40E_VF_ARQT1_ARQT_MASK (0x3FF << I40E_VF_ARQT1_ARQT_SHIFT)
+#define I40E_VF_ATQBAH1 0x00007800
+#define I40E_VF_ATQBAH1_ATQBAH_SHIFT 0
+#define I40E_VF_ATQBAH1_ATQBAH_MASK (0xFFFFFFFF << I40E_VF_ATQBAH1_ATQBAH_SHIFT)
+#define I40E_VF_ATQBAL1 0x00007C00
+#define I40E_VF_ATQBAL1_ATQBAL_SHIFT 0
+#define I40E_VF_ATQBAL1_ATQBAL_MASK (0xFFFFFFFF << I40E_VF_ATQBAL1_ATQBAL_SHIFT)
+#define I40E_VF_ATQH1 0x00006400
+#define I40E_VF_ATQH1_ATQH_SHIFT 0
+#define I40E_VF_ATQH1_ATQH_MASK (0x3FF << I40E_VF_ATQH1_ATQH_SHIFT)
+#define I40E_VF_ATQLEN1 0x00006800
+#define I40E_VF_ATQLEN1_ATQLEN_SHIFT 0
+#define I40E_VF_ATQLEN1_ATQLEN_MASK (0x3FF << I40E_VF_ATQLEN1_ATQLEN_SHIFT)
+#define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28
+#define I40E_VF_ATQLEN1_ATQVFE_MASK (0x1 << I40E_VF_ATQLEN1_ATQVFE_SHIFT)
+#define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29
+#define I40E_VF_ATQLEN1_ATQOVFL_MASK (0x1 << I40E_VF_ATQLEN1_ATQOVFL_SHIFT)
+#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30
+#define I40E_VF_ATQLEN1_ATQCRIT_MASK (0x1 << I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
+#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31
+#define I40E_VF_ATQLEN1_ATQENABLE_MASK (0x1 << I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
+#define I40E_VF_ATQT1 0x00008400
+#define I40E_VF_ATQT1_ATQT_SHIFT 0
+#define I40E_VF_ATQT1_ATQT_MASK (0x3FF << I40E_VF_ATQT1_ATQT_SHIFT)
+#define I40E_VFGEN_RSTAT 0x00008800
+#define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0
+#define I40E_VFGEN_RSTAT_VFR_STATE_MASK (0x3 << I40E_VFGEN_RSTAT_VFR_STATE_SHIFT)
+#define I40E_VFINT_DYN_CTL01 0x00005C00
+#define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTL01_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTL01_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTL01_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT)
+#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4))
+#define I40E_VFINT_DYN_CTLN1_MAX_INDEX 15
+#define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTLN1_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT)
+#define I40E_VFINT_ICR0_ENA1 0x00005000
+#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31
+#define I40E_VFINT_ICR0_ENA1_RSVD_MASK (0x1 << I40E_VFINT_ICR0_ENA1_RSVD_SHIFT)
+#define I40E_VFINT_ICR01 0x00004800
+#define I40E_VFINT_ICR01_INTEVENT_SHIFT 0
+#define I40E_VFINT_ICR01_INTEVENT_MASK (0x1 << I40E_VFINT_ICR01_INTEVENT_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_0_SHIFT 1
+#define I40E_VFINT_ICR01_QUEUE_0_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_0_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_1_SHIFT 2
+#define I40E_VFINT_ICR01_QUEUE_1_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_1_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_2_SHIFT 3
+#define I40E_VFINT_ICR01_QUEUE_2_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_2_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_3_SHIFT 4
+#define I40E_VFINT_ICR01_QUEUE_3_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_3_SHIFT)
+#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR01_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR01_ADMINQ_MASK (0x1 << I40E_VFINT_ICR01_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR01_SWINT_SHIFT 31
+#define I40E_VFINT_ICR01_SWINT_MASK (0x1 << I40E_VFINT_ICR01_SWINT_SHIFT)
+#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */
+#define I40E_VFINT_ITR01_MAX_INDEX 2
+#define I40E_VFINT_ITR01_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITR01_INTERVAL_MASK (0xFFF << I40E_VFINT_ITR01_INTERVAL_SHIFT)
+#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4))
+#define I40E_VFINT_ITRN1_MAX_INDEX 2
+#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITRN1_INTERVAL_MASK (0xFFF << I40E_VFINT_ITRN1_INTERVAL_SHIFT)
+#define I40E_VFINT_STAT_CTL01 0x00005400
+#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2
+#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK (0x3 << I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT)
+#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */
+#define I40E_QRX_TAIL1_MAX_INDEX 15
+#define I40E_QRX_TAIL1_TAIL_SHIFT 0
+#define I40E_QRX_TAIL1_TAIL_MASK (0x1FFF << I40E_QRX_TAIL1_TAIL_SHIFT)
+#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */
+#define I40E_QTX_TAIL1_MAX_INDEX 15
+#define I40E_QTX_TAIL1_TAIL_SHIFT 0
+#define I40E_QTX_TAIL1_TAIL_MASK (0x1FFF << I40E_QTX_TAIL1_TAIL_SHIFT)
+#define I40E_VFMSIX_PBA 0x00002000
+#define I40E_VFMSIX_PBA_PENBIT_SHIFT 0
+#define I40E_VFMSIX_PBA_PENBIT_MASK (0xFFFFFFFF << I40E_VFMSIX_PBA_PENBIT_SHIFT)
+#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TADD_MAX_INDEX 16
+#define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0
+#define I40E_VFMSIX_TADD_MSIXTADD10_MASK (0x3 << I40E_VFMSIX_TADD_MSIXTADD10_SHIFT)
+#define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2
+#define I40E_VFMSIX_TADD_MSIXTADD_MASK (0x3FFFFFFF << I40E_VFMSIX_TADD_MSIXTADD_SHIFT)
+#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TMSG_MAX_INDEX 16
+#define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0
+#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK (0xFFFFFFFF << I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT)
+#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TUADD_MAX_INDEX 16
+#define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0
+#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK (0xFFFFFFFF << I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT)
+#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TVCTRL_MAX_INDEX 16
+#define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0
+#define I40E_VFMSIX_TVCTRL_MASK_MASK (0x1 << I40E_VFMSIX_TVCTRL_MASK_SHIFT)
+#define I40E_VFCM_PE_ERRDATA 0x0000DC00
+#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK (0xF << I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK (0x7 << I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK (0x3FFFF << I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_VFCM_PE_ERRINFO 0x0000D800
+#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK (0x7 << I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_VFPE_AEQALLOC1 0x0000A400
+#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK (0xFFFFFFFF << I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH1 0x00009800
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW1 0x0000AC00
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK (0xFFFFFFFF << I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1 0x0000B800
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK (0x1 << I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK (0x1 << I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK1 0x0000B000
+#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK1_PECQID_MASK (0x1FFFF << I40E_VFPE_CQACK1_PECQID_SHIFT)
+#define I40E_VFPE_CQARM1 0x0000B400
+#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM1_PECQID_MASK (0x1FFFF << I40E_VFPE_CQARM1_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB1 0x0000BC00
+#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB1_WQHEAD_MASK (0x7FF << I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES1 0x00009C00
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL1 0x0000A000
+#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK (0x7FF << I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK (0x1 << I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG01 0x00008C00
+#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG01_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK1 0x00009000
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER1 0x0000A800
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK (0xFFFFFFFF << I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC1 0x0000C000
+#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC1_PEQPID_MASK (0x3FFFF << I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK (0xFFF << I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
+#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */
+#define I40E_VFQF_HENA_MAX_INDEX 1
+#define I40E_VFQF_HENA_PTYPE_ENA_SHIFT 0
+#define I40E_VFQF_HENA_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_VFQF_HENA_PTYPE_ENA_SHIFT)
+#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */
+#define I40E_VFQF_HKEY_MAX_INDEX 12
+#define I40E_VFQF_HKEY_KEY_0_SHIFT 0
+#define I40E_VFQF_HKEY_KEY_0_MASK (0xFF << I40E_VFQF_HKEY_KEY_0_SHIFT)
+#define I40E_VFQF_HKEY_KEY_1_SHIFT 8
+#define I40E_VFQF_HKEY_KEY_1_MASK (0xFF << I40E_VFQF_HKEY_KEY_1_SHIFT)
+#define I40E_VFQF_HKEY_KEY_2_SHIFT 16
+#define I40E_VFQF_HKEY_KEY_2_MASK (0xFF << I40E_VFQF_HKEY_KEY_2_SHIFT)
+#define I40E_VFQF_HKEY_KEY_3_SHIFT 24
+#define I40E_VFQF_HKEY_KEY_3_MASK (0xFF << I40E_VFQF_HKEY_KEY_3_SHIFT)
+#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_VFQF_HLUT_MAX_INDEX 15
+#define I40E_VFQF_HLUT_LUT0_SHIFT 0
+#define I40E_VFQF_HLUT_LUT0_MASK (0xF << I40E_VFQF_HLUT_LUT0_SHIFT)
+#define I40E_VFQF_HLUT_LUT1_SHIFT 8
+#define I40E_VFQF_HLUT_LUT1_MASK (0xF << I40E_VFQF_HLUT_LUT1_SHIFT)
+#define I40E_VFQF_HLUT_LUT2_SHIFT 16
+#define I40E_VFQF_HLUT_LUT2_MASK (0xF << I40E_VFQF_HLUT_LUT2_SHIFT)
+#define I40E_VFQF_HLUT_LUT3_SHIFT 24
+#define I40E_VFQF_HLUT_LUT3_MASK (0xF << I40E_VFQF_HLUT_LUT3_SHIFT)
+#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */
+#define I40E_VFQF_HREGION_MAX_INDEX 7
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_VFQF_HREGION_REGION_0_SHIFT 1
+#define I40E_VFQF_HREGION_REGION_0_MASK (0x7 << I40E_VFQF_HREGION_REGION_0_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_VFQF_HREGION_REGION_1_SHIFT 5
+#define I40E_VFQF_HREGION_REGION_1_MASK (0x7 << I40E_VFQF_HREGION_REGION_1_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_VFQF_HREGION_REGION_2_SHIFT 9
+#define I40E_VFQF_HREGION_REGION_2_MASK (0x7 << I40E_VFQF_HREGION_REGION_2_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_VFQF_HREGION_REGION_3_SHIFT 13
+#define I40E_VFQF_HREGION_REGION_3_MASK (0x7 << I40E_VFQF_HREGION_REGION_3_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_VFQF_HREGION_REGION_4_SHIFT 17
+#define I40E_VFQF_HREGION_REGION_4_MASK (0x7 << I40E_VFQF_HREGION_REGION_4_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_VFQF_HREGION_REGION_5_SHIFT 21
+#define I40E_VFQF_HREGION_REGION_5_MASK (0x7 << I40E_VFQF_HREGION_REGION_5_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_VFQF_HREGION_REGION_6_SHIFT 25
+#define I40E_VFQF_HREGION_REGION_6_MASK (0x7 << I40E_VFQF_HREGION_REGION_6_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_VFQF_HREGION_REGION_7_SHIFT 29
+#define I40E_VFQF_HREGION_REGION_7_MASK (0x7 << I40E_VFQF_HREGION_REGION_7_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS 0x00270110
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_SHIFT 0
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_SHIFT 8
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_SHIFT 16
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_SHIFT 24
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_MASK (0x7 << I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_SHIFT)
+#endif
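Every register definition above follows the same pattern: the _MASK selects a field in place and the matching _SHIFT moves it down to bit 0. A minimal illustrative sketch of reading one such field (this helper is hypothetical and not part of the patch; rd32() is assumed to be the driver's register-read wrapper, the counterpart of the wr32() used elsewhere in this series):

	static u8 example_mdet_rx_function(struct i40e_hw *hw)
	{
		/* read the whole 32-bit malicious-driver-detect register */
		u32 val = rd32(hw, I40E_GL_MDET_RX);

		/* mask the FUNCTION field in place, then shift it to bit 0 */
		return (u8)((val & I40E_GL_MDET_RX_FUNCTION_MASK) >>
			    I40E_GL_MDET_RX_FUNCTION_SHIFT);
	}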
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_status.h b/drivers/net/ethernet/intel/i40evf/i40e_status.h
new file mode 100644
index 000000000000..7c08cc2e339b
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_status.h
@@ -0,0 +1,97 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_STATUS_H_
+#define _I40E_STATUS_H_
+
+/* Error Codes */
+enum i40e_status_code {
+ I40E_SUCCESS = 0,
+ I40E_ERR_NVM = -1,
+ I40E_ERR_NVM_CHECKSUM = -2,
+ I40E_ERR_PHY = -3,
+ I40E_ERR_CONFIG = -4,
+ I40E_ERR_PARAM = -5,
+ I40E_ERR_MAC_TYPE = -6,
+ I40E_ERR_UNKNOWN_PHY = -7,
+ I40E_ERR_LINK_SETUP = -8,
+ I40E_ERR_ADAPTER_STOPPED = -9,
+ I40E_ERR_INVALID_MAC_ADDR = -10,
+ I40E_ERR_DEVICE_NOT_SUPPORTED = -11,
+ I40E_ERR_MASTER_REQUESTS_PENDING = -12,
+ I40E_ERR_INVALID_LINK_SETTINGS = -13,
+ I40E_ERR_AUTONEG_NOT_COMPLETE = -14,
+ I40E_ERR_RESET_FAILED = -15,
+ I40E_ERR_SWFW_SYNC = -16,
+ I40E_ERR_NO_AVAILABLE_VSI = -17,
+ I40E_ERR_NO_MEMORY = -18,
+ I40E_ERR_BAD_PTR = -19,
+ I40E_ERR_RING_FULL = -20,
+ I40E_ERR_INVALID_PD_ID = -21,
+ I40E_ERR_INVALID_QP_ID = -22,
+ I40E_ERR_INVALID_CQ_ID = -23,
+ I40E_ERR_INVALID_CEQ_ID = -24,
+ I40E_ERR_INVALID_AEQ_ID = -25,
+ I40E_ERR_INVALID_SIZE = -26,
+ I40E_ERR_INVALID_ARP_INDEX = -27,
+ I40E_ERR_INVALID_FPM_FUNC_ID = -28,
+ I40E_ERR_QP_INVALID_MSG_SIZE = -29,
+ I40E_ERR_QP_TOOMANY_WRS_POSTED = -30,
+ I40E_ERR_INVALID_FRAG_COUNT = -31,
+ I40E_ERR_QUEUE_EMPTY = -32,
+ I40E_ERR_INVALID_ALIGNMENT = -33,
+ I40E_ERR_FLUSHED_QUEUE = -34,
+ I40E_ERR_INVALID_PUSH_PAGE_INDEX = -35,
+ I40E_ERR_INVALID_IMM_DATA_SIZE = -36,
+ I40E_ERR_TIMEOUT = -37,
+ I40E_ERR_OPCODE_MISMATCH = -38,
+ I40E_ERR_CQP_COMPL_ERROR = -39,
+ I40E_ERR_INVALID_VF_ID = -40,
+ I40E_ERR_INVALID_HMCFN_ID = -41,
+ I40E_ERR_BACKING_PAGE_ERROR = -42,
+ I40E_ERR_NO_PBLCHUNKS_AVAILABLE = -43,
+ I40E_ERR_INVALID_PBLE_INDEX = -44,
+ I40E_ERR_INVALID_SD_INDEX = -45,
+ I40E_ERR_INVALID_PAGE_DESC_INDEX = -46,
+ I40E_ERR_INVALID_SD_TYPE = -47,
+ I40E_ERR_MEMCPY_FAILED = -48,
+ I40E_ERR_INVALID_HMC_OBJ_INDEX = -49,
+ I40E_ERR_INVALID_HMC_OBJ_COUNT = -50,
+ I40E_ERR_INVALID_SRQ_ARM_LIMIT = -51,
+ I40E_ERR_SRQ_ENABLED = -52,
+ I40E_ERR_ADMIN_QUEUE_ERROR = -53,
+ I40E_ERR_ADMIN_QUEUE_TIMEOUT = -54,
+ I40E_ERR_BUF_TOO_SHORT = -55,
+ I40E_ERR_ADMIN_QUEUE_FULL = -56,
+ I40E_ERR_ADMIN_QUEUE_NO_WORK = -57,
+ I40E_ERR_BAD_IWARP_CQE = -58,
+ I40E_ERR_NVM_BLANK_MODE = -59,
+ I40E_ERR_NOT_IMPLEMENTED = -60,
+ I40E_ERR_PE_DOORBELL_NOT_ENABLED = -61,
+ I40E_ERR_DIAG_TEST_FAILED = -62,
+ I40E_ERR_NOT_READY = -63,
+ I40E_NOT_SUPPORTED = -64,
+ I40E_ERR_FIRMWARE_API_VERSION = -65,
+};
+
+#endif /* _I40E_STATUS_H_ */
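All of these codes are negative and I40E_SUCCESS is the only zero value, so callers can use the usual if (status) test. A hypothetical pretty-printer for log messages (illustrative only, not part of the patch) might look like:

	static const char *example_status_str(enum i40e_status_code code)
	{
		switch (code) {
		case I40E_SUCCESS:
			return "success";
		case I40E_ERR_NO_MEMORY:
			return "out of memory";
		case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
			return "admin queue timeout";
		default:
			return "unrecognized status code";
		}
	}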
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
new file mode 100644
index 000000000000..ffdb01d853db
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -0,0 +1,1575 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include <linux/prefetch.h>
+
+#include "i40evf.h"
+
+static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
+ u32 td_tag)
+{
+ return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
+ ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
+ ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
+ ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
+ ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
+}
+
+#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
+
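build_ctob() packs the descriptor type, command flags, header offsets, buffer size and L2 tag into the single little-endian quadword that the hardware parses. A hedged usage sketch (the values are invented; tx_desc is assumed to point at a valid struct i40e_tx_desc):

	/* final fragment of a 1514-byte frame: EOP+RS, no offsets, no tag */
	tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TXD_CMD, 0, 1514, 0);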
+/**
+ * i40e_unmap_and_free_tx_resource - Release a Tx buffer
+ * @ring: the ring that owns the buffer
+ * @tx_buffer: the buffer to free
+ **/
+static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
+ struct i40e_tx_buffer *tx_buffer)
+{
+ if (tx_buffer->skb) {
+ dev_kfree_skb_any(tx_buffer->skb);
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_single(ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ } else if (dma_unmap_len(tx_buffer, len)) {
+ dma_unmap_page(ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ }
+ tx_buffer->next_to_watch = NULL;
+ tx_buffer->skb = NULL;
+ dma_unmap_len_set(tx_buffer, len, 0);
+ /* tx_buffer must be completely set up in the transmit path */
+}
+
+/**
+ * i40evf_clean_tx_ring - Free all Tx buffers in a ring
+ * @tx_ring: ring to be cleaned
+ **/
+void i40evf_clean_tx_ring(struct i40e_ring *tx_ring)
+{
+ unsigned long bi_size;
+ u16 i;
+
+ /* ring already cleared, nothing to do */
+ if (!tx_ring->tx_bi)
+ return;
+
+ /* Free all the Tx ring sk_buffs */
+ for (i = 0; i < tx_ring->count; i++)
+ i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
+
+ bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
+ memset(tx_ring->tx_bi, 0, bi_size);
+
+ /* Zero out the descriptor ring */
+ memset(tx_ring->desc, 0, tx_ring->size);
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+
+ if (!tx_ring->netdev)
+ return;
+
+ /* cleanup Tx queue statistics */
+ netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->queue_index));
+}
+
+/**
+ * i40evf_free_tx_resources - Free Tx resources per queue
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
+{
+ i40evf_clean_tx_ring(tx_ring);
+ kfree(tx_ring->tx_bi);
+ tx_ring->tx_bi = NULL;
+
+ if (tx_ring->desc) {
+ dma_free_coherent(tx_ring->dev, tx_ring->size,
+ tx_ring->desc, tx_ring->dma);
+ tx_ring->desc = NULL;
+ }
+}
+
+/**
+ * i40e_get_tx_pending - how many tx descriptors not processed
+ * @ring: the ring of descriptors
+ *
+ * Since there is no access to the ring head register
+ * in XL710, we need to use our local copies
+ **/
+static u32 i40e_get_tx_pending(struct i40e_ring *ring)
+{
+ u32 ntu = ((ring->next_to_clean <= ring->next_to_use)
+ ? ring->next_to_use
+ : ring->next_to_use + ring->count);
+ return ntu - ring->next_to_clean;
+}
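To make the wraparound arithmetic concrete: on a 512-descriptor ring with next_to_clean = 500 and next_to_use = 10, the use pointer has wrapped, so ntu is promoted to 10 + 512 = 522 and the function reports 522 - 500 = 22 descriptors still pending.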
+
+/**
+ * i40e_check_tx_hang - Is there a hang in the Tx queue
+ * @tx_ring: the ring of descriptors
+ **/
+static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
+{
+ u32 tx_pending = i40e_get_tx_pending(tx_ring);
+ bool ret = false;
+
+ clear_check_for_tx_hang(tx_ring);
+
+ /* Check for a hung queue, but be thorough. This verifies
+ * that a transmit has been completed since the previous
+ * check AND there is at least one packet pending. The
+ * ARMED bit is set to indicate a potential hang. The
+ * bit is cleared if a pause frame is received to remove
+ * false hang detection due to PFC or 802.3x frames. By
+ * requiring this to fail twice we avoid races with
+ * PFC clearing the ARMED bit and conditions where we
+ * run the check_tx_hang logic with a transmit completion
+ * pending but without time to complete it yet.
+ */
+ if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
+ tx_pending) {
+ /* make sure it is true for two checks in a row */
+ ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
+ &tx_ring->state);
+ } else {
+ /* update completed stats and disarm the hang check */
+ tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
+ clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_clean_tx_irq - Reclaim resources after transmit completes
+ * @tx_ring: tx ring to clean
+ * @budget: how many cleans we're allowed
+ *
+ * Returns true if there's any budget left (i.e. the clean is finished)
+ **/
+static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
+{
+ u16 i = tx_ring->next_to_clean;
+ struct i40e_tx_buffer *tx_buf;
+ struct i40e_tx_desc *tx_desc;
+ unsigned int total_packets = 0;
+ unsigned int total_bytes = 0;
+
+ tx_buf = &tx_ring->tx_bi[i];
+ tx_desc = I40E_TX_DESC(tx_ring, i);
+ i -= tx_ring->count;
+
+ do {
+ struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
+
+ /* if next_to_watch is not set then there is no work pending */
+ if (!eop_desc)
+ break;
+
+ /* prevent any other reads prior to eop_desc */
+ read_barrier_depends();
+
+ /* if the descriptor isn't done, no work yet to do */
+ if (!(eop_desc->cmd_type_offset_bsz &
+ cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
+ break;
+
+ /* clear next_to_watch to prevent false hangs */
+ tx_buf->next_to_watch = NULL;
+
+ /* update the statistics for this packet */
+ total_bytes += tx_buf->bytecount;
+ total_packets += tx_buf->gso_segs;
+
+ /* free the skb */
+ dev_kfree_skb_any(tx_buf->skb);
+
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+
+ /* clear tx_buffer data */
+ tx_buf->skb = NULL;
+ dma_unmap_len_set(tx_buf, len, 0);
+
+ /* unmap remaining buffers */
+ while (tx_desc != eop_desc) {
+
+ tx_buf++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buf = tx_ring->tx_bi;
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ }
+
+ /* unmap any remaining paged data */
+ if (dma_unmap_len(tx_buf, len)) {
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buf, len, 0);
+ }
+ }
+
+ /* move us one more past the eop_desc for start of next pkt */
+ tx_buf++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buf = tx_ring->tx_bi;
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ }
+
+ /* update budget accounting */
+ budget--;
+ } while (likely(budget));
+
+ i += tx_ring->count;
+ tx_ring->next_to_clean = i;
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->stats.bytes += total_bytes;
+ tx_ring->stats.packets += total_packets;
+ u64_stats_update_end(&tx_ring->syncp);
+ tx_ring->q_vector->tx.total_bytes += total_bytes;
+ tx_ring->q_vector->tx.total_packets += total_packets;
+
+ if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
+ /* schedule immediate reset if we believe we hung */
+ dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
+ " VSI <%d>\n"
+ " Tx Queue <%d>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n",
+ tx_ring->vsi->seid,
+ tx_ring->queue_index,
+ tx_ring->next_to_use, i);
+ dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
+ " time_stamp <%lx>\n"
+ " jiffies <%lx>\n",
+ tx_ring->tx_bi[i].time_stamp, jiffies);
+
+ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+
+ dev_info(tx_ring->dev,
+ "tx hang detected on queue %d, resetting adapter\n",
+ tx_ring->queue_index);
+
+ tx_ring->netdev->netdev_ops->ndo_tx_timeout(tx_ring->netdev);
+
+ /* the adapter is about to reset, no point in enabling stuff */
+ return true;
+ }
+
+ netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->queue_index),
+ total_packets, total_bytes);
+
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
+ if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
+ (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
+ smp_mb();
+ if (__netif_subqueue_stopped(tx_ring->netdev,
+ tx_ring->queue_index) &&
+ !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
+ netif_wake_subqueue(tx_ring->netdev,
+ tx_ring->queue_index);
+ ++tx_ring->tx_stats.restart_queue;
+ }
+ }
+
+ return budget > 0;
+}
+
+/**
+ * i40e_set_new_dynamic_itr - Find new ITR level
+ * @rc: structure containing ring performance data
+ *
+ * Stores a new ITR value based on packets and byte counts during
+ * the last interrupt. The advantage of per interrupt computation
+ * is faster updates and more accurate ITR for the current traffic
+ * pattern. Constants in this function were computed based on
+ * theoretical maximum wire speed and thresholds were set based on
+ * testing data as well as attempting to minimize response time
+ * while increasing bulk throughput.
+ **/
+static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
+{
+ enum i40e_latency_range new_latency_range = rc->latency_range;
+ u32 new_itr = rc->itr;
+ int bytes_per_int;
+
+ if (rc->total_packets == 0 || !rc->itr)
+ return;
+
+	/* simple throttle rate management
+ * 0-10MB/s lowest (100000 ints/s)
+ * 10-20MB/s low (20000 ints/s)
+ * 20-1249MB/s bulk (8000 ints/s)
+ */
+ bytes_per_int = rc->total_bytes / rc->itr;
+	switch (new_latency_range) {
+ case I40E_LOWEST_LATENCY:
+ if (bytes_per_int > 10)
+ new_latency_range = I40E_LOW_LATENCY;
+ break;
+ case I40E_LOW_LATENCY:
+ if (bytes_per_int > 20)
+ new_latency_range = I40E_BULK_LATENCY;
+ else if (bytes_per_int <= 10)
+ new_latency_range = I40E_LOWEST_LATENCY;
+ break;
+ case I40E_BULK_LATENCY:
+ if (bytes_per_int <= 20)
+			new_latency_range = I40E_LOW_LATENCY;
+ break;
+	}
+	rc->latency_range = new_latency_range;
+
+ switch (new_latency_range) {
+ case I40E_LOWEST_LATENCY:
+ new_itr = I40E_ITR_100K;
+ break;
+ case I40E_LOW_LATENCY:
+ new_itr = I40E_ITR_20K;
+ break;
+ case I40E_BULK_LATENCY:
+ new_itr = I40E_ITR_8K;
+ break;
+ default:
+ break;
+ }
+
+ if (new_itr != rc->itr) {
+ /* do an exponential smoothing */
+ new_itr = (10 * new_itr * rc->itr) /
+ ((9 * new_itr) + rc->itr);
+ rc->itr = new_itr & I40E_MAX_ITR;
+ }
+
+ rc->total_bytes = 0;
+ rc->total_packets = 0;
+}
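To see the smoothing step with invented values: if rc->itr is 500 and the new target is 62, the blend yields (10 * 62 * 500) / (9 * 62 + 500) = 310000 / 1058, which truncates to 293, so the ITR moves roughly half way toward the target per invocation. When old and new agree the formula is a fixed point, since 10ab / (9a + b) = a whenever a = b.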
+
+/**
+ * i40e_update_dynamic_itr - Adjust ITR based on bytes per int
+ * @q_vector: the vector to adjust
+ **/
+static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
+{
+ u16 vector = q_vector->vsi->base_vector + q_vector->v_idx;
+ struct i40e_hw *hw = &q_vector->vsi->back->hw;
+ u32 reg_addr;
+ u16 old_itr;
+
+ reg_addr = I40E_VFINT_ITRN1(I40E_RX_ITR, vector - 1);
+ old_itr = q_vector->rx.itr;
+ i40e_set_new_dynamic_itr(&q_vector->rx);
+ if (old_itr != q_vector->rx.itr)
+ wr32(hw, reg_addr, q_vector->rx.itr);
+
+ reg_addr = I40E_VFINT_ITRN1(I40E_TX_ITR, vector - 1);
+ old_itr = q_vector->tx.itr;
+ i40e_set_new_dynamic_itr(&q_vector->tx);
+ if (old_itr != q_vector->tx.itr)
+ wr32(hw, reg_addr, q_vector->tx.itr);
+}
+
+/**
+ * i40evf_setup_tx_descriptors - Allocate the Tx descriptors
+ * @tx_ring: the tx ring to set up
+ *
+ * Return 0 on success, negative on error
+ **/
+int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring)
+{
+ struct device *dev = tx_ring->dev;
+ int bi_size;
+
+ if (!dev)
+ return -ENOMEM;
+
+ bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
+ tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
+ if (!tx_ring->tx_bi)
+ goto err;
+
+ /* round up to nearest 4K */
+ tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
+ tx_ring->size = ALIGN(tx_ring->size, 4096);
+ tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
+ if (!tx_ring->desc) {
+ dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
+ tx_ring->size);
+ goto err;
+ }
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+ return 0;
+
+err:
+ kfree(tx_ring->tx_bi);
+ tx_ring->tx_bi = NULL;
+ return -ENOMEM;
+}
+
+/**
+ * i40evf_clean_rx_ring - Free Rx buffers
+ * @rx_ring: ring to be cleaned
+ **/
+void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
+{
+ struct device *dev = rx_ring->dev;
+ struct i40e_rx_buffer *rx_bi;
+ unsigned long bi_size;
+ u16 i;
+
+ /* ring already cleared, nothing to do */
+ if (!rx_ring->rx_bi)
+ return;
+
+ /* Free all the Rx ring sk_buffs */
+ for (i = 0; i < rx_ring->count; i++) {
+ rx_bi = &rx_ring->rx_bi[i];
+ if (rx_bi->dma) {
+ dma_unmap_single(dev,
+ rx_bi->dma,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+ rx_bi->dma = 0;
+ }
+ if (rx_bi->skb) {
+ dev_kfree_skb(rx_bi->skb);
+ rx_bi->skb = NULL;
+ }
+ if (rx_bi->page) {
+ if (rx_bi->page_dma) {
+ dma_unmap_page(dev,
+ rx_bi->page_dma,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+ rx_bi->page_dma = 0;
+ }
+ __free_page(rx_bi->page);
+ rx_bi->page = NULL;
+ rx_bi->page_offset = 0;
+ }
+ }
+
+ bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
+ memset(rx_ring->rx_bi, 0, bi_size);
+
+ /* Zero out the descriptor ring */
+ memset(rx_ring->desc, 0, rx_ring->size);
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+}
+
+/**
+ * i40evf_free_rx_resources - Free Rx resources
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+void i40evf_free_rx_resources(struct i40e_ring *rx_ring)
+{
+ i40evf_clean_rx_ring(rx_ring);
+ kfree(rx_ring->rx_bi);
+ rx_ring->rx_bi = NULL;
+
+ if (rx_ring->desc) {
+ dma_free_coherent(rx_ring->dev, rx_ring->size,
+ rx_ring->desc, rx_ring->dma);
+ rx_ring->desc = NULL;
+ }
+}
+
+/**
+ * i40evf_setup_rx_descriptors - Allocate Rx descriptors
+ * @rx_ring: Rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring)
+{
+ struct device *dev = rx_ring->dev;
+ int bi_size;
+
+ bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
+ rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
+ if (!rx_ring->rx_bi)
+ goto err;
+
+ /* Round up to nearest 4K */
+ rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
+ ? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
+ : rx_ring->count * sizeof(union i40e_32byte_rx_desc);
+ rx_ring->size = ALIGN(rx_ring->size, 4096);
+ rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
+
+ if (!rx_ring->desc) {
+ dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
+ rx_ring->size);
+ goto err;
+ }
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+
+ return 0;
+err:
+ kfree(rx_ring->rx_bi);
+ rx_ring->rx_bi = NULL;
+ return -ENOMEM;
+}
+
+/**
+ * i40e_release_rx_desc - Store the new tail and head values
+ * @rx_ring: ring to bump
+ * @val: new head index
+ **/
+static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
+{
+ rx_ring->next_to_use = val;
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(val, rx_ring->tail);
+}
+
+/**
+ * i40evf_alloc_rx_buffers - Replace used receive buffers; packet split
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
+ **/
+void i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
+{
+ u16 i = rx_ring->next_to_use;
+ union i40e_rx_desc *rx_desc;
+ struct i40e_rx_buffer *bi;
+ struct sk_buff *skb;
+
+ /* do nothing if no valid netdev defined */
+ if (!rx_ring->netdev || !cleaned_count)
+ return;
+
+ while (cleaned_count--) {
+ rx_desc = I40E_RX_DESC(rx_ring, i);
+ bi = &rx_ring->rx_bi[i];
+ skb = bi->skb;
+
+ if (!skb) {
+ skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+ rx_ring->rx_buf_len);
+ if (!skb) {
+ rx_ring->rx_stats.alloc_buff_failed++;
+ goto no_buffers;
+ }
+ /* initialize queue mapping */
+ skb_record_rx_queue(skb, rx_ring->queue_index);
+ bi->skb = skb;
+ }
+
+ if (!bi->dma) {
+ bi->dma = dma_map_single(rx_ring->dev,
+ skb->data,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_ring->dev, bi->dma)) {
+ rx_ring->rx_stats.alloc_buff_failed++;
+ bi->dma = 0;
+ goto no_buffers;
+ }
+ }
+
+ if (ring_is_ps_enabled(rx_ring)) {
+ if (!bi->page) {
+ bi->page = alloc_page(GFP_ATOMIC);
+ if (!bi->page) {
+ rx_ring->rx_stats.alloc_page_failed++;
+ goto no_buffers;
+ }
+ }
+
+ if (!bi->page_dma) {
+ /* use a half page if we're re-using */
+ bi->page_offset ^= PAGE_SIZE / 2;
+ bi->page_dma = dma_map_page(rx_ring->dev,
+ bi->page,
+ bi->page_offset,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_ring->dev,
+ bi->page_dma)) {
+ rx_ring->rx_stats.alloc_page_failed++;
+ bi->page_dma = 0;
+ goto no_buffers;
+ }
+ }
+
+ /* Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info.
+ */
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
+ rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
+ } else {
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+ rx_desc->read.hdr_addr = 0;
+ }
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+ }
+
+no_buffers:
+ if (rx_ring->next_to_use != i)
+ i40e_release_rx_desc(rx_ring, i);
+}
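A note on the half-page trick above: because bi->page_offset is XORed with PAGE_SIZE / 2 before each new mapping, a 4 KiB page alternates between offsets 0 and 2048, so a single allocated page backs two receive buffers in turn.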
+
+/**
+ * i40e_receive_skb - Send a completed packet up the stack
+ * @rx_ring: rx ring in play
+ * @skb: packet to send up
+ * @vlan_tag: vlan tag for packet
+ **/
+static void i40e_receive_skb(struct i40e_ring *rx_ring,
+ struct sk_buff *skb, u16 vlan_tag)
+{
+ struct i40e_q_vector *q_vector = rx_ring->q_vector;
+ struct i40e_vsi *vsi = rx_ring->vsi;
+ u64 flags = vsi->back->flags;
+
+ if (vlan_tag & VLAN_VID_MASK)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+
+ if (flags & I40E_FLAG_IN_NETPOLL)
+ netif_rx(skb);
+ else
+ napi_gro_receive(&q_vector->napi, skb);
+}
+
+/**
+ * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
+ * @vsi: the VSI we care about
+ * @skb: skb currently being received and modified
+ * @rx_status: status value of last descriptor in packet
+ * @rx_error: error value of last descriptor in packet
+ * @rx_ptype: ptype value of last descriptor in packet
+ **/
+static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
+ struct sk_buff *skb,
+ u32 rx_status,
+ u32 rx_error,
+ u16 rx_ptype)
+{
+ bool ipv4_tunnel, ipv6_tunnel;
+ __wsum rx_udp_csum;
+ __sum16 csum;
+ struct iphdr *iph;
+
+ ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
+ (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
+ ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
+ (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
+
+ skb->encapsulation = ipv4_tunnel || ipv6_tunnel;
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* Rx csum enabled and ip headers found? */
+ if (!(vsi->netdev->features & NETIF_F_RXCSUM &&
+ rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+ return;
+
+	/* likely incorrect csum if alternate IP extension headers found */
+ if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+ return;
+
+	/* IP or L4 or outermost IP checksum error */
+ if (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
+ (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) |
+ (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))) {
+ vsi->back->hw_csum_rx_error++;
+ return;
+ }
+
+ if (ipv4_tunnel &&
+ !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
+		/* If VXLAN traffic has an outer UDPv4 checksum we need to check
+		 * it in the driver; the hardware does not do it for us.
+ * Since L3L4P bit was set we assume a valid IHL value (>=5)
+ * so the total length of IPv4 header is IHL*4 bytes
+ */
+ skb->transport_header = skb->mac_header +
+ sizeof(struct ethhdr) +
+ (ip_hdr(skb)->ihl * 4);
+
+ /* Add 4 bytes for VLAN tagged packets */
+ skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) ||
+ skb->protocol == htons(ETH_P_8021AD))
+ ? VLAN_HLEN : 0;
+
+ rx_udp_csum = udp_csum(skb);
+ iph = ip_hdr(skb);
+ csum = csum_tcpudp_magic(
+ iph->saddr, iph->daddr,
+ (skb->len - skb_transport_offset(skb)),
+ IPPROTO_UDP, rx_udp_csum);
+
+ if (udp_hdr(skb)->check != csum) {
+ vsi->back->hw_csum_rx_error++;
+ return;
+ }
+ }
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+/**
+ * i40e_rx_hash - returns the hash value from the Rx descriptor
+ * @ring: descriptor ring
+ * @rx_desc: specific descriptor
+ **/
+static inline u32 i40e_rx_hash(struct i40e_ring *ring,
+ union i40e_rx_desc *rx_desc)
+{
+ const __le64 rss_mask =
+ cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
+ I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
+
+ if ((ring->netdev->features & NETIF_F_RXHASH) &&
+ (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
+ return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
+ else
+ return 0;
+}
+
+/**
+ * i40e_clean_rx_irq - Reclaim resources after receive completes
+ * @rx_ring: rx ring to clean
+ * @budget: how many cleans we're allowed
+ *
+ * Returns true if there's any budget left (i.e. the clean is finished)
+ **/
+static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
+{
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
+ u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
+ const int current_node = numa_node_id();
+ struct i40e_vsi *vsi = rx_ring->vsi;
+ u16 i = rx_ring->next_to_clean;
+ union i40e_rx_desc *rx_desc;
+ u32 rx_error, rx_status;
+ u64 qword;
+ u16 rx_ptype;
+
+ rx_desc = I40E_RX_DESC(rx_ring, i);
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ rx_status = (qword & I40E_RXD_QW1_STATUS_MASK)
+ >> I40E_RXD_QW1_STATUS_SHIFT;
+
+ while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
+ union i40e_rx_desc *next_rxd;
+ struct i40e_rx_buffer *rx_bi;
+ struct sk_buff *skb;
+ u16 vlan_tag;
+ rx_bi = &rx_ring->rx_bi[i];
+ skb = rx_bi->skb;
+ prefetch(skb->data);
+
+ rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+ rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
+ rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
+ I40E_RXD_QW1_LENGTH_SPH_SHIFT;
+
+ rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
+ I40E_RXD_QW1_ERROR_SHIFT;
+ rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+ rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+
+ rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT;
+ rx_bi->skb = NULL;
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * STATUS_DD bit is set
+ */
+ rmb();
+
+ /* Get the header and possibly the whole packet
+		 * If this is an skb from a previous receive, dma will be 0
+ */
+ if (rx_bi->dma) {
+ u16 len;
+
+ if (rx_hbo)
+ len = I40E_RX_HDR_SIZE;
+ else if (rx_sph)
+ len = rx_header_len;
+ else if (rx_packet_len)
+ len = rx_packet_len; /* 1buf/no split found */
+ else
+ len = rx_header_len; /* split always mode */
+
+ skb_put(skb, len);
+ dma_unmap_single(rx_ring->dev,
+ rx_bi->dma,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+ rx_bi->dma = 0;
+ }
+
+ /* Get the rest of the data if this was a header split */
+ if (ring_is_ps_enabled(rx_ring) && rx_packet_len) {
+
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+ rx_bi->page,
+ rx_bi->page_offset,
+ rx_packet_len);
+
+ skb->len += rx_packet_len;
+ skb->data_len += rx_packet_len;
+ skb->truesize += rx_packet_len;
+
+ if ((page_count(rx_bi->page) == 1) &&
+ (page_to_nid(rx_bi->page) == current_node))
+ get_page(rx_bi->page);
+ else
+ rx_bi->page = NULL;
+
+ dma_unmap_page(rx_ring->dev,
+ rx_bi->page_dma,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+ rx_bi->page_dma = 0;
+ }
+ I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd);
+
+ if (unlikely(
+ !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+ struct i40e_rx_buffer *next_buffer;
+
+ next_buffer = &rx_ring->rx_bi[i];
+
+ if (ring_is_ps_enabled(rx_ring)) {
+ rx_bi->skb = next_buffer->skb;
+ rx_bi->dma = next_buffer->dma;
+ next_buffer->skb = skb;
+ next_buffer->dma = 0;
+ }
+ rx_ring->rx_stats.non_eop_descs++;
+ goto next_desc;
+ }
+
+ /* ERR_MASK will only have valid bits if EOP set */
+ if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+ dev_kfree_skb_any(skb);
+ goto next_desc;
+ }
+
+ skb->rxhash = i40e_rx_hash(rx_ring, rx_desc);
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+
+ i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
+
+ vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+ ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
+ : 0;
+ i40e_receive_skb(rx_ring, skb, vlan_tag);
+
+ rx_ring->netdev->last_rx = jiffies;
+ budget--;
+next_desc:
+ rx_desc->wb.qword1.status_error_len = 0;
+ if (!budget)
+ break;
+
+ cleaned_count++;
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
+ i40evf_alloc_rx_buffers(rx_ring, cleaned_count);
+ cleaned_count = 0;
+ }
+
+ /* use prefetched values */
+ rx_desc = next_rxd;
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
+ }
+
+ rx_ring->next_to_clean = i;
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->stats.packets += total_rx_packets;
+ rx_ring->stats.bytes += total_rx_bytes;
+ u64_stats_update_end(&rx_ring->syncp);
+ rx_ring->q_vector->rx.total_packets += total_rx_packets;
+ rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+
+ if (cleaned_count)
+ i40evf_alloc_rx_buffers(rx_ring, cleaned_count);
+
+ return budget > 0;
+}
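+
+/* Note on the receive loop above: when a descriptor lacks the EOF bit the
+ * frame spans multiple buffers, so in packet-split mode the partially
+ * built skb is parked on the next buffer info slot for the following
+ * iteration to pick up, and the budget is only charged once per completed
+ * frame handed to i40e_receive_skb().
+ */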
+
+/**
+ * i40evf_napi_poll - NAPI polling Rx/Tx cleanup routine
+ * @napi: napi struct with our device's info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function will clean all queues associated with a q_vector.
+ *
+ * Returns the amount of work done
+ **/
+int i40evf_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct i40e_q_vector *q_vector =
+ container_of(napi, struct i40e_q_vector, napi);
+ struct i40e_vsi *vsi = q_vector->vsi;
+ struct i40e_ring *ring;
+ bool clean_complete = true;
+ int budget_per_ring;
+
+ if (test_bit(__I40E_DOWN, &vsi->state)) {
+ napi_complete(napi);
+ return 0;
+ }
+
+ /* Since the actual Tx work is minimal, we can give the Tx a larger
+ * budget and be more aggressive about cleaning up the Tx descriptors.
+ */
+ i40e_for_each_ring(ring, q_vector->tx)
+ clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
+
+ /* We attempt to distribute budget to each Rx queue fairly, but don't
+ * allow the budget to go below 1 because that would exit polling early.
+ */
+ budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
+
+ i40e_for_each_ring(ring, q_vector->rx)
+ clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring);
+
+ /* If work not completed, return budget and polling will return */
+ if (!clean_complete)
+ return budget;
+
+ /* Work is done so exit the polling mode and re-enable the interrupt */
+ napi_complete(napi);
+ if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) ||
+ ITR_IS_DYNAMIC(vsi->tx_itr_setting))
+ i40e_update_dynamic_itr(q_vector);
+
+ if (!test_bit(__I40E_DOWN, &vsi->state))
+ i40evf_irq_enable_queues(vsi->back, 1 << q_vector->v_idx);
+
+ return 0;
+}
+
+/**
+ * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
+ * @skb: send buffer
+ * @tx_ring: ring to send buffer on
+ * @flags: the tx flags to be set
+ *
+ * Checks the skb and sets up the corresponding generic transmit flags
+ * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
+ *
+ * Returns a negative error code to indicate the frame should be dropped,
+ * otherwise returns 0 to indicate the flags have been set properly.
+ **/
+static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
+ struct i40e_ring *tx_ring,
+ u32 *flags)
+{
+ __be16 protocol = skb->protocol;
+ u32 tx_flags = 0;
+
+ /* if we have a HW VLAN tag being added, default to the HW one */
+ if (vlan_tx_tag_present(skb)) {
+ tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
+ tx_flags |= I40E_TX_FLAGS_HW_VLAN;
+ /* else if it is a SW VLAN, check the next protocol and store the tag */
+ } else if (protocol == htons(ETH_P_8021Q)) {
+ struct vlan_hdr *vhdr, _vhdr;
+ vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
+ if (!vhdr)
+ return -EINVAL;
+
+ protocol = vhdr->h_vlan_encapsulated_proto;
+ tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
+ tx_flags |= I40E_TX_FLAGS_SW_VLAN;
+ }
+
+ *flags = tx_flags;
+ return 0;
+}
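+
+/* Worked example for the flag packing above (illustrative tag value): a
+ * hardware-offloaded tag of 0x0123 becomes
+ * tx_flags = (0x0123 << I40E_TX_FLAGS_VLAN_SHIFT) | I40E_TX_FLAGS_HW_VLAN,
+ * i.e. the 16-bit TCI sits in the upper half of tx_flags
+ * (I40E_TX_FLAGS_VLAN_MASK) with the flag bits in the lower half.
+ */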
+
+/**
+ * i40e_tso - set up the tso context descriptor
+ * @tx_ring: ptr to the ring to send
+ * @skb: ptr to the skb we're sending
+ * @tx_flags: the collected send information
+ * @protocol: the send protocol
+ * @hdr_len: ptr to the size of the packet header
+ * @cd_type_cmd_tso_mss: ptr to quad word 1 of the context descriptor
+ * @cd_tunneling: ptr to context descriptor bits
+ *
+ * Returns 0 if no TSO is needed, 1 if TSO was set up, or a negative error code
+ **/
+static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
+ u32 tx_flags, __be16 protocol, u8 *hdr_len,
+ u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
+{
+ u32 cd_cmd, cd_tso_len, cd_mss;
+ struct tcphdr *tcph;
+ struct iphdr *iph;
+ u32 l4len;
+ int err;
+ struct ipv6hdr *ipv6h;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ if (skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (err)
+ return err;
+ }
+
+ if (protocol == htons(ETH_P_IP)) {
+ iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+ tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
+ iph->tot_len = 0;
+ iph->check = 0;
+ tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+ 0, IPPROTO_TCP, 0);
+ } else if (skb_is_gso_v6(skb)) {
+
+ ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
+ : ipv6_hdr(skb);
+ tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
+ ipv6h->payload_len = 0;
+ tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
+ 0, IPPROTO_TCP, 0);
+ }
+
+ l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
+ *hdr_len = (skb->encapsulation
+ ? (skb_inner_transport_header(skb) - skb->data)
+ : skb_transport_offset(skb)) + l4len;
+
+ /* find the field values */
+ cd_cmd = I40E_TX_CTX_DESC_TSO;
+ cd_tso_len = skb->len - *hdr_len;
+ cd_mss = skb_shinfo(skb)->gso_size;
+ *cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
+ ((u64)cd_tso_len <<
+ I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
+ ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
+ return 1;
+}
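+
+/* Worked example for the TSO context packing above (illustrative sizes):
+ * with 66 bytes of headers (14 ethernet + 20 IPv4 + 32 TCP) and a
+ * gso_size of 1448, *hdr_len = 66, cd_tso_len = skb->len - 66 and
+ * cd_mss = 1448; the three values are packed into qword1 at the
+ * I40E_TXD_CTX_QW1_CMD/TSO_LEN/MSS shifts of 4, 30 and 50 bits.
+ */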
+
+/**
+ * i40e_tx_enable_csum - Enable Tx checksum offloads
+ * @skb: send buffer
+ * @tx_flags: Tx flags currently set
+ * @td_cmd: Tx descriptor command bits to set
+ * @td_offset: Tx descriptor header offsets to set
+ * @tx_ring: Tx descriptor ring
+ * @cd_tunneling: ptr to context desc bits
+ **/
+static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
+ u32 *td_cmd, u32 *td_offset,
+ struct i40e_ring *tx_ring,
+ u32 *cd_tunneling)
+{
+ struct ipv6hdr *this_ipv6_hdr;
+ unsigned int this_tcp_hdrlen;
+ struct iphdr *this_ip_hdr;
+ u32 network_hdr_len;
+ u8 l4_hdr = 0;
+
+ if (skb->encapsulation) {
+ network_hdr_len = skb_inner_network_header_len(skb);
+ this_ip_hdr = inner_ip_hdr(skb);
+ this_ipv6_hdr = inner_ipv6_hdr(skb);
+ this_tcp_hdrlen = inner_tcp_hdrlen(skb);
+
+ if (tx_flags & I40E_TX_FLAGS_IPV4) {
+
+ if (tx_flags & I40E_TX_FLAGS_TSO) {
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
+ ip_hdr(skb)->check = 0;
+ } else {
+ *cd_tunneling |=
+ I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
+ }
+		} else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+			/* the outer IPv6 header has no checksum field to
+			 * zero, so only the tunneling type is recorded
+			 */
+			*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+		}
+
+ /* Now set the ctx descriptor fields */
+ *cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
+ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
+ I40E_TXD_CTX_UDP_TUNNELING |
+ ((skb_inner_network_offset(skb) -
+ skb_transport_offset(skb)) >> 1) <<
+ I40E_TXD_CTX_QW0_NATLEN_SHIFT;
+
+ } else {
+ network_hdr_len = skb_network_header_len(skb);
+ this_ip_hdr = ip_hdr(skb);
+ this_ipv6_hdr = ipv6_hdr(skb);
+ this_tcp_hdrlen = tcp_hdrlen(skb);
+ }
+
+ /* Enable IP checksum offloads */
+ if (tx_flags & I40E_TX_FLAGS_IPV4) {
+ l4_hdr = this_ip_hdr->protocol;
+ /* the stack computes the IP header already, the only time we
+ * need the hardware to recompute it is in the case of TSO.
+ */
+ if (tx_flags & I40E_TX_FLAGS_TSO) {
+ *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
+ this_ip_hdr->check = 0;
+ } else {
+ *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
+ }
+ /* Now set the td_offset for IP header length */
+ *td_offset = (network_hdr_len >> 2) <<
+ I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+ } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+ l4_hdr = this_ipv6_hdr->nexthdr;
+ *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
+ /* Now set the td_offset for IP header length */
+ *td_offset = (network_hdr_len >> 2) <<
+ I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+ }
+ /* words in MACLEN + dwords in IPLEN + dwords in L4Len */
+ *td_offset |= (skb_network_offset(skb) >> 1) <<
+ I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
+
+ /* Enable L4 checksum offloads */
+ switch (l4_hdr) {
+ case IPPROTO_TCP:
+ /* enable checksum offloads */
+ *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
+ *td_offset |= (this_tcp_hdrlen >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ case IPPROTO_SCTP:
+ /* enable SCTP checksum offload */
+ *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
+ *td_offset |= (sizeof(struct sctphdr) >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ case IPPROTO_UDP:
+ /* enable UDP checksum offload */
+ *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
+ *td_offset |= (sizeof(struct udphdr) >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ default:
+ break;
+ }
+}
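+
+/* Worked example for the td_offset encoding above (illustrative frame):
+ * an untagged IPv4/TCP frame with no options gives MACLEN = 14 >> 1 = 7
+ * words at shift 0, IPLEN = 20 >> 2 = 5 dwords at shift 7 and
+ * L4LEN = 20 >> 2 = 5 dwords at shift 14, per the
+ * I40E_TX_DESC_LENGTH_* field definitions.
+ */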
+
+/**
+ * i40e_create_tx_ctx - Build the Tx context descriptor
+ * @tx_ring: ring to create the descriptor on
+ * @cd_type_cmd_tso_mss: Quad Word 1
+ * @cd_tunneling: Quad Word 0 - bits 0-31
+ * @cd_l2tag2: Quad Word 0 - bits 32-63
+ **/
+static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
+ const u64 cd_type_cmd_tso_mss,
+ const u32 cd_tunneling, const u32 cd_l2tag2)
+{
+ struct i40e_tx_context_desc *context_desc;
+ int i = tx_ring->next_to_use;
+
+ if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2)
+ return;
+
+ /* grab the next descriptor */
+ context_desc = I40E_TX_CTXTDESC(tx_ring, i);
+
+ i++;
+ tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
+
+ /* cpu_to_le32 and assign to struct fields */
+ context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
+ context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
+ context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
+}
+
+/**
+ * i40e_tx_map - Build the Tx descriptor
+ * @tx_ring: ring to send buffer on
+ * @skb: send buffer
+ * @first: first buffer info buffer to use
+ * @tx_flags: collected send information
+ * @hdr_len: size of the packet header
+ * @td_cmd: the command field in the descriptor
+ * @td_offset: offset for checksum or crc
+ **/
+static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+ struct i40e_tx_buffer *first, u32 tx_flags,
+ const u8 hdr_len, u32 td_cmd, u32 td_offset)
+{
+ unsigned int data_len = skb->data_len;
+ unsigned int size = skb_headlen(skb);
+ struct skb_frag_struct *frag;
+ struct i40e_tx_buffer *tx_bi;
+ struct i40e_tx_desc *tx_desc;
+ u16 i = tx_ring->next_to_use;
+ u32 td_tag = 0;
+ dma_addr_t dma;
+ u16 gso_segs;
+
+ if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
+ td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
+ td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
+ I40E_TX_FLAGS_VLAN_SHIFT;
+ }
+
+ if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
+ gso_segs = skb_shinfo(skb)->gso_segs;
+ else
+ gso_segs = 1;
+
+ /* multiply data chunks by size of headers */
+ first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
+ first->gso_segs = gso_segs;
+ first->skb = skb;
+ first->tx_flags = tx_flags;
+
+ dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+
+ tx_desc = I40E_TX_DESC(tx_ring, i);
+ tx_bi = first;
+
+ for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto dma_error;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_bi, len, size);
+ dma_unmap_addr_set(tx_bi, dma, dma);
+
+ tx_desc->buffer_addr = cpu_to_le64(dma);
+
+ while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(td_cmd, td_offset,
+ I40E_MAX_DATA_PER_TXD, td_tag);
+
+ tx_desc++;
+ i++;
+ if (i == tx_ring->count) {
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
+
+ dma += I40E_MAX_DATA_PER_TXD;
+ size -= I40E_MAX_DATA_PER_TXD;
+
+ tx_desc->buffer_addr = cpu_to_le64(dma);
+ }
+
+ if (likely(!data_len))
+ break;
+
+ tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
+ size, td_tag);
+
+ tx_desc++;
+ i++;
+ if (i == tx_ring->count) {
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
+
+ size = skb_frag_size(frag);
+ data_len -= size;
+
+ dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
+ DMA_TO_DEVICE);
+
+ tx_bi = &tx_ring->tx_bi[i];
+ }
+
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(td_cmd, td_offset, size, td_tag) |
+ cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);
+
+ netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->queue_index),
+ first->bytecount);
+
+ /* set the timestamp */
+ first->time_stamp = jiffies;
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+
+ /* set next_to_watch value indicating a packet is present */
+ first->next_to_watch = tx_desc;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
+ tx_ring->next_to_use = i;
+
+ /* notify HW of packet */
+ writel(i, tx_ring->tail);
+
+ return;
+
+dma_error:
+ dev_info(tx_ring->dev, "TX DMA map failed\n");
+
+ /* clear dma mappings for failed tx_bi map */
+ for (;;) {
+ tx_bi = &tx_ring->tx_bi[i];
+ i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
+ if (tx_bi == first)
+ break;
+ if (i == 0)
+ i = tx_ring->count;
+ i--;
+ }
+
+ tx_ring->next_to_use = i;
+}
+
+/**
+ * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size: the number of descriptors we want to assure are available
+ *
+ * Returns -EBUSY if a stop is needed, else 0
+ **/
+static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ /* Memory barrier before checking head and tail */
+ smp_mb();
+
+ /* Check again in a case another CPU has just made room available. */
+ if (likely(I40E_DESC_UNUSED(tx_ring) < size))
+ return -EBUSY;
+
+ /* A reprieve! - use start_queue because it doesn't call schedule */
+ netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ ++tx_ring->tx_stats.restart_queue;
+ return 0;
+}
+
+/**
+ * i40e_maybe_stop_tx - 1st level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size: the number of descriptors we want to assure are available
+ *
+ * Returns 0 if stop is not needed
+ **/
+static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+ if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
+ return 0;
+ return __i40e_maybe_stop_tx(tx_ring, size);
+}
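+
+/* The pair of helpers above is the usual lockless stop/wake pattern: the
+ * fast path only reads the free-descriptor count, while the slow path
+ * stops the subqueue first, issues smp_mb(), and then re-reads the count
+ * so that a clean running on another CPU cannot leave the queue stopped
+ * forever.
+ */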
+
+/**
+ * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
+ * @skb: send buffer
+ * @tx_ring: ring to send buffer on
+ *
+ * Returns the number of data descriptors needed for this skb. Returns 0 to
+ * indicate there are not enough descriptors available in this ring since we
+ * need at least one descriptor.
+ **/
+static int i40e_xmit_descriptor_count(struct sk_buff *skb,
+ struct i40e_ring *tx_ring)
+{
+#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
+ unsigned int f;
+#endif
+ int count = 0;
+
+ /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
+ * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
+ * + 2 desc gap to keep tail from touching head,
+ * + 1 desc for context descriptor,
+ * otherwise try next time
+ */
+#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+#else
+ count += skb_shinfo(skb)->nr_frags;
+#endif
+ count += TXD_USE_COUNT(skb_headlen(skb));
+ if (i40e_maybe_stop_tx(tx_ring, count + 3)) {
+ tx_ring->tx_stats.tx_busy++;
+ return 0;
+ }
+ return count;
+}
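+
+/* Worked example for the count above (illustrative frame): a linear
+ * 1500-byte skb with no fragments needs TXD_USE_COUNT(1500) = 1 data
+ * descriptor, so the check asks for 1 + 3 = 4 free slots; the extra
+ * three cover the context descriptor and the two-descriptor gap noted
+ * in the comment above.
+ */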
+
+/**
+ * i40e_xmit_frame_ring - Sends buffer on Tx ring
+ * @skb: send buffer
+ * @tx_ring: ring to send buffer on
+ *
+ * Returns NETDEV_TX_OK if sent, else an error code
+ **/
+static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
+ struct i40e_ring *tx_ring)
+{
+ u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
+ u32 cd_tunneling = 0, cd_l2tag2 = 0;
+ struct i40e_tx_buffer *first;
+ u32 td_offset = 0;
+ u32 tx_flags = 0;
+ __be16 protocol;
+ u32 td_cmd = 0;
+ u8 hdr_len = 0;
+	int tso;
+
+	if (i40e_xmit_descriptor_count(skb, tx_ring) == 0)
+ return NETDEV_TX_BUSY;
+
+ /* prepare the xmit flags */
+ if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
+ goto out_drop;
+
+ /* obtain protocol of skb */
+ protocol = skb->protocol;
+
+ /* record the location of the first descriptor for this packet */
+ first = &tx_ring->tx_bi[tx_ring->next_to_use];
+
+ /* setup IPv4/IPv6 offloads */
+ if (protocol == htons(ETH_P_IP))
+ tx_flags |= I40E_TX_FLAGS_IPV4;
+ else if (protocol == htons(ETH_P_IPV6))
+ tx_flags |= I40E_TX_FLAGS_IPV6;
+
+ tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
+ &cd_type_cmd_tso_mss, &cd_tunneling);
+
+ if (tso < 0)
+ goto out_drop;
+ else if (tso)
+ tx_flags |= I40E_TX_FLAGS_TSO;
+
+ skb_tx_timestamp(skb);
+
+ /* always enable CRC insertion offload */
+ td_cmd |= I40E_TX_DESC_CMD_ICRC;
+
+ /* Always offload the checksum, since it's in the data descriptor */
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ tx_flags |= I40E_TX_FLAGS_CSUM;
+
+ i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
+ tx_ring, &cd_tunneling);
+ }
+
+ i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
+ cd_tunneling, cd_l2tag2);
+
+ i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
+ td_cmd, td_offset);
+
+ i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+ return NETDEV_TX_OK;
+
+out_drop:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+/**
+ * i40evf_xmit_frame - Selects the correct VSI and Tx queue to send buffer
+ * @skb: send buffer
+ * @netdev: network interface device structure
+ *
+ * Returns NETDEV_TX_OK if sent, else an error code
+ **/
+netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_ring *tx_ring = adapter->tx_rings[skb->queue_mapping];
+
+	/* The hardware can't handle really short frames; hardware padding
+	 * only works beyond this point.
+	 */
+ if (unlikely(skb->len < I40E_MIN_TX_LEN)) {
+ if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len))
+ return NETDEV_TX_OK;
+ skb->len = I40E_MIN_TX_LEN;
+ skb_set_tail_pointer(skb, I40E_MIN_TX_LEN);
+ }
+
+ return i40e_xmit_frame_ring(skb, tx_ring);
+}
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
new file mode 100644
index 000000000000..10bf49e18d7f
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -0,0 +1,296 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_TXRX_H_
+#define _I40E_TXRX_H_
+
+/* Interrupt Throttling and Rate Limiting (storm control) Goodies */
+
+#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */
+#define I40E_MIN_ITR 0x0004 /* reg uses 2 usec resolution */
+#define I40E_MAX_IRATE 0x03F
+#define I40E_MIN_IRATE 0x001
+#define I40E_IRATE_USEC_RESOLUTION 4
+#define I40E_ITR_100K 0x0005
+#define I40E_ITR_20K 0x0019
+#define I40E_ITR_8K 0x003E
+#define I40E_ITR_4K 0x007A
+#define I40E_ITR_RX_DEF I40E_ITR_8K
+#define I40E_ITR_TX_DEF I40E_ITR_4K
+#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
+#define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */
+#define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */
+#define I40E_DEFAULT_IRQ_WORK 256
+#define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1)
+#define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC))
+#define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1)
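+
+/* Note on the conversion macros above: the hardware ITR registers count in
+ * 2 usec units (see the comments on I40E_MAX_ITR/I40E_MIN_ITR), so
+ * ITR_TO_REG() strips the I40E_ITR_DYNAMIC flag kept in the top bit and
+ * halves a microsecond setting into register units, while
+ * ITR_REG_TO_USEC() doubles a register value back into microseconds.
+ */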
+
+#define I40E_QUEUE_END_OF_LIST 0x7FF
+
+/* This enum matches hardware bits and is meant to be used by DYN_CTLN
+ * registers and QINT registers, or more generally anywhere in the manual
+ * that mentions ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
+ * register but instead is a special value meaning "don't update" ITR0/1/2.
+ */
+enum i40e_dyn_idx_t {
+ I40E_IDX_ITR0 = 0,
+ I40E_IDX_ITR1 = 1,
+ I40E_IDX_ITR2 = 2,
+ I40E_ITR_NONE = 3 /* ITR_NONE must not be used as an index */
+};
+
+/* these are indexes into ITRN registers */
+#define I40E_RX_ITR I40E_IDX_ITR0
+#define I40E_TX_ITR I40E_IDX_ITR1
+#define I40E_PE_ITR I40E_IDX_ITR2
+
+/* Supported RSS offloads */
+#define I40E_DEFAULT_RSS_HENA ( \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD))
+
+/* Supported Rx Buffer Sizes */
+#define I40E_RXBUFFER_512 512 /* Used for packet split */
+#define I40E_RXBUFFER_2048 2048
+#define I40E_RXBUFFER_3072 3072 /* For FCoE MTU of 2158 */
+#define I40E_RXBUFFER_4096 4096
+#define I40E_RXBUFFER_8192 8192
+#define I40E_MAX_RXBUFFER 9728 /* largest size for single descriptor */
+
+/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
+ * reserve 2 more, and skb_shared_info adds an additional 384 bytes more;
+ * this adds up to 512 bytes of extra data, meaning the smallest allocation
+ * we could have is 1K.
+ * i.e. RXBUFFER_512 --> size-1024 slab
+ */
+#define I40E_RX_HDR_SIZE I40E_RXBUFFER_512
+
+/* How many Rx Buffers do we bundle into one write to the hardware? */
+#define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+#define I40E_RX_NEXT_DESC(r, i, n) \
+ do { \
+ (i)++; \
+ if ((i) == (r)->count) \
+ i = 0; \
+ (n) = I40E_RX_DESC((r), (i)); \
+ } while (0)
+
+#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n) \
+ do { \
+ I40E_RX_NEXT_DESC((r), (i), (n)); \
+ prefetch((n)); \
+ } while (0)
+
+#define i40e_rx_desc i40e_32byte_rx_desc
+
+#define I40E_MIN_TX_LEN 17
+#define I40E_MAX_DATA_PER_TXD 16383 /* aka 16kB - 1 */
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
+#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
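+
+/* Worked example (illustrative, assuming 4 KiB pages, where MAX_SKB_FRAGS
+ * is typically 17): TXD_USE_COUNT(PAGE_SIZE) = DIV_ROUND_UP(4096, 16383) = 1,
+ * so DESC_NEEDED = 17 * 1 + 4 = 21 descriptors; the 4 extra cover the skb
+ * head, one context descriptor, and the two-descriptor gap that keeps the
+ * tail from touching the head.
+ */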
+
+#define I40E_TX_FLAGS_CSUM (u32)(1)
+#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1)
+#define I40E_TX_FLAGS_SW_VLAN (u32)(1 << 2)
+#define I40E_TX_FLAGS_TSO (u32)(1 << 3)
+#define I40E_TX_FLAGS_IPV4 (u32)(1 << 4)
+#define I40E_TX_FLAGS_IPV6 (u32)(1 << 5)
+#define I40E_TX_FLAGS_FCCRC (u32)(1 << 6)
+#define I40E_TX_FLAGS_FSO (u32)(1 << 7)
+#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
+#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
+#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
+#define I40E_TX_FLAGS_VLAN_SHIFT 16
+
+struct i40e_tx_buffer {
+ struct i40e_tx_desc *next_to_watch;
+ unsigned long time_stamp;
+ struct sk_buff *skb;
+ unsigned int bytecount;
+ unsigned short gso_segs;
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(len);
+ u32 tx_flags;
+};
+
+struct i40e_rx_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ struct page *page;
+ dma_addr_t page_dma;
+ unsigned int page_offset;
+};
+
+struct i40e_queue_stats {
+ u64 packets;
+ u64 bytes;
+};
+
+struct i40e_tx_queue_stats {
+ u64 restart_queue;
+ u64 tx_busy;
+ u64 tx_done_old;
+};
+
+struct i40e_rx_queue_stats {
+ u64 non_eop_descs;
+ u64 alloc_page_failed;
+ u64 alloc_buff_failed;
+};
+
+enum i40e_ring_state_t {
+ __I40E_TX_FDIR_INIT_DONE,
+ __I40E_TX_XPS_INIT_DONE,
+ __I40E_TX_DETECT_HANG,
+ __I40E_HANG_CHECK_ARMED,
+ __I40E_RX_PS_ENABLED,
+ __I40E_RX_LRO_ENABLED,
+ __I40E_RX_16BYTE_DESC_ENABLED,
+};
+
+#define ring_is_ps_enabled(ring) \
+ test_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
+#define set_ring_ps_enabled(ring) \
+ set_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
+#define clear_ring_ps_enabled(ring) \
+ clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
+#define check_for_tx_hang(ring) \
+ test_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
+#define set_check_for_tx_hang(ring) \
+ set_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
+#define clear_check_for_tx_hang(ring) \
+ clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
+#define ring_is_lro_enabled(ring) \
+ test_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
+#define set_ring_lro_enabled(ring) \
+ set_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
+#define clear_ring_lro_enabled(ring) \
+ clear_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
+#define ring_is_16byte_desc_enabled(ring) \
+ test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+#define set_ring_16byte_desc_enabled(ring) \
+ set_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+#define clear_ring_16byte_desc_enabled(ring) \
+ clear_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+
+/* struct that defines a descriptor ring, associated with a VSI */
+struct i40e_ring {
+ struct i40e_ring *next; /* pointer to next ring in q_vector */
+ void *desc; /* Descriptor ring memory */
+ struct device *dev; /* Used for DMA mapping */
+ struct net_device *netdev; /* netdev ring maps to */
+ union {
+ struct i40e_tx_buffer *tx_bi;
+ struct i40e_rx_buffer *rx_bi;
+ };
+ unsigned long state;
+ u16 queue_index; /* Queue number of ring */
+ u8 dcb_tc; /* Traffic class of ring */
+ u8 __iomem *tail;
+
+ u16 count; /* Number of descriptors */
+ u16 reg_idx; /* HW register index of the ring */
+ u16 rx_hdr_len;
+ u16 rx_buf_len;
+ u8 dtype;
+#define I40E_RX_DTYPE_NO_SPLIT 0
+#define I40E_RX_DTYPE_SPLIT_ALWAYS 1
+#define I40E_RX_DTYPE_HEADER_SPLIT 2
+ u8 hsplit;
+#define I40E_RX_SPLIT_L2 0x1
+#define I40E_RX_SPLIT_IP 0x2
+#define I40E_RX_SPLIT_TCP_UDP 0x4
+#define I40E_RX_SPLIT_SCTP 0x8
+
+ /* used in interrupt processing */
+ u16 next_to_use;
+ u16 next_to_clean;
+
+ u8 atr_sample_rate;
+ u8 atr_count;
+
+ bool ring_active; /* is ring online or not */
+
+ /* stats structs */
+ struct i40e_queue_stats stats;
+ struct u64_stats_sync syncp;
+ union {
+ struct i40e_tx_queue_stats tx_stats;
+ struct i40e_rx_queue_stats rx_stats;
+ };
+
+ unsigned int size; /* length of descriptor ring in bytes */
+ dma_addr_t dma; /* physical address of ring */
+
+ struct i40e_vsi *vsi; /* Backreference to associated VSI */
+ struct i40e_q_vector *q_vector; /* Backreference to associated vector */
+
+ struct rcu_head rcu; /* to avoid race on free */
+} ____cacheline_internodealigned_in_smp;
+
+enum i40e_latency_range {
+ I40E_LOWEST_LATENCY = 0,
+ I40E_LOW_LATENCY = 1,
+ I40E_BULK_LATENCY = 2,
+};
+
+struct i40e_ring_container {
+ /* array of pointers to rings */
+ struct i40e_ring *ring;
+ unsigned int total_bytes; /* total bytes processed this int */
+ unsigned int total_packets; /* total packets processed this int */
+ u16 count;
+ enum i40e_latency_range latency_range;
+ u16 itr;
+};
+
+/* iterator for handling rings in ring container */
+#define i40e_for_each_ring(pos, head) \
+ for (pos = (head).ring; pos != NULL; pos = pos->next)
+
+void i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
+netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+void i40evf_clean_tx_ring(struct i40e_ring *tx_ring);
+void i40evf_clean_rx_ring(struct i40e_ring *rx_ring);
+int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring);
+int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring);
+void i40evf_free_tx_resources(struct i40e_ring *tx_ring);
+void i40evf_free_rx_resources(struct i40e_ring *rx_ring);
+int i40evf_napi_poll(struct napi_struct *napi, int budget);
+#endif /* _I40E_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
new file mode 100644
index 000000000000..3bffac06592f
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -0,0 +1,1152 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_TYPE_H_
+#define _I40E_TYPE_H_
+
+#include "i40e_status.h"
+#include "i40e_osdep.h"
+#include "i40e_register.h"
+#include "i40e_adminq.h"
+#include "i40e_hmc.h"
+#include "i40e_lan_hmc.h"
+
+/* Device IDs */
+#define I40E_DEV_ID_SFP_XL710 0x1572
+#define I40E_DEV_ID_SFP_X710 0x1573
+#define I40E_DEV_ID_QEMU 0x1574
+#define I40E_DEV_ID_KX_A 0x157F
+#define I40E_DEV_ID_KX_B 0x1580
+#define I40E_DEV_ID_KX_C 0x1581
+#define I40E_DEV_ID_KX_D 0x1582
+#define I40E_DEV_ID_QSFP_A 0x1583
+#define I40E_DEV_ID_QSFP_B 0x1584
+#define I40E_DEV_ID_QSFP_C 0x1585
+#define I40E_DEV_ID_VF 0x154C
+#define I40E_DEV_ID_VF_HV 0x1571
+
+#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
+ (d) == I40E_DEV_ID_QSFP_B || \
+ (d) == I40E_DEV_ID_QSFP_C)
+
+#define I40E_MAX_VSI_QP 16
+#define I40E_MAX_VF_VSI 3
+#define I40E_MAX_CHAINED_RX_BUFFERS 5
+#define I40E_MAX_PF_UDP_OFFLOAD_PORTS 16
+
+/* Max default timeout in ms */
+#define I40E_MAX_NVM_TIMEOUT 18000
+
+/* Switch from ms to the 2usec global time (this is the GTIME resolution) */
+#define I40E_MS_TO_GTIME(time) (((time) * 1000) / 2)
+
+/* forward declaration */
+struct i40e_hw;
+typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
+
+#define ETH_ALEN 6
+
+/* Data type manipulation macros. */
+
+#define I40E_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
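+
+/* Worked example for I40E_DESC_UNUSED (illustrative ring states): with
+ * count = 512, next_to_use = 10 and next_to_clean = 4 the macro yields
+ * 512 + 4 - 10 - 1 = 505 free slots; with next_to_clean = 20 it yields
+ * 0 + 20 - 10 - 1 = 9.  The final -1 keeps one slot permanently unused so
+ * that a full ring can be distinguished from an empty one.
+ */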
+
+/* bitfields for Tx queue mapping in QTX_CTL */
+#define I40E_QTX_CTL_VF_QUEUE 0x0
+#define I40E_QTX_CTL_VM_QUEUE 0x1
+#define I40E_QTX_CTL_PF_QUEUE 0x2
+
+/* debug masks - set these bits in hw->debug_mask to control output */
+enum i40e_debug_mask {
+ I40E_DEBUG_INIT = 0x00000001,
+ I40E_DEBUG_RELEASE = 0x00000002,
+
+ I40E_DEBUG_LINK = 0x00000010,
+ I40E_DEBUG_PHY = 0x00000020,
+ I40E_DEBUG_HMC = 0x00000040,
+ I40E_DEBUG_NVM = 0x00000080,
+ I40E_DEBUG_LAN = 0x00000100,
+ I40E_DEBUG_FLOW = 0x00000200,
+ I40E_DEBUG_DCB = 0x00000400,
+ I40E_DEBUG_DIAG = 0x00000800,
+
+ I40E_DEBUG_AQ_MESSAGE = 0x01000000,
+ I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
+ I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000,
+ I40E_DEBUG_AQ_COMMAND = 0x06000000,
+ I40E_DEBUG_AQ = 0x0F000000,
+
+ I40E_DEBUG_USER = 0xF0000000,
+
+ I40E_DEBUG_ALL = 0xFFFFFFFF
+};
+
+/* PCI Bus Info */
+#define I40E_PCI_LINK_WIDTH_1 0x10
+#define I40E_PCI_LINK_WIDTH_2 0x20
+#define I40E_PCI_LINK_WIDTH_4 0x40
+#define I40E_PCI_LINK_WIDTH_8 0x80
+#define I40E_PCI_LINK_SPEED_2500 0x1
+#define I40E_PCI_LINK_SPEED_5000 0x2
+#define I40E_PCI_LINK_SPEED_8000 0x3
+
+/* These are structs for managing the hardware information and the operations.
+ * The structures of function pointers are filled out at init time when we
+ * know for sure exactly which hardware we're working with. This gives us the
+ * flexibility of using the same main driver code but adapting to slightly
+ * different hardware needs as new parts are developed. For this architecture,
+ * the Firmware and AdminQ are intended to insulate the driver from most of the
+ * future changes, but these structures will also do part of the job.
+ */
+enum i40e_mac_type {
+ I40E_MAC_UNKNOWN = 0,
+ I40E_MAC_X710,
+ I40E_MAC_XL710,
+ I40E_MAC_VF,
+ I40E_MAC_GENERIC,
+};
+
+enum i40e_media_type {
+ I40E_MEDIA_TYPE_UNKNOWN = 0,
+ I40E_MEDIA_TYPE_FIBER,
+ I40E_MEDIA_TYPE_BASET,
+ I40E_MEDIA_TYPE_BACKPLANE,
+ I40E_MEDIA_TYPE_CX4,
+ I40E_MEDIA_TYPE_DA,
+ I40E_MEDIA_TYPE_VIRTUAL
+};
+
+enum i40e_fc_mode {
+ I40E_FC_NONE = 0,
+ I40E_FC_RX_PAUSE,
+ I40E_FC_TX_PAUSE,
+ I40E_FC_FULL,
+ I40E_FC_PFC,
+ I40E_FC_DEFAULT
+};
+
+enum i40e_vsi_type {
+ I40E_VSI_MAIN = 0,
+ I40E_VSI_VMDQ1,
+ I40E_VSI_VMDQ2,
+ I40E_VSI_CTRL,
+ I40E_VSI_FCOE,
+ I40E_VSI_MIRROR,
+ I40E_VSI_SRIOV,
+ I40E_VSI_FDIR,
+ I40E_VSI_TYPE_UNKNOWN
+};
+
+enum i40e_queue_type {
+ I40E_QUEUE_TYPE_RX = 0,
+ I40E_QUEUE_TYPE_TX,
+ I40E_QUEUE_TYPE_PE_CEQ,
+ I40E_QUEUE_TYPE_UNKNOWN
+};
+
+struct i40e_link_status {
+ enum i40e_aq_phy_type phy_type;
+ enum i40e_aq_link_speed link_speed;
+ u8 link_info;
+ u8 an_info;
+ u8 ext_info;
+ u8 loopback;
+ /* is Link Status Event notification to SW enabled */
+ bool lse_enable;
+};
+
+struct i40e_phy_info {
+ struct i40e_link_status link_info;
+ struct i40e_link_status link_info_old;
+ u32 autoneg_advertised;
+ u32 phy_id;
+ u32 module_type;
+ bool get_link_info;
+ enum i40e_media_type media_type;
+};
+
+#define I40E_HW_CAP_MAX_GPIO 30
+/* Capabilities of a PF or a VF or the whole device */
+struct i40e_hw_capabilities {
+ u32 switch_mode;
+#define I40E_NVM_IMAGE_TYPE_EVB 0x0
+#define I40E_NVM_IMAGE_TYPE_CLOUD 0x2
+#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3
+
+ u32 management_mode;
+ u32 npar_enable;
+ u32 os2bmc;
+ u32 valid_functions;
+ bool sr_iov_1_1;
+ bool vmdq;
+ bool evb_802_1_qbg; /* Edge Virtual Bridging */
+ bool evb_802_1_qbh; /* Bridge Port Extension */
+ bool dcb;
+ bool fcoe;
+ bool mfp_mode_1;
+ bool mgmt_cem;
+ bool ieee_1588;
+ bool iwarp;
+ bool fd;
+ u32 fd_filters_guaranteed;
+ u32 fd_filters_best_effort;
+ bool rss;
+ u32 rss_table_size;
+ u32 rss_table_entry_width;
+ bool led[I40E_HW_CAP_MAX_GPIO];
+ bool sdp[I40E_HW_CAP_MAX_GPIO];
+ u32 nvm_image_type;
+ u32 num_flow_director_filters;
+ u32 num_vfs;
+ u32 vf_base_id;
+ u32 num_vsis;
+ u32 num_rx_qp;
+ u32 num_tx_qp;
+ u32 base_queue;
+ u32 num_msix_vectors;
+ u32 num_msix_vectors_vf;
+ u32 led_pin_num;
+ u32 sdp_pin_num;
+ u32 mdio_port_num;
+ u32 mdio_port_mode;
+ u8 rx_buf_chain_len;
+ u32 enabled_tcmap;
+ u32 maxtc;
+};
+
+struct i40e_mac_info {
+ enum i40e_mac_type type;
+ u8 addr[ETH_ALEN];
+ u8 perm_addr[ETH_ALEN];
+ u8 san_addr[ETH_ALEN];
+ u16 max_fcoeq;
+};
+
+enum i40e_aq_resources_ids {
+ I40E_NVM_RESOURCE_ID = 1
+};
+
+enum i40e_aq_resource_access_type {
+ I40E_RESOURCE_READ = 1,
+ I40E_RESOURCE_WRITE
+};
+
+struct i40e_nvm_info {
+ u64 hw_semaphore_timeout; /* 2usec global time (GTIME resolution) */
+ u64 hw_semaphore_wait; /* - || - */
+ u32 timeout; /* [ms] */
+ u16 sr_size; /* Shadow RAM size in words */
+	bool blank_nvm_mode; /* is NVM empty (no FW present) */
+ u16 version; /* NVM package version */
+ u32 eetrack; /* NVM data version */
+};
+
+/* PCI bus types */
+enum i40e_bus_type {
+ i40e_bus_type_unknown = 0,
+ i40e_bus_type_pci,
+ i40e_bus_type_pcix,
+ i40e_bus_type_pci_express,
+ i40e_bus_type_reserved
+};
+
+/* PCI bus speeds */
+enum i40e_bus_speed {
+ i40e_bus_speed_unknown = 0,
+ i40e_bus_speed_33 = 33,
+ i40e_bus_speed_66 = 66,
+ i40e_bus_speed_100 = 100,
+ i40e_bus_speed_120 = 120,
+ i40e_bus_speed_133 = 133,
+ i40e_bus_speed_2500 = 2500,
+ i40e_bus_speed_5000 = 5000,
+ i40e_bus_speed_8000 = 8000,
+ i40e_bus_speed_reserved
+};
+
+/* PCI bus widths */
+enum i40e_bus_width {
+ i40e_bus_width_unknown = 0,
+ i40e_bus_width_pcie_x1 = 1,
+ i40e_bus_width_pcie_x2 = 2,
+ i40e_bus_width_pcie_x4 = 4,
+ i40e_bus_width_pcie_x8 = 8,
+ i40e_bus_width_32 = 32,
+ i40e_bus_width_64 = 64,
+ i40e_bus_width_reserved
+};
+
+/* Bus parameters */
+struct i40e_bus_info {
+ enum i40e_bus_speed speed;
+ enum i40e_bus_width width;
+ enum i40e_bus_type type;
+
+ u16 func;
+ u16 device;
+ u16 lan_id;
+};
+
+/* Flow control (FC) parameters */
+struct i40e_fc_info {
+ enum i40e_fc_mode current_mode; /* FC mode in effect */
+ enum i40e_fc_mode requested_mode; /* FC mode requested by caller */
+};
+
+#define I40E_MAX_TRAFFIC_CLASS 8
+#define I40E_MAX_USER_PRIORITY 8
+#define I40E_DCBX_MAX_APPS 32
+#define I40E_LLDPDU_SIZE 1500
+
+/* IEEE 802.1Qaz ETS Configuration data */
+struct i40e_ieee_ets_config {
+ u8 willing;
+ u8 cbs;
+ u8 maxtcs;
+ u8 prioritytable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
+};
+
+/* IEEE 802.1Qaz ETS Recommendation data */
+struct i40e_ieee_ets_recommend {
+ u8 prioritytable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
+};
+
+/* IEEE 802.1Qaz PFC Configuration data */
+struct i40e_ieee_pfc_config {
+ u8 willing;
+ u8 mbc;
+ u8 pfccap;
+ u8 pfcenable;
+};
+
+/* IEEE 802.1Qaz Application Priority data */
+struct i40e_ieee_app_priority_table {
+ u8 priority;
+ u8 selector;
+ u16 protocolid;
+};
+
+struct i40e_dcbx_config {
+ u32 numapps;
+ struct i40e_ieee_ets_config etscfg;
+ struct i40e_ieee_ets_recommend etsrec;
+ struct i40e_ieee_pfc_config pfc;
+ struct i40e_ieee_app_priority_table app[I40E_DCBX_MAX_APPS];
+};
+
+/* Port hardware description */
+struct i40e_hw {
+ u8 __iomem *hw_addr;
+ void *back;
+
+ /* function pointer structs */
+ struct i40e_phy_info phy;
+ struct i40e_mac_info mac;
+ struct i40e_bus_info bus;
+ struct i40e_nvm_info nvm;
+ struct i40e_fc_info fc;
+
+ /* pci info */
+ u16 device_id;
+ u16 vendor_id;
+ u16 subsystem_device_id;
+ u16 subsystem_vendor_id;
+ u8 revision_id;
+ u8 port;
+ bool adapter_stopped;
+
+ /* capabilities for entire device and PCI func */
+ struct i40e_hw_capabilities dev_caps;
+ struct i40e_hw_capabilities func_caps;
+
+ /* Flow Director shared filter space */
+ u16 fdir_shared_filter_count;
+
+ /* device profile info */
+ u8 pf_id;
+ u16 main_vsi_seid;
+
+ /* Closest numa node to the device */
+ u16 numa_node;
+
+ /* Admin Queue info */
+ struct i40e_adminq_info aq;
+
+ /* HMC info */
+ struct i40e_hmc_info hmc; /* HMC info struct */
+
+ /* LLDP/DCBX Status */
+ u16 dcbx_status;
+
+ /* DCBX info */
+ struct i40e_dcbx_config local_dcbx_config;
+ struct i40e_dcbx_config remote_dcbx_config;
+
+ /* debug mask */
+ u32 debug_mask;
+};
+
+struct i40e_driver_version {
+ u8 major_version;
+ u8 minor_version;
+ u8 build_version;
+ u8 subbuild_version;
+};
+
+/* RX Descriptors */
+union i40e_16byte_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+ struct {
+ union {
+ __le16 mirroring_status;
+ __le16 fcoe_ctx_id;
+ } mirr_fcoe;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ __le32 fd_id; /* Flow director filter id */
+ __le32 fcoe_param; /* FCoE DDP Context id */
+ } hi_dword;
+ } qword0;
+ struct {
+ /* ext status/error/pktype/length */
+ __le64 status_error_len;
+ } qword1;
+ } wb; /* writeback */
+};
+
+union i40e_32byte_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ /* bit 0 of hdr_buffer_addr is DD bit */
+ __le64 rsvd1;
+ __le64 rsvd2;
+ } read;
+ struct {
+ struct {
+ struct {
+ union {
+ __le16 mirroring_status;
+ __le16 fcoe_ctx_id;
+ } mirr_fcoe;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ __le32 fcoe_param; /* FCoE DDP Context id */
+ } hi_dword;
+ } qword0;
+ struct {
+ /* status/error/pktype/length */
+ __le64 status_error_len;
+ } qword1;
+ struct {
+ __le16 ext_status; /* extended status */
+ __le16 rsvd;
+ __le16 l2tag2_1;
+ __le16 l2tag2_2;
+ } qword2;
+ struct {
+ union {
+ __le32 flex_bytes_lo;
+ __le32 pe_status;
+ } lo_dword;
+ union {
+ __le32 flex_bytes_hi;
+ __le32 fd_id;
+ } hi_dword;
+ } qword3;
+ } wb; /* writeback */
+};
+
+#define I40E_RXD_QW1_STATUS_SHIFT 0
+#define I40E_RXD_QW1_STATUS_MASK (0x7FFFUL << I40E_RXD_QW1_STATUS_SHIFT)
+
+enum i40e_rx_desc_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_STATUS_DD_SHIFT = 0,
+ I40E_RX_DESC_STATUS_EOF_SHIFT = 1,
+ I40E_RX_DESC_STATUS_L2TAG1P_SHIFT = 2,
+ I40E_RX_DESC_STATUS_L3L4P_SHIFT = 3,
+ I40E_RX_DESC_STATUS_CRCP_SHIFT = 4,
+ I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
+ I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
+ I40E_RX_DESC_STATUS_PIF_SHIFT = 8,
+ I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
+ I40E_RX_DESC_STATUS_FLM_SHIFT = 11,
+ I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */
+ I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
+ I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
+ I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
+ I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18
+};
+
+#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
+#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
+ I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
+
+#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
+#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK (0x1UL << \
+ I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+
+enum i40e_rx_desc_fltstat_values {
+ I40E_RX_DESC_FLTSTAT_NO_DATA = 0,
+ I40E_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? FD_ID : RSV */
+ I40E_RX_DESC_FLTSTAT_RSV = 2,
+ I40E_RX_DESC_FLTSTAT_RSS_HASH = 3,
+};
+
+#define I40E_RXD_QW1_ERROR_SHIFT 19
+#define I40E_RXD_QW1_ERROR_MASK (0xFFUL << I40E_RXD_QW1_ERROR_SHIFT)
+
+enum i40e_rx_desc_error_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_ERROR_RXE_SHIFT = 0,
+ I40E_RX_DESC_ERROR_RECIPE_SHIFT = 1,
+ I40E_RX_DESC_ERROR_HBO_SHIFT = 2,
+ I40E_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */
+ I40E_RX_DESC_ERROR_IPE_SHIFT = 3,
+ I40E_RX_DESC_ERROR_L4E_SHIFT = 4,
+ I40E_RX_DESC_ERROR_EIPE_SHIFT = 5,
+ I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6
+};
+
+enum i40e_rx_desc_error_l3l4e_fcoe_masks {
+ I40E_RX_DESC_ERROR_L3L4E_NONE = 0,
+ I40E_RX_DESC_ERROR_L3L4E_PROT = 1,
+ I40E_RX_DESC_ERROR_L3L4E_FC = 2,
+ I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR = 3,
+ I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4
+};
+
+#define I40E_RXD_QW1_PTYPE_SHIFT 30
+#define I40E_RXD_QW1_PTYPE_MASK (0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT)
+
+/* Packet type non-ip values */
+enum i40e_rx_l2_ptype {
+ I40E_RX_PTYPE_L2_RESERVED = 0,
+ I40E_RX_PTYPE_L2_MAC_PAY2 = 1,
+ I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
+ I40E_RX_PTYPE_L2_FIP_PAY2 = 3,
+ I40E_RX_PTYPE_L2_OUI_PAY2 = 4,
+ I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
+ I40E_RX_PTYPE_L2_LLDP_PAY2 = 6,
+ I40E_RX_PTYPE_L2_ECP_PAY2 = 7,
+ I40E_RX_PTYPE_L2_EVB_PAY2 = 8,
+ I40E_RX_PTYPE_L2_QCN_PAY2 = 9,
+ I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10,
+ I40E_RX_PTYPE_L2_ARP = 11,
+ I40E_RX_PTYPE_L2_FCOE_PAY3 = 12,
+ I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
+ I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
+ I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
+ I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
+ I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21,
+ I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58,
+ I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87,
+ I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124,
+ I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153
+};
+
+struct i40e_rx_ptype_decoded {
+ u32 ptype:8;
+ u32 known:1;
+ u32 outer_ip:1;
+ u32 outer_ip_ver:1;
+ u32 outer_frag:1;
+ u32 tunnel_type:3;
+ u32 tunnel_end_prot:2;
+ u32 tunnel_end_frag:1;
+ u32 inner_prot:4;
+ u32 payload_layer:3;
+};
+
+enum i40e_rx_ptype_outer_ip {
+ I40E_RX_PTYPE_OUTER_L2 = 0,
+ I40E_RX_PTYPE_OUTER_IP = 1
+};
+
+enum i40e_rx_ptype_outer_ip_ver {
+ I40E_RX_PTYPE_OUTER_NONE = 0,
+ I40E_RX_PTYPE_OUTER_IPV4 = 0,
+ I40E_RX_PTYPE_OUTER_IPV6 = 1
+};
+
+enum i40e_rx_ptype_outer_fragmented {
+ I40E_RX_PTYPE_NOT_FRAG = 0,
+ I40E_RX_PTYPE_FRAG = 1
+};
+
+enum i40e_rx_ptype_tunnel_type {
+ I40E_RX_PTYPE_TUNNEL_NONE = 0,
+ I40E_RX_PTYPE_TUNNEL_IP_IP = 1,
+ I40E_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
+ I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
+ I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
+};
+
+enum i40e_rx_ptype_tunnel_end_prot {
+ I40E_RX_PTYPE_TUNNEL_END_NONE = 0,
+ I40E_RX_PTYPE_TUNNEL_END_IPV4 = 1,
+ I40E_RX_PTYPE_TUNNEL_END_IPV6 = 2,
+};
+
+enum i40e_rx_ptype_inner_prot {
+ I40E_RX_PTYPE_INNER_PROT_NONE = 0,
+ I40E_RX_PTYPE_INNER_PROT_UDP = 1,
+ I40E_RX_PTYPE_INNER_PROT_TCP = 2,
+ I40E_RX_PTYPE_INNER_PROT_SCTP = 3,
+ I40E_RX_PTYPE_INNER_PROT_ICMP = 4,
+ I40E_RX_PTYPE_INNER_PROT_TIMESYNC = 5
+};
+
+enum i40e_rx_ptype_payload_layer {
+ I40E_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
+ I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
+ I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
+ I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
+};
+
+#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38
+#define I40E_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT)
+
+#define I40E_RXD_QW1_LENGTH_HBUF_SHIFT 52
+#define I40E_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \
+ I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
+
+#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63
+#define I40E_RXD_QW1_LENGTH_SPH_MASK (0x1ULL << \
+ I40E_RXD_QW1_LENGTH_SPH_SHIFT)
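+
+/* Putting the QW1 masks above together, the writeback qword1 layout is:
+ *
+ *   bits  0-14  status          (I40E_RXD_QW1_STATUS_MASK)
+ *   bits 19-26  error           (I40E_RXD_QW1_ERROR_MASK)
+ *   bits 30-37  ptype           (I40E_RXD_QW1_PTYPE_MASK)
+ *   bits 38-51  packet buf len  (I40E_RXD_QW1_LENGTH_PBUF_MASK)
+ *   bits 52-62  header buf len  (I40E_RXD_QW1_LENGTH_HBUF_MASK)
+ *   bit  63     split header    (I40E_RXD_QW1_LENGTH_SPH_MASK)
+ */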
+
+enum i40e_rx_desc_ext_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT = 0,
+ I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1,
+ I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */
+ I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */
+ I40E_RX_DESC_EXT_STATUS_FTYPE_SHIFT = 6, /* 3 BITS */
+ I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9,
+ I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10,
+ I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
+};
+
+enum i40e_rx_desc_pe_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */
+ I40E_RX_DESC_PE_STATUS_L4PORT_SHIFT = 0, /* 16 BITS */
+ I40E_RX_DESC_PE_STATUS_IPINDEX_SHIFT = 16, /* 8 BITS */
+ I40E_RX_DESC_PE_STATUS_QPIDHIT_SHIFT = 24,
+ I40E_RX_DESC_PE_STATUS_APBVTHIT_SHIFT = 25,
+ I40E_RX_DESC_PE_STATUS_PORTV_SHIFT = 26,
+ I40E_RX_DESC_PE_STATUS_URG_SHIFT = 27,
+ I40E_RX_DESC_PE_STATUS_IPFRAG_SHIFT = 28,
+ I40E_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29
+};
+
+#define I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38
+#define I40E_RX_PROG_STATUS_DESC_LENGTH 0x2000000
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2
+#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK (0x7UL << \
+ I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT)
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT 19
+#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK (0x3FUL << \
+ I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT)
+
+enum i40e_rx_prog_status_desc_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_PROG_STATUS_DESC_DD_SHIFT = 0,
+ I40E_RX_PROG_STATUS_DESC_PROG_ID_SHIFT = 2 /* 3 BITS */
+};
+
+enum i40e_rx_prog_status_desc_prog_id_masks {
+ I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS = 1,
+ I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS = 2,
+ I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS = 4,
+};
+
+enum i40e_rx_prog_status_desc_error_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0,
+ I40E_RX_PROG_STATUS_DESC_NO_FD_QUOTA_SHIFT = 1,
+ I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2,
+ I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3
+};
+
+/* TX Descriptor */
+struct i40e_tx_desc {
+ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le64 cmd_type_offset_bsz;
+};
+
+#define I40E_TXD_QW1_DTYPE_SHIFT 0
+#define I40E_TXD_QW1_DTYPE_MASK (0xFUL << I40E_TXD_QW1_DTYPE_SHIFT)
+
+enum i40e_tx_desc_dtype_value {
+ I40E_TX_DESC_DTYPE_DATA = 0x0,
+ I40E_TX_DESC_DTYPE_NOP = 0x1, /* same as Context desc */
+ I40E_TX_DESC_DTYPE_CONTEXT = 0x1,
+ I40E_TX_DESC_DTYPE_FCOE_CTX = 0x2,
+ I40E_TX_DESC_DTYPE_FILTER_PROG = 0x8,
+ I40E_TX_DESC_DTYPE_DDP_CTX = 0x9,
+ I40E_TX_DESC_DTYPE_FLEX_DATA = 0xB,
+ I40E_TX_DESC_DTYPE_FLEX_CTX_1 = 0xC,
+ I40E_TX_DESC_DTYPE_FLEX_CTX_2 = 0xD,
+ I40E_TX_DESC_DTYPE_DESC_DONE = 0xF
+};
+
+#define I40E_TXD_QW1_CMD_SHIFT 4
+#define I40E_TXD_QW1_CMD_MASK (0x3FFUL << I40E_TXD_QW1_CMD_SHIFT)
+
+enum i40e_tx_desc_cmd_bits {
+ I40E_TX_DESC_CMD_EOP = 0x0001,
+ I40E_TX_DESC_CMD_RS = 0x0002,
+ I40E_TX_DESC_CMD_ICRC = 0x0004,
+ I40E_TX_DESC_CMD_IL2TAG1 = 0x0008,
+ I40E_TX_DESC_CMD_DUMMY = 0x0010,
+ I40E_TX_DESC_CMD_IIPT_NONIP = 0x0000, /* 2 BITS */
+ I40E_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */
+ I40E_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */
+ I40E_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */
+ I40E_TX_DESC_CMD_FCOET = 0x0080,
+ I40E_TX_DESC_CMD_L4T_EOFT_UNK = 0x0000, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_N = 0x0000, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_T = 0x0100, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI = 0x0200, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_A = 0x0300, /* 2 BITS */
+};
+
+#define I40E_TXD_QW1_OFFSET_SHIFT 16
+#define I40E_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \
+ I40E_TXD_QW1_OFFSET_SHIFT)
+
+enum i40e_tx_desc_length_fields {
+ /* Note: These are predefined bit offsets */
+ I40E_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */
+ I40E_TX_DESC_LENGTH_IPLEN_SHIFT = 7, /* 7 BITS */
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT = 14 /* 4 BITS */
+};
+
+#define I40E_TXD_QW1_TX_BUF_SZ_SHIFT 34
+#define I40E_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \
+ I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
+
+#define I40E_TXD_QW1_L2TAG1_SHIFT 48
+#define I40E_TXD_QW1_L2TAG1_MASK (0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT)
+
+/* Context descriptors */
+struct i40e_tx_context_desc {
+ __le32 tunneling_params;
+ __le16 l2tag2;
+ __le16 rsvd;
+ __le64 type_cmd_tso_mss;
+};
+
+#define I40E_TXD_CTX_QW1_DTYPE_SHIFT 0
+#define I40E_TXD_CTX_QW1_DTYPE_MASK (0xFUL << I40E_TXD_CTX_QW1_DTYPE_SHIFT)
+
+#define I40E_TXD_CTX_QW1_CMD_SHIFT 4
+#define I40E_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT)
+
+enum i40e_tx_ctx_desc_cmd_bits {
+ I40E_TX_CTX_DESC_TSO = 0x01,
+ I40E_TX_CTX_DESC_TSYN = 0x02,
+ I40E_TX_CTX_DESC_IL2TAG2 = 0x04,
+ I40E_TX_CTX_DESC_IL2TAG2_IL2H = 0x08,
+ I40E_TX_CTX_DESC_SWTCH_NOTAG = 0x00,
+ I40E_TX_CTX_DESC_SWTCH_UPLINK = 0x10,
+ I40E_TX_CTX_DESC_SWTCH_LOCAL = 0x20,
+ I40E_TX_CTX_DESC_SWTCH_VSI = 0x30,
+ I40E_TX_CTX_DESC_SWPE = 0x40
+};
+
+#define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT 30
+#define I40E_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \
+ I40E_TXD_CTX_QW1_TSO_LEN_SHIFT)
+
+#define I40E_TXD_CTX_QW1_MSS_SHIFT 50
+#define I40E_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \
+ I40E_TXD_CTX_QW1_MSS_SHIFT)
+
+#define I40E_TXD_CTX_QW1_VSI_SHIFT 50
+#define I40E_TXD_CTX_QW1_VSI_MASK (0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT)
+
+#define I40E_TXD_CTX_QW0_EXT_IP_SHIFT 0
+#define I40E_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \
+ I40E_TXD_CTX_QW0_EXT_IP_SHIFT)
+
+enum i40e_tx_ctx_desc_eipt_offload {
+ I40E_TX_CTX_EXT_IP_NONE = 0x0,
+ I40E_TX_CTX_EXT_IP_IPV6 = 0x1,
+ I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2,
+ I40E_TX_CTX_EXT_IP_IPV4 = 0x3
+};
+
+#define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2
+#define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \
+ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT)
+
+#define I40E_TXD_CTX_QW0_NATT_SHIFT 9
+#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+
+#define I40E_TXD_CTX_UDP_TUNNELING (0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+#define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+
+#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11
+#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK (0x1ULL << \
+ I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+
+#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK
+
+#define I40E_TXD_CTX_QW0_NATLEN_SHIFT 12
+#define I40E_TXD_CTX_QW0_NATLEN_MASK	(0x7FULL << \
+ I40E_TXD_CTX_QW0_NATLEN_SHIFT)
+
+#define I40E_TXD_CTX_QW0_DECTTL_SHIFT 19
+#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \
+ I40E_TXD_CTX_QW0_DECTTL_SHIFT)
+
+struct i40e_filter_program_desc {
+ __le32 qindex_flex_ptype_vsi;
+ __le32 rsvd;
+ __le32 dtype_cmd_cntindex;
+ __le32 fd_id;
+};
+#define I40E_TXD_FLTR_QW0_QINDEX_SHIFT 0
+#define I40E_TXD_FLTR_QW0_QINDEX_MASK (0x7FFUL << \
+ I40E_TXD_FLTR_QW0_QINDEX_SHIFT)
+#define I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT 11
+#define I40E_TXD_FLTR_QW0_FLEXOFF_MASK (0x7UL << \
+ I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT)
+#define I40E_TXD_FLTR_QW0_PCTYPE_SHIFT 17
+#define I40E_TXD_FLTR_QW0_PCTYPE_MASK (0x3FUL << \
+ I40E_TXD_FLTR_QW0_PCTYPE_SHIFT)
+
+/* Packet Classifier Types for filters */
+enum i40e_filter_pctype {
+ /* Note: Values 0-28 are reserved for future use */
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
+ I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN = 32,
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
+ I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
+ I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
+ I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
+ /* Note: Values 37-38 are reserved for future use */
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
+ I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN = 42,
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
+ I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
+ I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
+ I40E_FILTER_PCTYPE_FRAG_IPV6 = 46,
+ /* Note: Value 47 is reserved for future use */
+ I40E_FILTER_PCTYPE_FCOE_OX = 48,
+ I40E_FILTER_PCTYPE_FCOE_RX = 49,
+ I40E_FILTER_PCTYPE_FCOE_OTHER = 50,
+ /* Note: Values 51-62 are reserved for future use */
+ I40E_FILTER_PCTYPE_L2_PAYLOAD = 63,
+};
+
+enum i40e_filter_program_desc_dest {
+ I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET = 0x0,
+ I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX = 0x1,
+ I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER = 0x2,
+};
+
+enum i40e_filter_program_desc_fd_status {
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_NONE = 0x0,
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID = 0x1,
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID_4FLEX_BYTES = 0x2,
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_8FLEX_BYTES = 0x3,
+};
+
+#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23
+#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \
+ I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CMD_SHIFT 4
+#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \
+ I40E_TXD_FLTR_QW1_CMD_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_PCMD_SHIFT (0x0ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_PCMD_MASK (0x7ULL << I40E_TXD_FLTR_QW1_PCMD_SHIFT)
+
+enum i40e_filter_program_desc_pcmd {
+ I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE = 0x1,
+ I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE = 0x2,
+};
+
+#define I40E_TXD_FLTR_QW1_DEST_SHIFT (0x3ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK (0x1ULL << \
+ I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \
+ I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \
+ I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
+#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \
+ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
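+
+/* Illustrative sketch (not driver code): composing the command portion of
+ * dtype_cmd_cntindex from the QW1 fields above, for an add/update that
+ * directs matches to a queue index and reports the filter ID.
+ *
+ * u32 dcc = 0;
+ *
+ * dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
+ *        I40E_TXD_FLTR_QW1_PCMD_SHIFT;
+ * dcc |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
+ *        I40E_TXD_FLTR_QW1_DEST_SHIFT;
+ * dcc |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
+ *        I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
+ * desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
+ */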
+
+enum i40e_filter_type {
+ I40E_FLOW_DIRECTOR_FLTR = 0,
+ I40E_PE_QUAD_HASH_FLTR = 1,
+ I40E_ETHERTYPE_FLTR,
+ I40E_FCOE_CTX_FLTR,
+ I40E_MAC_VLAN_FLTR,
+ I40E_HASH_FLTR
+};
+
+struct i40e_vsi_context {
+ u16 seid;
+ u16 uplink_seid;
+ u16 vsi_number;
+ u16 vsis_allocated;
+ u16 vsis_unallocated;
+ u16 flags;
+ u8 pf_num;
+ u8 vf_num;
+ u8 connection_type;
+ struct i40e_aqc_vsi_properties_data info;
+};
+
+/* Statistics collected by each port, VSI, VEB, and S-channel */
+struct i40e_eth_stats {
+ u64 rx_bytes; /* gorc */
+ u64 rx_unicast; /* uprc */
+ u64 rx_multicast; /* mprc */
+ u64 rx_broadcast; /* bprc */
+ u64 rx_discards; /* rdpc */
+ u64 rx_errors; /* repc */
+ u64 rx_missed; /* rmpc */
+ u64 rx_unknown_protocol; /* rupp */
+ u64 tx_bytes; /* gotc */
+ u64 tx_unicast; /* uptc */
+ u64 tx_multicast; /* mptc */
+ u64 tx_broadcast; /* bptc */
+ u64 tx_discards; /* tdpc */
+ u64 tx_errors; /* tepc */
+};
+
+/* Statistics collected by the MAC */
+struct i40e_hw_port_stats {
+ /* eth stats collected by the port */
+ struct i40e_eth_stats eth;
+
+ /* additional port specific stats */
+ u64 tx_dropped_link_down; /* tdold */
+ u64 crc_errors; /* crcerrs */
+ u64 illegal_bytes; /* illerrc */
+ u64 error_bytes; /* errbc */
+ u64 mac_local_faults; /* mlfc */
+ u64 mac_remote_faults; /* mrfc */
+ u64 rx_length_errors; /* rlec */
+ u64 link_xon_rx; /* lxonrxc */
+ u64 link_xoff_rx; /* lxoffrxc */
+ u64 priority_xon_rx[8]; /* pxonrxc[8] */
+ u64 priority_xoff_rx[8]; /* pxoffrxc[8] */
+ u64 link_xon_tx; /* lxontxc */
+ u64 link_xoff_tx; /* lxofftxc */
+ u64 priority_xon_tx[8]; /* pxontxc[8] */
+ u64 priority_xoff_tx[8]; /* pxofftxc[8] */
+ u64 priority_xon_2_xoff[8]; /* pxon2offc[8] */
+ u64 rx_size_64; /* prc64 */
+ u64 rx_size_127; /* prc127 */
+ u64 rx_size_255; /* prc255 */
+ u64 rx_size_511; /* prc511 */
+ u64 rx_size_1023; /* prc1023 */
+ u64 rx_size_1522; /* prc1522 */
+ u64 rx_size_big; /* prc9522 */
+ u64 rx_undersize; /* ruc */
+ u64 rx_fragments; /* rfc */
+ u64 rx_oversize; /* roc */
+ u64 rx_jabber; /* rjc */
+ u64 tx_size_64; /* ptc64 */
+ u64 tx_size_127; /* ptc127 */
+ u64 tx_size_255; /* ptc255 */
+ u64 tx_size_511; /* ptc511 */
+ u64 tx_size_1023; /* ptc1023 */
+ u64 tx_size_1522; /* ptc1522 */
+ u64 tx_size_big; /* ptc9522 */
+ u64 mac_short_packet_dropped; /* mspdc */
+ u64 checksum_error; /* xec */
+};
+
+/* Checksum and Shadow RAM pointers */
+#define I40E_SR_NVM_CONTROL_WORD 0x00
+#define I40E_SR_EMP_MODULE_PTR 0x0F
+#define I40E_SR_NVM_IMAGE_VERSION 0x18
+#define I40E_SR_NVM_WAKE_ON_LAN 0x19
+#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27
+#define I40E_SR_NVM_EETRACK_LO 0x2D
+#define I40E_SR_NVM_EETRACK_HI 0x2E
+#define I40E_SR_VPD_PTR 0x2F
+#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E
+#define I40E_SR_SW_CHECKSUM_WORD 0x3F
+
+/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
+#define I40E_SR_VPD_MODULE_MAX_SIZE 1024
+#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024
+#define I40E_SR_CONTROL_WORD_1_SHIFT 0x06
+#define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT)
+
+/* Shadow RAM related */
+#define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800
+#define I40E_SR_WORDS_IN_1KB 512
+/* Checksum should be calculated such that after adding all the words,
+ * including the checksum word itself, the sum should be 0xBABA.
+ */
+#define I40E_SR_SW_CHECKSUM_BASE 0xBABA
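+
+/* Illustrative sketch (not a driver function): per the rule above, a
+ * verifier sums every covered word, including the checksum word itself,
+ * and compares against the base value. read_sr_word() is a hypothetical
+ * 16-bit Shadow RAM accessor and nwords the number of covered words.
+ *
+ * bool i40e_sr_checksum_ok(u32 nwords)
+ * {
+ *     u16 sum = 0;
+ *     u32 i;
+ *
+ *     for (i = 0; i < nwords; i++)
+ *         sum += read_sr_word(i);
+ *     return sum == I40E_SR_SW_CHECKSUM_BASE;
+ * }
+ */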
+
+#define I40E_SRRD_SRCTL_ATTEMPTS 100000
+
+enum i40e_switch_element_types {
+ I40E_SWITCH_ELEMENT_TYPE_MAC = 1,
+ I40E_SWITCH_ELEMENT_TYPE_PF = 2,
+ I40E_SWITCH_ELEMENT_TYPE_VF = 3,
+ I40E_SWITCH_ELEMENT_TYPE_EMP = 4,
+ I40E_SWITCH_ELEMENT_TYPE_BMC = 6,
+ I40E_SWITCH_ELEMENT_TYPE_PE = 16,
+ I40E_SWITCH_ELEMENT_TYPE_VEB = 17,
+ I40E_SWITCH_ELEMENT_TYPE_PA = 18,
+ I40E_SWITCH_ELEMENT_TYPE_VSI = 19,
+};
+
+/* Supported EtherType filters */
+enum i40e_ether_type_index {
+ I40E_ETHER_TYPE_1588 = 0,
+ I40E_ETHER_TYPE_FIP = 1,
+ I40E_ETHER_TYPE_OUI_EXTENDED = 2,
+ I40E_ETHER_TYPE_MAC_CONTROL = 3,
+ I40E_ETHER_TYPE_LLDP = 4,
+ I40E_ETHER_TYPE_EVB_PROTOCOL1 = 5,
+ I40E_ETHER_TYPE_EVB_PROTOCOL2 = 6,
+ I40E_ETHER_TYPE_QCN_CNM = 7,
+ I40E_ETHER_TYPE_8021X = 8,
+ I40E_ETHER_TYPE_ARP = 9,
+ I40E_ETHER_TYPE_RSV1 = 10,
+ I40E_ETHER_TYPE_RSV2 = 11,
+};
+
+/* Filter context base size is 1K */
+#define I40E_HASH_FILTER_BASE_SIZE 1024
+/* Supported Hash filter values */
+enum i40e_hash_filter_size {
+ I40E_HASH_FILTER_SIZE_1K = 0,
+ I40E_HASH_FILTER_SIZE_2K = 1,
+ I40E_HASH_FILTER_SIZE_4K = 2,
+ I40E_HASH_FILTER_SIZE_8K = 3,
+ I40E_HASH_FILTER_SIZE_16K = 4,
+ I40E_HASH_FILTER_SIZE_32K = 5,
+ I40E_HASH_FILTER_SIZE_64K = 6,
+ I40E_HASH_FILTER_SIZE_128K = 7,
+ I40E_HASH_FILTER_SIZE_256K = 8,
+ I40E_HASH_FILTER_SIZE_512K = 9,
+ I40E_HASH_FILTER_SIZE_1M = 10,
+};
+
+/* DMA context base size is 0.5K */
+#define I40E_DMA_CNTX_BASE_SIZE 512
+/* Supported DMA context values */
+enum i40e_dma_cntx_size {
+ I40E_DMA_CNTX_SIZE_512 = 0,
+ I40E_DMA_CNTX_SIZE_1K = 1,
+ I40E_DMA_CNTX_SIZE_2K = 2,
+ I40E_DMA_CNTX_SIZE_4K = 3,
+ I40E_DMA_CNTX_SIZE_8K = 4,
+ I40E_DMA_CNTX_SIZE_16K = 5,
+ I40E_DMA_CNTX_SIZE_32K = 6,
+ I40E_DMA_CNTX_SIZE_64K = 7,
+ I40E_DMA_CNTX_SIZE_128K = 8,
+ I40E_DMA_CNTX_SIZE_256K = 9,
+};
+
+/* Supported Hash look up table (LUT) sizes */
+enum i40e_hash_lut_size {
+ I40E_HASH_LUT_SIZE_128 = 0,
+ I40E_HASH_LUT_SIZE_512 = 1,
+};
+
+/* Structure to hold a per PF filter control settings */
+struct i40e_filter_control_settings {
+ /* number of PE Quad Hash filter buckets */
+ enum i40e_hash_filter_size pe_filt_num;
+ /* number of PE Quad Hash contexts */
+ enum i40e_dma_cntx_size pe_cntx_num;
+ /* number of FCoE filter buckets */
+ enum i40e_hash_filter_size fcoe_filt_num;
+ /* number of FCoE DDP contexts */
+ enum i40e_dma_cntx_size fcoe_cntx_num;
+ /* size of the Hash LUT */
+ enum i40e_hash_lut_size hash_lut_size;
+ /* enable FDIR filters for PF and its VFs */
+ bool enable_fdir;
+ /* enable Ethertype filters for PF and its VFs */
+ bool enable_ethtype;
+ /* enable MAC/VLAN filters for PF and its VFs */
+ bool enable_macvlan;
+};
+
+/* Structure to hold device level control filter counts */
+struct i40e_control_filter_stats {
+ u16 mac_etype_used; /* Used perfect match MAC/EtherType filters */
+ u16 etype_used; /* Used perfect EtherType filters */
+ u16 mac_etype_free; /* Unused perfect match MAC/EtherType filters */
+ u16 etype_free; /* Unused perfect EtherType filters */
+};
+
+enum i40e_reset_type {
+ I40E_RESET_POR = 0,
+ I40E_RESET_CORER = 1,
+ I40E_RESET_GLOBR = 2,
+ I40E_RESET_EMPR = 3,
+};
+#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
new file mode 100644
index 000000000000..ccf45d04b7ef
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
@@ -0,0 +1,364 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_VIRTCHNL_H_
+#define _I40E_VIRTCHNL_H_
+
+#include "i40e_type.h"
+
+/* Description:
+ * This header file describes the VF-PF communication protocol used
+ * by the various i40e drivers.
+ *
+ * Admin queue buffer usage:
+ * desc->opcode is always i40e_aqc_opc_send_msg_to_pf
+ * flags, retval, datalen, and data addr are all used normally.
+ * Firmware copies the cookie fields when sending messages between the PF and
+ * VF, but uses all other fields internally. Due to this limitation, we
+ * must send all messages as "indirect", i.e., using an external buffer.
+ *
+ * All the VSI indexes are relative to the VF. Each VF can have a maximum
+ * of three VSIs. All the queue indexes are relative to the VSI. Each VF
+ * can have a maximum of sixteen queues for all of its VSIs.
+ *
+ * The PF is required to return a status code in v_retval for all messages
+ * except RESET_VF, which does not require any response. The return value
+ * is of type i40e_status_code, defined in i40e_type.h.
+ *
+ * In general, VF driver initialization should roughly follow the order of these
+ * opcodes. The VF driver must first validate the API version of the PF driver,
+ * then request a reset, then get resources, then configure queues and
+ * interrupts. After these operations are complete, the VF driver may start
+ * its queues, optionally add MAC and VLAN filters, and process traffic.
+ */
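+
+/* A typical VF bring-up, following the ordering described above
+ * (illustrative pseudo-sequence, not driver code):
+ *
+ * send I40E_VIRTCHNL_OP_VERSION and verify the PF's reply
+ * send I40E_VIRTCHNL_OP_RESET_VF and poll VFGEN_RSTAT for completion
+ * send I40E_VIRTCHNL_OP_GET_VF_RESOURCES and parse the reply
+ * send I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
+ * send I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
+ * send I40E_VIRTCHNL_OP_ENABLE_QUEUES
+ * optionally add filters with I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, etc.
+ */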
+
+/* Opcodes for VF-PF communication. These are placed in the v_opcode field
+ * of the virtchnl_msg structure.
+ */
+enum i40e_virtchnl_ops {
+/* The VF sends requests to the PF using the following ops. */
+ I40E_VIRTCHNL_OP_UNKNOWN = 0,
+ I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
+ I40E_VIRTCHNL_OP_RESET_VF,
+ I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE,
+ I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE,
+ I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+ I40E_VIRTCHNL_OP_ADD_VLAN,
+ I40E_VIRTCHNL_OP_DEL_VLAN,
+ I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+ I40E_VIRTCHNL_OP_GET_STATS,
+ I40E_VIRTCHNL_OP_FCOE,
+/* The PF sends status change events to VFs using the following op. */
+ I40E_VIRTCHNL_OP_EVENT,
+};
+
+/* Virtual channel message descriptor. This overlays the admin queue
+ * descriptor. All other data is passed in external buffers.
+ */
+
+struct i40e_virtchnl_msg {
+ u8 pad[8]; /* AQ flags/opcode/len/retval fields */
+ enum i40e_virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
+ i40e_status v_retval; /* ditto for desc->retval */
+ u32 vfid; /* used by PF when sending to VF */
+};
+
+/* Message descriptions and data structures. */
+
+/* I40E_VIRTCHNL_OP_VERSION
+ * VF posts its version number to the PF. PF responds with its version number
+ * in the same format, along with a return code.
+ * Reply from PF has its major/minor versions also in param0 and param1.
+ * If there is a major version mismatch, then the VF cannot operate.
+ * If there is a minor version mismatch, then the VF can operate but should
+ * add a warning to the system log.
+ *
+ * This enum element MUST always be specified as == 1, regardless of other
+ * changes in the API. The PF must always respond to this message without
+ * error regardless of version mismatch.
+ */
+#define I40E_VIRTCHNL_VERSION_MAJOR 1
+#define I40E_VIRTCHNL_VERSION_MINOR 0
+struct i40e_virtchnl_version_info {
+ u32 major;
+ u32 minor;
+};
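+
+/* Illustrative version check (sketch only; pf_ver is assumed to hold the
+ * PF's reply and dev is a hypothetical device pointer):
+ *
+ * struct i40e_virtchnl_version_info my_ver = {
+ *     I40E_VIRTCHNL_VERSION_MAJOR,
+ *     I40E_VIRTCHNL_VERSION_MINOR,
+ * };
+ *
+ * if (pf_ver.major != my_ver.major)
+ *     return -EIO;
+ * if (pf_ver.minor != my_ver.minor)
+ *     dev_warn(dev, "PF/VF API minor version mismatch\n");
+ */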
+
+/* I40E_VIRTCHNL_OP_RESET_VF
+ * VF sends this request to the PF with no parameters.
+ * The PF does NOT respond! The VF driver must delay, then poll the
+ * VFGEN_RSTAT register until reset completion is indicated. The admin
+ * queue must be reinitialized
+ * after this operation.
+ *
+ * When reset is complete, PF must ensure that all queues in all VSIs associated
+ * with the VF are stopped, all queue configurations in the HMC are set to 0,
+ * and all MAC and VLAN filters (except the default MAC address) on all VSIs
+ * are cleared.
+ */
+
+/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
+ * VF sends this request to the PF with no parameters.
+ * PF responds with an indirect message containing
+ * i40e_virtchnl_vf_resource and one or more
+ * i40e_virtchnl_vsi_resource structures.
+ */
+
+struct i40e_virtchnl_vsi_resource {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ enum i40e_vsi_type vsi_type;
+ u16 qset_handle;
+ u8 default_mac_addr[ETH_ALEN];
+};
+/* VF offload flags */
+#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
+#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
+#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
+
+struct i40e_virtchnl_vf_resource {
+ u16 num_vsis;
+ u16 num_queue_pairs;
+ u16 max_vectors;
+ u16 max_mtu;
+
+ u32 vf_offload_flags;
+ u32 max_fcoe_contexts;
+ u32 max_fcoe_filters;
+
+ struct i40e_virtchnl_vsi_resource vsi_res[1];
+};
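+
+/* Because vsi_res is a variable-length trailing array, a reply carrying
+ * num_vsis VSIs is sized as in this sketch:
+ *
+ * len = sizeof(struct i40e_virtchnl_vf_resource) +
+ *       (num_vsis - 1) * sizeof(struct i40e_virtchnl_vsi_resource);
+ */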
+
+/* I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE
+ * VF sends this message to set up parameters for one TX queue.
+ * External data buffer contains one instance of i40e_virtchnl_txq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Tx queue config info */
+struct i40e_virtchnl_txq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u16 ring_len; /* number of descriptors, multiple of 8 */
+ u16 headwb_enabled;
+ u64 dma_ring_addr;
+ u64 dma_headwb_addr;
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE
+ * VF sends this message to set up parameters for one RX queue.
+ * External data buffer contains one instance of i40e_virtchnl_rxq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Rx queue config info */
+struct i40e_virtchnl_rxq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u32 ring_len; /* number of descriptors, multiple of 32 */
+ u16 hdr_size;
+ u16 splithdr_enabled;
+ u32 databuffer_size;
+ u32 max_pkt_size;
+ u64 dma_ring_addr;
+ enum i40e_hmc_obj_rx_hsplit_0 rx_split_pos;
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
+ * VF sends this message to set parameters for all active TX and RX queues
+ * associated with the specified VSI.
+ * PF configures queues and returns status.
+ * If the number of queues specified is greater than the number of queues
+ * associated with the VSI, an error is returned and no queues are configured.
+ */
+struct i40e_virtchnl_queue_pair_info {
+ /* NOTE: vsi_id and queue_id should be identical for both queues. */
+ struct i40e_virtchnl_txq_info txq;
+ struct i40e_virtchnl_rxq_info rxq;
+};
+
+struct i40e_virtchnl_vsi_queue_config_info {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ struct i40e_virtchnl_queue_pair_info qpair[1];
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
+ * VF uses this message to map vectors to queues.
+ * The rxq_map and txq_map fields are bitmaps used to indicate which queues
+ * are to be associated with the specified vector.
+ * The "other" causes are always mapped to vector 0.
+ * PF configures interrupt mapping and returns status.
+ */
+struct i40e_virtchnl_vector_map {
+ u16 vsi_id;
+ u16 vector_id;
+ u16 rxq_map;
+ u16 txq_map;
+ u16 rxitr_idx;
+ u16 txitr_idx;
+};
+
+struct i40e_virtchnl_irq_map_info {
+ u16 num_vectors;
+ struct i40e_virtchnl_vector_map vecmap[1];
+};
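+
+/* Illustrative fill of one vector map (sketch; vsi_id is hypothetical):
+ * rx queues 0 and 1 and tx queue 0 ride on vector 1, leaving the "other"
+ * causes on vector 0 as described above.
+ *
+ * struct i40e_virtchnl_vector_map vm = {
+ *     .vsi_id = vsi_id,
+ *     .vector_id = 1,
+ *     .rxq_map = (1 << 0) | (1 << 1),
+ *     .txq_map = (1 << 0),
+ * };
+ */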
+
+/* I40E_VIRTCHNL_OP_ENABLE_QUEUES
+ * I40E_VIRTCHNL_OP_DISABLE_QUEUES
+ * VF sends these messages to enable or disable TX/RX queue pairs.
+ * The queues fields are bitmaps indicating which queues to act upon.
+ * (Currently, we only support 16 queues per VF, but we make the field
+ * u32 to allow for expansion.)
+ * PF performs requested action and returns status.
+ */
+struct i40e_virtchnl_queue_select {
+ u16 vsi_id;
+ u16 pad;
+ u32 rx_queues;
+ u32 tx_queues;
+};
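+
+/* Illustrative sketch: enabling the first four queue pairs of a VSI
+ * (vsi_id is hypothetical; sent with I40E_VIRTCHNL_OP_ENABLE_QUEUES):
+ *
+ * struct i40e_virtchnl_queue_select vqs = {
+ *     .vsi_id = vsi_id,
+ *     .rx_queues = 0xf,
+ *     .tx_queues = 0xf,
+ * };
+ */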
+
+/* I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
+ * VF sends this message in order to add one or more unicast or multicast
+ * address filters for the specified VSI.
+ * PF adds the filters and returns status.
+ */
+
+/* I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
+ * VF sends this message in order to remove one or more unicast or multicast
+ * filters for the specified VSI.
+ * PF removes the filters and returns status.
+ */
+
+struct i40e_virtchnl_ether_addr {
+ u8 addr[ETH_ALEN];
+ u8 pad[2];
+};
+
+struct i40e_virtchnl_ether_addr_list {
+ u16 vsi_id;
+ u16 num_elements;
+ struct i40e_virtchnl_ether_addr list[1];
+};
+
+/* I40E_VIRTCHNL_OP_ADD_VLAN
+ * VF sends this message to add one or more VLAN tag filters for receives.
+ * PF adds the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+/* I40E_VIRTCHNL_OP_DEL_VLAN
+ * VF sends this message to remove one or more VLAN tag filters for receives.
+ * PF removes the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+struct i40e_virtchnl_vlan_filter_list {
+ u16 vsi_id;
+ u16 num_elements;
+ u16 vlan_id[1];
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
+ * VF sends VSI id and flags.
+ * PF returns status code in retval.
+ * Note: we assume that broadcast accept mode is always enabled.
+ */
+struct i40e_virtchnl_promisc_info {
+ u16 vsi_id;
+ u16 flags;
+};
+
+#define I40E_FLAG_VF_UNICAST_PROMISC 0x00000001
+#define I40E_FLAG_VF_MULTICAST_PROMISC 0x00000002
+
+/* I40E_VIRTCHNL_OP_GET_STATS
+ * VF sends this message to request stats for the selected VSI. VF uses
+ * the i40e_virtchnl_queue_select struct to specify the VSI. The queue_id
+ * field is ignored by the PF.
+ *
+ * PF replies with struct i40e_eth_stats in an external buffer.
+ */
+
+/* I40E_VIRTCHNL_OP_EVENT
+ * PF sends this message to inform the VF driver of events that may affect it.
+ * No direct response is expected from the VF, though it may generate other
+ * messages in response to this one.
+ */
+enum i40e_virtchnl_event_codes {
+ I40E_VIRTCHNL_EVENT_UNKNOWN = 0,
+ I40E_VIRTCHNL_EVENT_LINK_CHANGE,
+ I40E_VIRTCHNL_EVENT_RESET_IMPENDING,
+ I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
+};
+#define I40E_PF_EVENT_SEVERITY_INFO 0
+#define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM 255
+
+struct i40e_virtchnl_pf_event {
+ enum i40e_virtchnl_event_codes event;
+ union {
+ struct {
+ enum i40e_aq_link_speed link_speed;
+ bool link_status;
+ } link_event;
+ } event_data;
+
+ int severity;
+};
+
+/* The following are TBD, not necessary for LAN functionality.
+ * I40E_VIRTCHNL_OP_FCOE
+ */
+
+/* VF reset states - these are written into the RSTAT register:
+ * I40E_VFGEN_RSTAT1 on the PF
+ * I40E_VFGEN_RSTAT on the VF
+ * When the PF initiates a reset, it writes 0.
+ * When the reset is complete, it writes 1.
+ * When the PF detects that the VF has recovered, it writes 2.
+ * The VF checks this register periodically to determine if a reset has
+ * occurred, then polls it to know when the reset is complete.
+ * If either the PF or VF reads the register while the hardware
+ * is in a reset state, it will return 0xDEADBEEF, which, when masked,
+ * will result in 3 (I40E_VFR_UNKNOWN).
+ */
+enum i40e_vfr_states {
+ I40E_VFR_INPROGRESS = 0,
+ I40E_VFR_COMPLETED,
+ I40E_VFR_VFACTIVE,
+ I40E_VFR_UNKNOWN,
+};
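+
+/* Illustrative reset poll, per the description above (sketch only; the
+ * 0x3 mask is an assumption that folds a DEADBEEF read into
+ * I40E_VFR_UNKNOWN):
+ *
+ * do {
+ *     msleep(10);
+ *     rstat = rd32(hw, I40E_VFGEN_RSTAT) & 0x3;
+ * } while (rstat == I40E_VFR_INPROGRESS);
+ */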
+
+#endif /* _I40E_VIRTCHNL_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
new file mode 100644
index 000000000000..ff6529b288a1
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -0,0 +1,321 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40EVF_H_
+#define _I40EVF_H_
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/sctp.h>
+#include <linux/ipv6.h>
+#include <net/ip6_checksum.h>
+#include <net/udp.h>
+
+#include "i40e_type.h"
+#include "i40e_virtchnl.h"
+#include "i40e_txrx.h"
+
+#define DEFAULT_DEBUG_LEVEL_SHIFT 3
+#define PFX "i40evf: "
+#define DPRINTK(nlevel, klevel, fmt, args...) \
+ ((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
+ printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
+ __func__ , ## args)))
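+
+/* Usage sketch (illustrative):
+ * DPRINTK(PROBE, INFO, "got resources from PF\n");
+ * prints via printk() only when NETIF_MSG_PROBE is set in msg_enable.
+ */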
+
+/* dummy struct to make common code less painful */
+struct i40e_vsi {
+ struct i40evf_adapter *back;
+ struct net_device *netdev;
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ u16 seid;
+ u16 id;
+ unsigned long state;
+ int base_vector;
+ u16 work_limit;
+ /* High bit set means dynamic; use accessor routines to read/write.
+ * Hardware only supports 2us resolution for the ITR registers.
+ * These values always store the USER setting, and must be converted
+ * before programming to a register.
+ */
+ u16 rx_itr_setting;
+ u16 tx_itr_setting;
+};
+
+/* How many Rx Buffers do we bundle into one write to the hardware? */
+#define I40EVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+#define I40EVF_DEFAULT_TXD 512
+#define I40EVF_DEFAULT_RXD 512
+#define I40EVF_MAX_TXD 4096
+#define I40EVF_MIN_TXD 64
+#define I40EVF_MAX_RXD 4096
+#define I40EVF_MIN_RXD 64
+#define I40EVF_REQ_DESCRIPTOR_MULTIPLE 8
+
+/* Supported Rx Buffer Sizes */
+#define I40EVF_RXBUFFER_64 64 /* Used for packet split */
+#define I40EVF_RXBUFFER_128 128 /* Used for packet split */
+#define I40EVF_RXBUFFER_256 256 /* Used for packet split */
+#define I40EVF_RXBUFFER_2048 2048
+#define I40EVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */
+#define I40EVF_MAX_AQ_BUF_SIZE 4096
+#define I40EVF_AQ_LEN 32
+#define I40EVF_AQ_MAX_ERR 10 /* times to try before resetting AQ */
+
+#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
+
+#define I40E_RX_DESC(R, i) (&(((union i40e_32byte_rx_desc *)((R)->desc))[i]))
+#define I40E_TX_DESC(R, i) (&(((struct i40e_tx_desc *)((R)->desc))[i]))
+#define I40E_TX_CTXTDESC(R, i) \
+ (&(((struct i40e_tx_context_desc *)((R)->desc))[i]))
+#define MAX_RX_QUEUES 8
+#define MAX_TX_QUEUES MAX_RX_QUEUES
+
+/* MAX_MSIX_Q_VECTORS of these are allocated,
+ * but we only use one per queue-specific vector.
+ */
+struct i40e_q_vector {
+ struct i40evf_adapter *adapter;
+ struct i40e_vsi *vsi;
+ struct napi_struct napi;
+ unsigned long reg_idx;
+ struct i40e_ring_container rx;
+ struct i40e_ring_container tx;
+ u32 ring_mask;
+ u8 num_ringpairs; /* total number of ring pairs in vector */
+ int v_idx; /* vector index in list */
+ char name[IFNAMSIZ + 9];
+ cpumask_var_t affinity_mask;
+};
+
+/* Helper macros to switch between ints/sec and what the register uses.
+ * And yes, it's the same math going both ways. The lowest value
+ * supported by all of the i40e hardware is 8.
+ */
+#define EITR_INTS_PER_SEC_TO_REG(_eitr) \
+ ((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
+#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
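+
+/* Worked example: EITR_INTS_PER_SEC_TO_REG(8000) = 1000000000 / (8000 * 256)
+ * = 488, and EITR_REG_TO_INTS_PER_SEC(488) = 1000000000 / (488 * 256) = 8004,
+ * which is why one macro serves both directions.
+ */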
+
+#define I40EVF_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
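+
+/* Worked example: with count = 512, next_to_clean = 4 and next_to_use = 10,
+ * I40EVF_DESC_UNUSED() yields 512 + 4 - 10 - 1 = 505 free descriptors; count
+ * is added only while next_to_clean has not wrapped past next_to_use.
+ */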
+
+#define I40EVF_RX_DESC_ADV(R, i) \
+ (&(((union i40e_adv_rx_desc *)((R).desc))[i]))
+#define I40EVF_TX_DESC_ADV(R, i) \
+ (&(((union i40e_adv_tx_desc *)((R).desc))[i]))
+#define I40EVF_TX_CTXTDESC_ADV(R, i) \
+ (&(((struct i40e_adv_tx_context_desc *)((R).desc))[i]))
+
+#define OTHER_VECTOR 1
+#define NONQ_VECS (OTHER_VECTOR)
+
+#define MAX_MSIX_Q_VECTORS 4
+#define MAX_MSIX_COUNT 5
+
+#define MIN_MSIX_Q_VECTORS 1
+#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NONQ_VECS)
+
+#define I40EVF_QUEUE_END_OF_LIST 0x7FF
+#define I40EVF_FREE_VECTOR 0x7FFF
+struct i40evf_mac_filter {
+ struct list_head list;
+ u8 macaddr[ETH_ALEN];
+ bool remove; /* filter needs to be removed */
+ bool add; /* filter needs to be added */
+};
+
+struct i40evf_vlan_filter {
+ struct list_head list;
+ u16 vlan;
+ bool remove; /* filter needs to be removed */
+ bool add; /* filter needs to be added */
+};
+
+/* Driver state. The order of these is important! */
+enum i40evf_state_t {
+ __I40EVF_STARTUP, /* driver loaded, probe complete */
+ __I40EVF_FAILED, /* PF communication failed. Fatal. */
+ __I40EVF_REMOVE, /* driver is being unloaded */
+ __I40EVF_INIT_VERSION_CHECK, /* aq msg sent, awaiting reply */
+ __I40EVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */
+ __I40EVF_INIT_SW, /* got resources, setting up structs */
+ /* Below here, watchdog is running */
+ __I40EVF_DOWN, /* ready, can be opened */
+ __I40EVF_TESTING, /* in ethtool self-test */
+ __I40EVF_RESETTING, /* in reset */
+ __I40EVF_RUNNING, /* opened, working */
+};
+
+enum i40evf_critical_section_t {
+ __I40EVF_IN_CRITICAL_TASK, /* cannot be interrupted */
+};
+/* make common code happy */
+#define __I40E_DOWN __I40EVF_DOWN
+
+/* board specific private data structure */
+struct i40evf_adapter {
+ struct timer_list watchdog_timer;
+ struct vlan_group *vlgrp;
+ struct work_struct reset_task;
+ struct work_struct adminq_task;
+ struct delayed_work init_task;
+ struct i40e_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
+ struct list_head vlan_filter_list;
+ char name[MAX_MSIX_COUNT][IFNAMSIZ + 9];
+
+ /* Interrupt Throttle Rate */
+ u32 itr_setting;
+ u16 eitr_low;
+ u16 eitr_high;
+
+ /* TX */
+ struct i40e_ring *tx_rings[I40E_MAX_VSI_QP];
+ u64 restart_queue;
+ u64 hw_csum_tx_good;
+ u64 lsc_int;
+ u64 hw_tso_ctxt;
+ u64 hw_tso6_ctxt;
+ u32 tx_timeout_count;
+ struct list_head mac_filter_list;
+#ifdef DEBUG
+ bool detect_tx_hung;
+#endif /* DEBUG */
+
+ /* RX */
+ struct i40e_ring *rx_rings[I40E_MAX_VSI_QP];
+ int txd_count;
+ int rxd_count;
+ u64 hw_csum_rx_error;
+ u64 hw_rx_no_dma_resources;
+ u64 hw_csum_rx_good;
+ u64 non_eop_descs;
+ int num_msix_vectors;
+ struct msix_entry *msix_entries;
+
+ u64 rx_hdr_split;
+
+ u32 init_state;
+ volatile unsigned long flags;
+#define I40EVF_FLAG_RX_CSUM_ENABLED (u32)(1)
+#define I40EVF_FLAG_RX_1BUF_CAPABLE (u32)(1 << 1)
+#define I40EVF_FLAG_RX_PS_CAPABLE (u32)(1 << 2)
+#define I40EVF_FLAG_RX_PS_ENABLED (u32)(1 << 3)
+#define I40EVF_FLAG_IN_NETPOLL (u32)(1 << 4)
+#define I40EVF_FLAG_IMIR_ENABLED (u32)(1 << 5)
+#define I40EVF_FLAG_MQ_CAPABLE (u32)(1 << 6)
+#define I40EVF_FLAG_NEED_LINK_UPDATE (u32)(1 << 7)
+/* duplicates for common code */
+#define I40E_FLAG_FDIR_ATR_ENABLED 0
+#define I40E_FLAG_DCB_ENABLED 0
+#define I40E_FLAG_IN_NETPOLL I40EVF_FLAG_IN_NETPOLL
+#define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED
+ /* flags for admin queue service task */
+ u32 aq_required;
+ u32 aq_pending;
+#define I40EVF_FLAG_AQ_ENABLE_QUEUES (u32)(1)
+#define I40EVF_FLAG_AQ_DISABLE_QUEUES (u32)(1 << 1)
+#define I40EVF_FLAG_AQ_ADD_MAC_FILTER (u32)(1 << 2)
+#define I40EVF_FLAG_AQ_ADD_VLAN_FILTER (u32)(1 << 3)
+#define I40EVF_FLAG_AQ_DEL_MAC_FILTER (u32)(1 << 4)
+#define I40EVF_FLAG_AQ_DEL_VLAN_FILTER (u32)(1 << 5)
+#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES (u32)(1 << 6)
+#define I40EVF_FLAG_AQ_MAP_VECTORS (u32)(1 << 7)
+#define I40EVF_FLAG_AQ_HANDLE_RESET (u32)(1 << 8)
+ /* OS defined structs */
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct net_device_stats net_stats;
+
+ /* structs defined in i40e_vf.h */
+ struct i40e_hw hw;
+
+ enum i40evf_state_t state;
+ volatile unsigned long crit_section;
+ u64 tx_busy;
+
+ struct work_struct watchdog_task;
+ bool netdev_registered;
+ bool dev_closed;
+ bool link_up;
+ enum i40e_virtchnl_ops current_op;
+ struct i40e_virtchnl_vf_resource *vf_res; /* incl. all VSIs */
+ struct i40e_virtchnl_vsi_resource *vsi_res; /* our LAN VSI */
+ u16 msg_enable;
+ struct i40e_eth_stats current_stats;
+ struct i40e_vsi vsi;
+ u32 aq_wait_count;
+};
+
+struct i40evf_info {
+ enum i40e_mac_type mac;
+ unsigned int flags;
+};
+
+/* needed by i40evf_ethtool.c */
+extern char i40evf_driver_name[];
+extern const char i40evf_driver_version[];
+
+int i40evf_up(struct i40evf_adapter *adapter);
+void i40evf_down(struct i40evf_adapter *adapter);
+void i40evf_reinit_locked(struct i40evf_adapter *adapter);
+void i40evf_reset(struct i40evf_adapter *adapter);
+void i40evf_set_ethtool_ops(struct net_device *netdev);
+void i40evf_update_stats(struct i40evf_adapter *adapter);
+void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter);
+int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter);
+void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask);
+
+void i40e_napi_add_all(struct i40evf_adapter *adapter);
+void i40e_napi_del_all(struct i40evf_adapter *adapter);
+
+int i40evf_send_api_ver(struct i40evf_adapter *adapter);
+int i40evf_verify_api_ver(struct i40evf_adapter *adapter);
+int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter);
+int i40evf_get_vf_config(struct i40evf_adapter *adapter);
+void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush);
+void i40evf_configure_queues(struct i40evf_adapter *adapter);
+void i40evf_deconfigure_queues(struct i40evf_adapter *adapter);
+void i40evf_enable_queues(struct i40evf_adapter *adapter);
+void i40evf_disable_queues(struct i40evf_adapter *adapter);
+void i40evf_map_queues(struct i40evf_adapter *adapter);
+void i40evf_add_ether_addrs(struct i40evf_adapter *adapter);
+void i40evf_del_ether_addrs(struct i40evf_adapter *adapter);
+void i40evf_add_vlans(struct i40evf_adapter *adapter);
+void i40evf_del_vlans(struct i40evf_adapter *adapter);
+void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags);
+void i40evf_request_stats(struct i40evf_adapter *adapter);
+void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
+ enum i40e_virtchnl_ops v_opcode,
+ i40e_status v_retval, u8 *msg, u16 msglen);
+#endif /* _I40EVF_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
new file mode 100644
index 000000000000..b0b1f4bf5ac0
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -0,0 +1,390 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+/* ethtool support for i40evf */
+#include "i40evf.h"
+
+#include <linux/uaccess.h>
+
+struct i40evf_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int stat_offset;
+};
+
+#define I40EVF_STAT(_name, _stat) { \
+ .stat_string = _name, \
+ .stat_offset = offsetof(struct i40evf_adapter, _stat) \
+}
+
+/* All stats are u64, so we don't need to track the size of the field. */
+static const struct i40evf_stats i40evf_gstrings_stats[] = {
+ I40EVF_STAT("rx_bytes", current_stats.rx_bytes),
+ I40EVF_STAT("rx_unicast", current_stats.rx_unicast),
+ I40EVF_STAT("rx_multicast", current_stats.rx_multicast),
+ I40EVF_STAT("rx_broadcast", current_stats.rx_broadcast),
+ I40EVF_STAT("rx_discards", current_stats.rx_discards),
+ I40EVF_STAT("rx_errors", current_stats.rx_errors),
+ I40EVF_STAT("rx_missed", current_stats.rx_missed),
+ I40EVF_STAT("rx_unknown_protocol", current_stats.rx_unknown_protocol),
+ I40EVF_STAT("tx_bytes", current_stats.tx_bytes),
+ I40EVF_STAT("tx_unicast", current_stats.tx_unicast),
+ I40EVF_STAT("tx_multicast", current_stats.tx_multicast),
+ I40EVF_STAT("tx_broadcast", current_stats.tx_broadcast),
+ I40EVF_STAT("tx_discards", current_stats.tx_discards),
+ I40EVF_STAT("tx_errors", current_stats.tx_errors),
+};
+
+#define I40EVF_GLOBAL_STATS_LEN ARRAY_SIZE(i40evf_gstrings_stats)
+#define I40EVF_QUEUE_STATS_LEN \
+ (((struct i40evf_adapter *) \
+ netdev_priv(netdev))->vsi_res->num_queue_pairs * 4)
+#define I40EVF_STATS_LEN (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN)
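+/* e.g. with 4 queue pairs: 14 global stats + 4 * 4 per-queue stats
+ * (tx/rx packets and bytes per pair) = 30 entries in the data buffer.
+ */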
+
+/**
+ * i40evf_get_settings - Get Link Speed and Duplex settings
+ * @netdev: network interface device structure
+ * @ecmd: ethtool command
+ *
+ * Reports speed/duplex settings. Because this is a VF, we don't know what
+ * kind of link we really have, so we fake it.
+ **/
+static int i40evf_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ /* In the future the VF will be able to query the PF for
+ * some information - for now use a dummy value
+ */
+ ecmd->supported = SUPPORTED_10000baseT_Full;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ ecmd->transceiver = XCVR_DUMMY1;
+ ecmd->port = PORT_NONE;
+
+ return 0;
+}
+
+/**
+ * i40evf_get_sset_count - Get length of string set
+ * @netdev: network interface device structure
+ * @sset: id of string set
+ *
+ * Reports size of string table. This driver only supports
+ * strings for statistics.
+ **/
+static int i40evf_get_sset_count(struct net_device *netdev, int sset)
+{
+ if (sset == ETH_SS_STATS)
+ return I40EVF_STATS_LEN;
+ else
+ return -ENOTSUPP;
+}
+
+/**
+ * i40evf_get_ethtool_stats - report device statistics
+ * @netdev: network interface device structure
+ * @stats: ethtool statistics structure
+ * @data: pointer to data buffer
+ *
+ * All statistics are added to the data buffer as an array of u64.
+ **/
+static void i40evf_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ int i, j;
+ char *p;
+
+ for (i = 0; i < I40EVF_GLOBAL_STATS_LEN; i++) {
+ p = (char *)adapter + i40evf_gstrings_stats[i].stat_offset;
+ data[i] = *(u64 *)p;
+ }
+ for (j = 0; j < adapter->vsi_res->num_queue_pairs; j++) {
+ data[i++] = adapter->tx_rings[j]->stats.packets;
+ data[i++] = adapter->tx_rings[j]->stats.bytes;
+ }
+ for (j = 0; j < adapter->vsi_res->num_queue_pairs; j++) {
+ data[i++] = adapter->rx_rings[j]->stats.packets;
+ data[i++] = adapter->rx_rings[j]->stats.bytes;
+ }
+}
+
+/**
+ * i40evf_get_strings - Get string set
+ * @netdev: network interface device structure
+ * @sset: id of string set
+ * @data: buffer for string data
+ *
+ * Builds stats string table.
+ **/
+static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ u8 *p = data;
+ int i;
+
+ if (sset == ETH_SS_STATS) {
+ for (i = 0; i < I40EVF_GLOBAL_STATS_LEN; i++) {
+ memcpy(p, i40evf_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "tx-%u.packets", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN, "tx-%u.bytes", i);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "rx-%u.packets", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+}
+
+/**
+ * i40evf_get_msglevel - Get debug message level
+ * @netdev: network interface device structure
+ *
+ * Returns current debug message level.
+ **/
+static u32 i40evf_get_msglevel(struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ return adapter->msg_enable;
+}
+
+/**
+ * i40evf_set_msglevel - Set debug message level
+ * @netdev: network interface device structure
+ * @data: message level
+ *
+ * Sets the current debug message level. Higher values cause the driver to
+ * be noisier.
+ **/
+static void i40evf_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ adapter->msg_enable = data;
+}
+
+/**
+ * i40evf_get_drvinfo - Get driver info
+ * @netdev: network interface device structure
+ * @drvinfo: ethtool driver info structure
+ *
+ * Returns information about the driver and device for display to the user.
+ **/
+static void i40evf_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ strlcpy(drvinfo->driver, i40evf_driver_name, 32);
+ strlcpy(drvinfo->version, i40evf_driver_version, 32);
+
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+}
+
+/**
+ * i40evf_get_ringparam - Get ring parameters
+ * @netdev: network interface device structure
+ * @ring: ethtool ringparam structure
+ *
+ * Returns current ring parameters. TX and RX rings are reported separately,
+ * but the number of rings is not reported.
+ **/
+static void i40evf_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_ring *tx_ring = adapter->tx_rings[0];
+ struct i40e_ring *rx_ring = adapter->rx_rings[0];
+
+ ring->rx_max_pending = I40EVF_MAX_RXD;
+ ring->tx_max_pending = I40EVF_MAX_TXD;
+ ring->rx_pending = rx_ring->count;
+ ring->tx_pending = tx_ring->count;
+}
+
+/**
+ * i40evf_set_ringparam - Set ring parameters
+ * @netdev: network interface device structure
+ * @ring: ethtool ringparam structure
+ *
+ * Sets ring parameters. TX and RX rings are controlled separately, but the
+ * number of rings is not specified, so all rings get the same settings.
+ **/
+static int i40evf_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ u32 new_rx_count, new_tx_count;
+
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ return -EINVAL;
+
+ new_tx_count = clamp_t(u32, ring->tx_pending,
+ I40EVF_MIN_TXD,
+ I40EVF_MAX_TXD);
+ new_tx_count = ALIGN(new_tx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE);
+
+ new_rx_count = clamp_t(u32, ring->rx_pending,
+ I40EVF_MIN_RXD,
+ I40EVF_MAX_RXD);
+ new_rx_count = ALIGN(new_rx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE);
+
+ /* if nothing to do, return success */
+ if ((new_tx_count == adapter->txd_count) &&
+ (new_rx_count == adapter->rxd_count))
+ return 0;
+
+ adapter->txd_count = new_tx_count;
+ adapter->rxd_count = new_rx_count;
+
+ if (netif_running(netdev))
+ i40evf_reinit_locked(adapter);
+ return 0;
+}
+
+/**
+ * i40evf_get_coalesce - Get interrupt coalescing settings
+ * @netdev: network interface device structure
+ * @ec: ethtool coalesce structure
+ *
+ * Returns current coalescing settings. This is referred to elsewhere in the
+ * driver as Interrupt Throttle Rate, as this is how the hardware describes
+ * this functionality.
+ **/
+static int i40evf_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_vsi *vsi = &adapter->vsi;
+
+ ec->tx_max_coalesced_frames = vsi->work_limit;
+ ec->rx_max_coalesced_frames = vsi->work_limit;
+
+ if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
+ ec->rx_coalesce_usecs = 1;
+ else
+ ec->rx_coalesce_usecs = vsi->rx_itr_setting;
+
+ if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
+ ec->tx_coalesce_usecs = 1;
+ else
+ ec->tx_coalesce_usecs = vsi->tx_itr_setting;
+
+ return 0;
+}
+
+/**
+ * i40evf_set_coalesce - Set interrupt coalescing settings
+ * @netdev: network interface device structure
+ * @ec: ethtool coalesce structure
+ *
+ * Change current coalescing settings.
+ **/
+static int i40evf_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_hw *hw = &adapter->hw;
+ struct i40e_vsi *vsi = &adapter->vsi;
+ struct i40e_q_vector *q_vector;
+ int i;
+
+ if (ec->tx_max_coalesced_frames || ec->rx_max_coalesced_frames)
+ vsi->work_limit = ec->tx_max_coalesced_frames;
+
+ switch (ec->rx_coalesce_usecs) {
+ case 0:
+ vsi->rx_itr_setting = 0;
+ break;
+ case 1:
+ vsi->rx_itr_setting = (I40E_ITR_DYNAMIC
+ | ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
+ break;
+ default:
+ if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
+ (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1)))
+ return -EINVAL;
+ vsi->rx_itr_setting = ec->rx_coalesce_usecs;
+ break;
+ }
+
+ switch (ec->tx_coalesce_usecs) {
+ case 0:
+ vsi->tx_itr_setting = 0;
+ break;
+ case 1:
+ vsi->tx_itr_setting = (I40E_ITR_DYNAMIC
+ | ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
+ break;
+ default:
+ if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
+ (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1)))
+ return -EINVAL;
+ vsi->tx_itr_setting = ec->tx_coalesce_usecs;
+ break;
+ }
+
+ for (i = 0; i < adapter->num_msix_vectors - NONQ_VECS; i++) {
+ q_vector = adapter->q_vector[i];
+ q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
+ wr32(hw, I40E_VFINT_ITRN1(0, i), q_vector->rx.itr);
+ q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
+ wr32(hw, I40E_VFINT_ITRN1(1, i), q_vector->tx.itr);
+ i40e_flush(hw);
+ }
+
+ return 0;
+}
+
+static struct ethtool_ops i40evf_ethtool_ops = {
+ .get_settings = i40evf_get_settings,
+ .get_drvinfo = i40evf_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = i40evf_get_ringparam,
+ .set_ringparam = i40evf_set_ringparam,
+ .get_strings = i40evf_get_strings,
+ .get_ethtool_stats = i40evf_get_ethtool_stats,
+ .get_sset_count = i40evf_get_sset_count,
+ .get_msglevel = i40evf_get_msglevel,
+ .set_msglevel = i40evf_set_msglevel,
+ .get_coalesce = i40evf_get_coalesce,
+ .set_coalesce = i40evf_set_coalesce,
+};
+
+/**
+ * i40evf_set_ethtool_ops - Initialize ethtool ops struct
+ * @netdev: network interface device structure
+ *
+ * Sets ethtool ops struct in our netdev so that ethtool can call
+ * our functions.
+ **/
+void i40evf_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &i40evf_ethtool_ops);
+}
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
new file mode 100644
index 000000000000..f5caf4419243
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -0,0 +1,2353 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40evf.h"
+#include "i40e_prototype.h"
+static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter);
+static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter);
+static int i40evf_close(struct net_device *netdev);
+
+char i40evf_driver_name[] = "i40evf";
+static const char i40evf_driver_string[] =
+ "Intel(R) XL710 X710 Virtual Function Network Driver";
+
+#define DRV_VERSION "0.9.11"
+const char i40evf_driver_version[] = DRV_VERSION;
+static const char i40evf_copyright[] =
+ "Copyright (c) 2013 Intel Corporation.";
+
+/* i40evf_pci_tbl - PCI Device ID Table
+ *
+ * Wildcard entries (PCI_ANY_ID) should come last
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ * Class, Class Mask, private data (not used) }
+ */
+static DEFINE_PCI_DEVICE_TABLE(i40evf_pci_tbl) = {
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0},
+ /* required last entry */
+ {0, }
+};
+
+MODULE_DEVICE_TABLE(pci, i40evf_pci_tbl);
+
+MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
+MODULE_DESCRIPTION("Intel(R) XL710 X710 Virtual Function Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+/**
+ * i40evf_allocate_dma_mem_d - OS specific memory alloc for shared code
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to fill out
+ * @size: size of memory requested
+ * @alignment: what to align the allocation to
+ **/
+i40e_status i40evf_allocate_dma_mem_d(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem,
+ u64 size, u32 alignment)
+{
+ struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back;
+
+ if (!mem)
+ return I40E_ERR_PARAM;
+
+ mem->size = ALIGN(size, alignment);
+ mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
+ (dma_addr_t *)&mem->pa, GFP_KERNEL);
+ if (mem->va)
+ return 0;
+ else
+ return I40E_ERR_NO_MEMORY;
+}
+
+/**
+ * i40evf_free_dma_mem_d - OS specific memory free for shared code
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to free
+ **/
+i40e_status i40evf_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
+{
+ struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back;
+
+ if (!mem || !mem->va)
+ return I40E_ERR_PARAM;
+ dma_free_coherent(&adapter->pdev->dev, mem->size,
+ mem->va, (dma_addr_t)mem->pa);
+ return 0;
+}
+
+/**
+ * i40evf_allocate_virt_mem_d - OS specific memory alloc for shared code
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to fill out
+ * @size: size of memory requested
+ **/
+i40e_status i40evf_allocate_virt_mem_d(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem, u32 size)
+{
+ if (!mem)
+ return I40E_ERR_PARAM;
+
+ mem->size = size;
+ mem->va = kzalloc(size, GFP_KERNEL);
+
+ if (mem->va)
+ return 0;
+ else
+ return I40E_ERR_NO_MEMORY;
+}
+
+/**
+ * i40evf_free_virt_mem_d - OS specific memory free for shared code
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to free
+ **/
+i40e_status i40evf_free_virt_mem_d(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem)
+{
+ if (!mem)
+ return I40E_ERR_PARAM;
+
+ /* it's ok to kfree a NULL pointer */
+ kfree(mem->va);
+
+ return 0;
+}
+
+/**
+ * i40evf_debug_d - OS dependent version of debug printing
+ * @hw: pointer to the HW structure
+ * @mask: debug level mask
+ * @fmt_str: printf-type format description
+ **/
+void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
+{
+ char buf[512];
+ va_list argptr;
+
+ if (!(mask & ((struct i40e_hw *)hw)->debug_mask))
+ return;
+
+ va_start(argptr, fmt_str);
+ vsnprintf(buf, sizeof(buf), fmt_str, argptr);
+ va_end(argptr);
+
+ /* the debug string is already formatted with a newline */
+ pr_info("%s", buf);
+}
+
+/**
+ * i40evf_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ **/
+static void i40evf_tx_timeout(struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ adapter->tx_timeout_count++;
+
+ /* Do the reset outside of interrupt context */
+ schedule_work(&adapter->reset_task);
+}
+
+/**
+ * i40evf_misc_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+static void i40evf_misc_irq_disable(struct i40evf_adapter *adapter)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ wr32(hw, I40E_VFINT_DYN_CTL01, 0);
+
+ /* read flush */
+ rd32(hw, I40E_VFGEN_RSTAT);
+
+ synchronize_irq(adapter->msix_entries[0].vector);
+}
+
+/**
+ * i40evf_misc_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+static void i40evf_misc_irq_enable(struct i40evf_adapter *adapter)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK |
+ I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
+ wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA_ADMINQ_MASK);
+
+ /* read flush */
+ rd32(hw, I40E_VFGEN_RSTAT);
+}
+
+/**
+ * i40evf_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+static void i40evf_irq_disable(struct i40evf_adapter *adapter)
+{
+ int i;
+ struct i40e_hw *hw = &adapter->hw;
+
+ for (i = 1; i < adapter->num_msix_vectors; i++) {
+ wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), 0);
+ synchronize_irq(adapter->msix_entries[i].vector);
+ }
+ /* read flush */
+ rd32(hw, I40E_VFGEN_RSTAT);
+}
+
+/**
+ * i40evf_irq_enable_queues - Enable interrupt for specified queues
+ * @adapter: board private structure
+ * @mask: bitmap of queues to enable
+ **/
+void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ int i;
+
+ for (i = 1; i < adapter->num_msix_vectors; i++) {
+ if (mask & (1 << (i - 1))) {
+ wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1),
+ I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+ I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
+ }
+ }
+}
+
+/**
+ * i40evf_fire_sw_int - Generate SW interrupt for specified vectors
+ * @adapter: board private structure
+ * @mask: bitmap of vectors to trigger
+ **/
+static void i40evf_fire_sw_int(struct i40evf_adapter *adapter,
+ u32 mask)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ int i;
+ uint32_t dyn_ctl;
+
+ for (i = 1; i < adapter->num_msix_vectors; i++) {
+ if (mask & (1 << i)) {
+ dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1));
+ dyn_ctl |= I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK |
+ I40E_VFINT_DYN_CTLN_CLEARPBA_MASK;
+ wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), dyn_ctl);
+ }
+ }
+}
+
+/**
+ * i40evf_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ * @flush: read back a register to flush the interrupt enables
+ **/
+void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush)
+{
+ struct i40e_hw *hw = &adapter->hw;
+
+ i40evf_irq_enable_queues(adapter, ~0);
+
+ if (flush)
+ rd32(hw, I40E_VFGEN_RSTAT);
+}
+
+/**
+ * i40evf_msix_aq - Interrupt handler for vector 0
+ * @irq: interrupt number
+ * @data: pointer to netdev
+ **/
+static irqreturn_t i40evf_msix_aq(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_hw *hw = &adapter->hw;
+ u32 val;
+ u32 ena_mask;
+
+ /* handle non-queue interrupts */
+ val = rd32(hw, I40E_VFINT_ICR01);
+ ena_mask = rd32(hw, I40E_VFINT_ICR0_ENA1);
+
+ val = rd32(hw, I40E_VFINT_DYN_CTL01);
+ val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
+ wr32(hw, I40E_VFINT_DYN_CTL01, val);
+
+ /* re-enable interrupt causes */
+ wr32(hw, I40E_VFINT_ICR0_ENA1, ena_mask);
+ wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK);
+
+ /* schedule work on the private workqueue */
+ schedule_work(&adapter->adminq_task);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * i40evf_msix_clean_rings - MSIX mode Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ **/
+static irqreturn_t i40evf_msix_clean_rings(int irq, void *data)
+{
+ struct i40e_q_vector *q_vector = data;
+
+ if (!q_vector->tx.ring && !q_vector->rx.ring)
+ return IRQ_HANDLED;
+
+ napi_schedule(&q_vector->napi);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * i40evf_map_vector_to_rxq - associate irqs with rx queues
+ * @adapter: board private structure
+ * @v_idx: interrupt number
+ * @r_idx: queue number
+ **/
+static void
+i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx)
+{
+ struct i40e_q_vector *q_vector = adapter->q_vector[v_idx];
+ struct i40e_ring *rx_ring = adapter->rx_rings[r_idx];
+
+ rx_ring->q_vector = q_vector;
+ rx_ring->next = q_vector->rx.ring;
+ rx_ring->vsi = &adapter->vsi;
+ q_vector->rx.ring = rx_ring;
+ q_vector->rx.count++;
+ q_vector->rx.latency_range = I40E_LOW_LATENCY;
+}
+
+/**
+ * i40evf_map_vector_to_txq - associate irqs with tx queues
+ * @adapter: board private structure
+ * @v_idx: interrupt number
+ * @t_idx: queue number
+ **/
+static void
+i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
+{
+ struct i40e_q_vector *q_vector = adapter->q_vector[v_idx];
+ struct i40e_ring *tx_ring = adapter->tx_rings[t_idx];
+
+ tx_ring->q_vector = q_vector;
+ tx_ring->next = q_vector->tx.ring;
+ tx_ring->vsi = &adapter->vsi;
+ q_vector->tx.ring = tx_ring;
+ q_vector->tx.count++;
+ q_vector->tx.latency_range = I40E_LOW_LATENCY;
+ q_vector->num_ringpairs++;
+ q_vector->ring_mask |= (1 << t_idx);
+}
+
+/**
+ * i40evf_map_rings_to_vectors - Maps descriptor rings to vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function maps descriptor rings to the queue-specific vectors
+ * we were allotted through the MSI-X enabling code. Ideally, we'd have
+ * one vector per ring/queue, but on a constrained vector budget, we
+ * group the rings as "efficiently" as possible. You would add new
+ * mapping configurations in here.
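+ *
+ * Illustrative example: with 4 queue pairs and 2 usable vectors, the
+ * DIV_ROUND_UP() grouping below places rx/tx rings 0-1 on vector 0 and
+ * rings 2-3 on vector 1.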
+ **/
+static int i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter)
+{
+ int q_vectors;
+ int v_start = 0;
+ int rxr_idx = 0, txr_idx = 0;
+ int rxr_remaining = adapter->vsi_res->num_queue_pairs;
+ int txr_remaining = adapter->vsi_res->num_queue_pairs;
+ int i, j;
+ int rqpv, tqpv;
+ int err = 0;
+
+ q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+ /* The ideal configuration...
+ * We have enough vectors to map one per queue.
+ */
+ if (q_vectors == (rxr_remaining * 2)) {
+ for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
+ i40evf_map_vector_to_rxq(adapter, v_start, rxr_idx);
+
+ for (; txr_idx < txr_remaining; v_start++, txr_idx++)
+ i40evf_map_vector_to_txq(adapter, v_start, txr_idx);
+ goto out;
+ }
+
+ /* If we don't have enough vectors for a 1-to-1
+ * mapping, we'll have to group them so there are
+ * multiple queues per vector.
+ * Re-adjusting *qpv takes care of the remainder.
+ */
+ for (i = v_start; i < q_vectors; i++) {
+ rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
+ for (j = 0; j < rqpv; j++) {
+ i40evf_map_vector_to_rxq(adapter, i, rxr_idx);
+ rxr_idx++;
+ rxr_remaining--;
+ }
+ }
+ for (i = v_start; i < q_vectors; i++) {
+ tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
+ for (j = 0; j < tqpv; j++) {
+ i40evf_map_vector_to_txq(adapter, i, txr_idx);
+ txr_idx++;
+ txr_remaining--;
+ }
+ }
+
+out:
+ adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
+
+ return err;
+}
+
+/**
+ * i40evf_request_traffic_irqs - Initialize MSI-X interrupts
+ * @adapter: board private structure
+ * @basename: device name used to build the per-vector interrupt names
+ *
+ * Allocates MSI-X vectors for tx and rx handling, and requests
+ * interrupts from the kernel.
+ **/
+static int
+i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
+{
+ int vector, err, q_vectors;
+ int rx_int_idx = 0, tx_int_idx = 0;
+
+ i40evf_irq_disable(adapter);
+ /* Decrement for the Other (non-queue) vector */
+ q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+ for (vector = 0; vector < q_vectors; vector++) {
+ struct i40e_q_vector *q_vector = adapter->q_vector[vector];
+
+ if (q_vector->tx.ring && q_vector->rx.ring) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "i40evf-%s-%s-%d", basename,
+ "TxRx", rx_int_idx++);
+ tx_int_idx++;
+ } else if (q_vector->rx.ring) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "i40evf-%s-%s-%d", basename,
+ "rx", rx_int_idx++);
+ } else if (q_vector->tx.ring) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "i40evf-%s-%s-%d", basename,
+ "tx", tx_int_idx++);
+ } else {
+ /* skip this unused q_vector */
+ continue;
+ }
+ err = request_irq(
+ adapter->msix_entries[vector + NONQ_VECS].vector,
+ i40evf_msix_clean_rings,
+ 0,
+ q_vector->name,
+ q_vector);
+ if (err) {
+ dev_info(&adapter->pdev->dev,
+ "%s: request_irq failed, error: %d\n",
+ __func__, err);
+ goto free_queue_irqs;
+ }
+ /* assign the mask for this irq */
+ irq_set_affinity_hint(
+ adapter->msix_entries[vector + NONQ_VECS].vector,
+ q_vector->affinity_mask);
+ }
+
+ return 0;
+
+free_queue_irqs:
+ while (vector) {
+ vector--;
+ irq_set_affinity_hint(
+ adapter->msix_entries[vector + NONQ_VECS].vector,
+ NULL);
+ free_irq(adapter->msix_entries[vector + NONQ_VECS].vector,
+ adapter->q_vector[vector]);
+ }
+ return err;
+}
+
+/**
+ * i40evf_request_misc_irq - Initialize MSI-X interrupts
+ * @adapter: board private structure
+ *
+ * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
+ * vector is only for the admin queue, and stays active even when the netdev
+ * is closed.
+ **/
+static int i40evf_request_misc_irq(struct i40evf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ sprintf(adapter->name[0], "i40evf:mbx");
+ err = request_irq(adapter->msix_entries[0].vector,
+ &i40evf_msix_aq, 0, adapter->name[0], netdev);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "request_irq for msix_aq failed: %d\n", err);
+ }
+ return err;
+}
+
+/**
+ * i40evf_free_traffic_irqs - Free MSI-X interrupts
+ * @adapter: board private structure
+ *
+ * Frees all MSI-X vectors other than 0.
+ **/
+static void i40evf_free_traffic_irqs(struct i40evf_adapter *adapter)
+{
+ int i;
+	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+ for (i = 0; i < q_vectors; i++) {
+ irq_set_affinity_hint(adapter->msix_entries[i+1].vector,
+ NULL);
+ free_irq(adapter->msix_entries[i+1].vector,
+ adapter->q_vector[i]);
+ }
+}
+
+/**
+ * i40evf_free_misc_irq - Free MSI-X miscellaneous vector
+ * @adapter: board private structure
+ *
+ * Frees MSI-X vector 0.
+ **/
+static void i40evf_free_misc_irq(struct i40evf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ free_irq(adapter->msix_entries[0].vector, netdev);
+}
+
+/**
+ * i40evf_configure_tx - Configure Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void i40evf_configure_tx(struct i40evf_adapter *adapter)
+{
+ struct i40e_hw *hw = &adapter->hw;
+	int i;
+
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+ adapter->tx_rings[i]->tail = hw->hw_addr + I40E_QTX_TAIL1(i);
+}
+
+/**
+ * i40evf_configure_rx - Configure Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void i40evf_configure_rx(struct i40evf_adapter *adapter)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+ int i;
+ int rx_buf_len;
+
+ adapter->flags &= ~I40EVF_FLAG_RX_PS_CAPABLE;
+ adapter->flags |= I40EVF_FLAG_RX_1BUF_CAPABLE;
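+	/* Note: with the capability flags forced as above, the logic below
+	 * always leaves packet split disabled.
+	 */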
+
+ /* Decide whether to use packet split mode or not */
+ if (netdev->mtu > ETH_DATA_LEN) {
+ if (adapter->flags & I40EVF_FLAG_RX_PS_CAPABLE)
+ adapter->flags |= I40EVF_FLAG_RX_PS_ENABLED;
+ else
+ adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED;
+ } else {
+ if (adapter->flags & I40EVF_FLAG_RX_1BUF_CAPABLE)
+ adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED;
+ else
+ adapter->flags |= I40EVF_FLAG_RX_PS_ENABLED;
+ }
+
+ /* Set the RX buffer length according to the mode */
+ if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) {
+ rx_buf_len = I40E_RX_HDR_SIZE;
+ } else {
+ if (netdev->mtu <= ETH_DATA_LEN)
+ rx_buf_len = I40EVF_RXBUFFER_2048;
+ else
+ rx_buf_len = ALIGN(max_frame, 1024);
+ }
+
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ adapter->rx_rings[i]->tail = hw->hw_addr + I40E_QRX_TAIL1(i);
+ adapter->rx_rings[i]->rx_buf_len = rx_buf_len;
+ }
+}
+
+/**
+ * i40evf_find_vlan - Search filter list for specific vlan filter
+ * @adapter: board private structure
+ * @vlan: vlan tag
+ *
+ * Returns ptr to the filter object or NULL
+ **/
+static struct
+i40evf_vlan_filter *i40evf_find_vlan(struct i40evf_adapter *adapter, u16 vlan)
+{
+ struct i40evf_vlan_filter *f;
+
+ list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+ if (vlan == f->vlan)
+ return f;
+ }
+ return NULL;
+}
+
+/**
+ * i40evf_add_vlan - Add a vlan filter to the list
+ * @adapter: board private structure
+ * @vlan: VLAN tag
+ *
+ * Returns ptr to the filter object or NULL when no memory available.
+ **/
+static struct
+i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
+{
+ struct i40evf_vlan_filter *f;
+
+ f = i40evf_find_vlan(adapter, vlan);
+	if (!f) {
+ f = kzalloc(sizeof(*f), GFP_ATOMIC);
+		if (!f) {
+ dev_info(&adapter->pdev->dev,
+ "%s: no memory for new VLAN filter\n",
+ __func__);
+ return NULL;
+ }
+ f->vlan = vlan;
+
+ INIT_LIST_HEAD(&f->list);
+ list_add(&f->list, &adapter->vlan_filter_list);
+ f->add = true;
+ adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+ }
+
+ return f;
+}
+
+/**
+ * i40evf_del_vlan - Remove a vlan filter from the list
+ * @adapter: board private structure
+ * @vlan: VLAN tag
+ **/
+static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan)
+{
+ struct i40evf_vlan_filter *f;
+
+ f = i40evf_find_vlan(adapter, vlan);
+ if (f) {
+ f->remove = true;
+ adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+ }
+}
+
+/**
+ * i40evf_vlan_rx_add_vid - Add a VLAN filter to a device
+ * @netdev: network device struct
+ * @vid: VLAN tag
+ **/
+static int i40evf_vlan_rx_add_vid(struct net_device *netdev,
+ __always_unused __be16 proto, u16 vid)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ if (i40evf_add_vlan(adapter, vid) == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+/**
+ * i40evf_vlan_rx_kill_vid - Remove a VLAN filter from a device
+ * @netdev: network device struct
+ * @vid: VLAN tag
+ **/
+static int i40evf_vlan_rx_kill_vid(struct net_device *netdev,
+ __always_unused __be16 proto, u16 vid)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ i40evf_del_vlan(adapter, vid);
+ return 0;
+}
+
+/**
+ * i40evf_find_filter - Search filter list for specific mac filter
+ * @adapter: board private structure
+ * @macaddr: the MAC address
+ *
+ * Returns ptr to the filter object or NULL
+ **/
+static struct
+i40evf_mac_filter *i40evf_find_filter(struct i40evf_adapter *adapter,
+ u8 *macaddr)
+{
+ struct i40evf_mac_filter *f;
+
+ if (!macaddr)
+ return NULL;
+
+ list_for_each_entry(f, &adapter->mac_filter_list, list) {
+ if (ether_addr_equal(macaddr, f->macaddr))
+ return f;
+ }
+ return NULL;
+}
+
+/**
+ * i40evf_add_filter - Add a mac filter to the filter list
+ * @adapter: board private structure
+ * @macaddr: the MAC address
+ *
+ * Returns ptr to the filter object or NULL when no memory available.
+ **/
+static struct
+i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
+ u8 *macaddr)
+{
+ struct i40evf_mac_filter *f;
+
+ if (!macaddr)
+ return NULL;
+
+ while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section))
+ mdelay(1);
+
+ f = i40evf_find_filter(adapter, macaddr);
+	if (!f) {
+ f = kzalloc(sizeof(*f), GFP_ATOMIC);
+		if (!f) {
+ dev_info(&adapter->pdev->dev,
+ "%s: no memory for new filter\n", __func__);
+ clear_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section);
+ return NULL;
+ }
+
+ memcpy(f->macaddr, macaddr, ETH_ALEN);
+
+ list_add(&f->list, &adapter->mac_filter_list);
+ f->add = true;
+ adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+ }
+
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ return f;
+}
+
+/**
+ * i40evf_set_mac - NDO callback to set port mac address
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40evf_set_mac(struct net_device *netdev, void *p)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_hw *hw = &adapter->hw;
+ struct i40evf_mac_filter *f;
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
+ return 0;
+
+ f = i40evf_add_filter(adapter, addr->sa_data);
+ if (f) {
+ memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
+ memcpy(netdev->dev_addr, adapter->hw.mac.addr,
+ netdev->addr_len);
+ }
+
+ return (f == NULL) ? -ENOMEM : 0;
+}
+
+/**
+ * i40evf_set_rx_mode - NDO callback to set the netdev filters
+ * @netdev: network interface device structure
+ **/
+static void i40evf_set_rx_mode(struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40evf_mac_filter *f, *ftmp;
+ struct netdev_hw_addr *uca;
+ struct netdev_hw_addr *mca;
+
+ /* add addr if not already in the filter list */
+ netdev_for_each_uc_addr(uca, netdev) {
+ i40evf_add_filter(adapter, uca->addr);
+ }
+ netdev_for_each_mc_addr(mca, netdev) {
+ i40evf_add_filter(adapter, mca->addr);
+ }
+
+ while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section))
+ mdelay(1);
+ /* remove filter if not in netdev list */
+ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
+ bool found = false;
+
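+		/* multicast addresses have the least-significant bit of the
+		 * first octet set
+		 */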
+ if (f->macaddr[0] & 0x01) {
+ netdev_for_each_mc_addr(mca, netdev) {
+ if (ether_addr_equal(mca->addr, f->macaddr)) {
+ found = true;
+ break;
+ }
+ }
+ } else {
+ netdev_for_each_uc_addr(uca, netdev) {
+ if (ether_addr_equal(uca->addr, f->macaddr)) {
+ found = true;
+ break;
+ }
+ }
+ }
+		if (!found) {
+ f->remove = true;
+ adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+ }
+ }
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+}
+
+/**
+ * i40evf_napi_enable_all - enable NAPI on all queue vectors
+ * @adapter: board private structure
+ **/
+static void i40evf_napi_enable_all(struct i40evf_adapter *adapter)
+{
+ int q_idx;
+ struct i40e_q_vector *q_vector;
+ int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+ for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+ struct napi_struct *napi;
+ q_vector = adapter->q_vector[q_idx];
+ napi = &q_vector->napi;
+ napi_enable(napi);
+ }
+}
+
+/**
+ * i40evf_napi_disable_all - disable NAPI on all queue vectors
+ * @adapter: board private structure
+ **/
+static void i40evf_napi_disable_all(struct i40evf_adapter *adapter)
+{
+ int q_idx;
+ struct i40e_q_vector *q_vector;
+ int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+ for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+ q_vector = adapter->q_vector[q_idx];
+ napi_disable(&q_vector->napi);
+ }
+}
+
+/**
+ * i40evf_configure - set up transmit and receive data structures
+ * @adapter: board private structure
+ **/
+static void i40evf_configure(struct i40evf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int i;
+
+ i40evf_set_rx_mode(netdev);
+
+ i40evf_configure_tx(adapter);
+ i40evf_configure_rx(adapter);
+ adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
+
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ struct i40e_ring *ring = adapter->rx_rings[i];
+ i40evf_alloc_rx_buffers(ring, ring->count);
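+		/* leave one descriptor unused so the tail never catches the
+		 * head
+		 */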
+ ring->next_to_use = ring->count - 1;
+ writel(ring->next_to_use, ring->tail);
+ }
+}
+
+/**
+ * i40evf_up_complete - Finish the last steps of bringing up a connection
+ * @adapter: board private structure
+ **/
+static int i40evf_up_complete(struct i40evf_adapter *adapter)
+{
+ adapter->state = __I40EVF_RUNNING;
+ clear_bit(__I40E_DOWN, &adapter->vsi.state);
+
+ i40evf_napi_enable_all(adapter);
+
+ adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
+ mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
+ return 0;
+}
+
+/**
+ * i40evf_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void i40evf_clean_all_rx_rings(struct i40evf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+ i40evf_clean_rx_ring(adapter->rx_rings[i]);
+}
+
+/**
+ * i40evf_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void i40evf_clean_all_tx_rings(struct i40evf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+ i40evf_clean_tx_ring(adapter->tx_rings[i]);
+}
+
+/**
+ * i40evf_down - Shutdown the connection processing
+ * @adapter: board private structure
+ **/
+void i40evf_down(struct i40evf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct i40evf_mac_filter *f;
+
+ /* remove all MAC filters from the VSI */
+ list_for_each_entry(f, &adapter->mac_filter_list, list) {
+ f->remove = true;
+ }
+ adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+ /* disable receives */
+ adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
+ mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
+ msleep(20);
+
+ netif_tx_disable(netdev);
+
+ netif_tx_stop_all_queues(netdev);
+
+ i40evf_irq_disable(adapter);
+
+ i40evf_napi_disable_all(adapter);
+
+ netif_carrier_off(netdev);
+
+ i40evf_clean_all_tx_rings(adapter);
+ i40evf_clean_all_rx_rings(adapter);
+}
+
+/**
+ * i40evf_acquire_msix_vectors - Setup the MSIX capability
+ * @adapter: board private structure
+ * @vectors: number of vectors to request
+ *
+ * Work with the OS to set up the MSIX vectors needed.
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int
+i40evf_acquire_msix_vectors(struct i40evf_adapter *adapter, int vectors)
+{
+ int err, vector_threshold;
+
+ /* We'll want at least 3 (vector_threshold):
+ * 0) Other (Admin Queue and link, mostly)
+ * 1) TxQ[0] Cleanup
+ * 2) RxQ[0] Cleanup
+ */
+ vector_threshold = MIN_MSIX_COUNT;
+
+ /* The more we get, the more we will assign to Tx/Rx Cleanup
+ * for the separate queues...where Rx Cleanup >= Tx Cleanup.
+ * Right now, we simply care about how many we'll get; we'll
+ * set them up later while requesting irq's.
+ */
+ while (vectors >= vector_threshold) {
+ err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
+ vectors);
+ if (!err) /* Success in acquiring all requested vectors. */
+ break;
+ else if (err < 0)
+ vectors = 0; /* Nasty failure, quit now */
+ else /* err == number of vectors we should try again with */
+ vectors = err;
+ }
+
+ if (vectors < vector_threshold) {
+ dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts.\n");
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ err = -EIO;
+ } else {
+ /* Adjust for only the vectors we'll use, which is minimum
+ * of max_msix_q_vectors + NONQ_VECS, or the number of
+ * vectors we were allocated.
+ */
+ adapter->num_msix_vectors = vectors;
+ }
+ return err;
+}
+
+/**
+ * i40evf_free_queues - Free memory for all rings
+ * @adapter: board private structure to initialize
+ *
+ * Free all of the memory associated with queue pairs.
+ **/
+static void i40evf_free_queues(struct i40evf_adapter *adapter)
+{
+ int i;
+
+ if (!adapter->vsi_res)
+ return;
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ if (adapter->tx_rings[i])
+ kfree_rcu(adapter->tx_rings[i], rcu);
+ adapter->tx_rings[i] = NULL;
+ adapter->rx_rings[i] = NULL;
+ }
+}
+
+/**
+ * i40evf_alloc_queues - Allocate memory for all rings
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one ring per queue at run-time since we don't know the
+ * number of queues at compile-time.
+ **/
+static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ struct i40e_ring *tx_ring;
+ struct i40e_ring *rx_ring;
+
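+		/* the Tx and Rx rings for this pair are allocated as one
+		 * block; i40evf_free_queues() frees only the Tx pointer
+		 */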
+ tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
+ if (!tx_ring)
+ goto err_out;
+
+ tx_ring->queue_index = i;
+ tx_ring->netdev = adapter->netdev;
+ tx_ring->dev = &adapter->pdev->dev;
+ tx_ring->count = I40EVF_DEFAULT_TXD;
+ adapter->tx_rings[i] = tx_ring;
+
+ rx_ring = &tx_ring[1];
+ rx_ring->queue_index = i;
+ rx_ring->netdev = adapter->netdev;
+ rx_ring->dev = &adapter->pdev->dev;
+ rx_ring->count = I40EVF_DEFAULT_RXD;
+ adapter->rx_rings[i] = rx_ring;
+ }
+
+ return 0;
+
+err_out:
+ i40evf_free_queues(adapter);
+ return -ENOMEM;
+}
+
+/**
+ * i40evf_set_interrupt_capability - set MSI-X or FAIL if not supported
+ * @adapter: board private structure to initialize
+ *
+ * Attempt to configure the interrupts using the best available
+ * capabilities of the hardware and the kernel.
+ **/
+static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
+{
+ int vector, v_budget;
+ int pairs = 0;
+ int err = 0;
+
+ if (!adapter->vsi_res) {
+ err = -EIO;
+ goto out;
+ }
+ pairs = adapter->vsi_res->num_queue_pairs;
+
+ /* It's easy to be greedy for MSI-X vectors, but it really
+ * doesn't do us much good if we have a lot more vectors
+	 * than CPUs. So let's be conservative and only ask for
+	 * (roughly) twice the number of vectors as there are CPUs.
+ */
+ v_budget = min(pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS;
+ v_budget = min(v_budget, (int)adapter->vf_res->max_vectors + 1);
+
+ /* A failure in MSI-X entry allocation isn't fatal, but it does
+ * mean we disable MSI-X capabilities of the adapter.
+ */
+ adapter->msix_entries = kcalloc(v_budget,
+ sizeof(struct msix_entry), GFP_KERNEL);
+ if (!adapter->msix_entries) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ for (vector = 0; vector < v_budget; vector++)
+ adapter->msix_entries[vector].entry = vector;
+
+	err = i40evf_acquire_msix_vectors(adapter, v_budget);
+
+out:
+ adapter->netdev->real_num_tx_queues = pairs;
+ return err;
+}
+
+/**
+ * i40evf_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
+ **/
+static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
+{
+ int q_idx, num_q_vectors;
+ struct i40e_q_vector *q_vector;
+
+ num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+ for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+ q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
+ if (!q_vector)
+ goto err_out;
+ q_vector->adapter = adapter;
+ q_vector->vsi = &adapter->vsi;
+ q_vector->v_idx = q_idx;
+ netif_napi_add(adapter->netdev, &q_vector->napi,
+ i40evf_napi_poll, 64);
+ adapter->q_vector[q_idx] = q_vector;
+ }
+
+ return 0;
+
+err_out:
+ while (q_idx) {
+ q_idx--;
+ q_vector = adapter->q_vector[q_idx];
+ netif_napi_del(&q_vector->napi);
+ kfree(q_vector);
+ adapter->q_vector[q_idx] = NULL;
+ }
+ return -ENOMEM;
+}
+
+/**
+ * i40evf_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void i40evf_free_q_vectors(struct i40evf_adapter *adapter)
+{
+ int q_idx, num_q_vectors;
+ int napi_vectors;
+
+ num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+ napi_vectors = adapter->vsi_res->num_queue_pairs;
+
+ for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+ struct i40e_q_vector *q_vector = adapter->q_vector[q_idx];
+
+ adapter->q_vector[q_idx] = NULL;
+ if (q_idx < napi_vectors)
+ netif_napi_del(&q_vector->napi);
+ kfree(q_vector);
+ }
+}
+
+/**
+ * i40evf_reset_interrupt_capability - Reset MSI-X setup
+ * @adapter: board private structure
+ **/
+void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter)
+{
+ pci_disable_msix(adapter->pdev);
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+}
+
+/**
+ * i40evf_init_interrupt_scheme - Determine if MSIX is supported and init
+ * @adapter: board private structure to initialize
+ **/
+int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter)
+{
+ int err;
+
+ err = i40evf_set_interrupt_capability(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Unable to setup interrupt capabilities\n");
+ goto err_set_interrupt;
+ }
+
+ err = i40evf_alloc_q_vectors(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Unable to allocate memory for queue vectors\n");
+ goto err_alloc_q_vectors;
+ }
+
+ err = i40evf_alloc_queues(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Unable to allocate memory for queues\n");
+ goto err_alloc_queues;
+ }
+
+ dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
+ (adapter->vsi_res->num_queue_pairs > 1) ? "Enabled" :
+ "Disabled", adapter->vsi_res->num_queue_pairs);
+
+ return 0;
+err_alloc_queues:
+ i40evf_free_q_vectors(adapter);
+err_alloc_q_vectors:
+ i40evf_reset_interrupt_capability(adapter);
+err_set_interrupt:
+ return err;
+}
+
+/**
+ * i40evf_watchdog_timer - Periodic call-back timer
+ * @data: pointer to adapter disguised as unsigned long
+ **/
+static void i40evf_watchdog_timer(unsigned long data)
+{
+ struct i40evf_adapter *adapter = (struct i40evf_adapter *)data;
+ schedule_work(&adapter->watchdog_task);
+ /* timer will be rescheduled in watchdog task */
+}
+
+/**
+ * i40evf_watchdog_task - Periodic call-back task
+ * @work: pointer to work_struct
+ **/
+static void i40evf_watchdog_task(struct work_struct *work)
+{
+ struct i40evf_adapter *adapter = container_of(work,
+ struct i40evf_adapter,
+ watchdog_task);
+ struct i40e_hw *hw = &adapter->hw;
+
+ if (adapter->state < __I40EVF_DOWN)
+ goto watchdog_done;
+
+ if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section))
+ goto watchdog_done;
+
+ /* check for unannounced reset */
+ if ((adapter->state != __I40EVF_RESETTING) &&
+	    (rd32(hw, I40E_VFGEN_RSTAT) &
+	     I40E_VFGEN_RSTAT_VFR_STATE_MASK) != I40E_VFR_VFACTIVE) {
+ adapter->state = __I40EVF_RESETTING;
+ schedule_work(&adapter->reset_task);
+ dev_info(&adapter->pdev->dev, "%s: hardware reset detected\n",
+ __func__);
+ goto watchdog_done;
+ }
+
+ /* Process admin queue tasks. After init, everything gets done
+ * here so we don't race on the admin queue.
+ */
+ if (adapter->aq_pending)
+ goto watchdog_done;
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_MAP_VECTORS) {
+ i40evf_map_queues(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_MAC_FILTER) {
+ i40evf_add_ether_addrs(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_VLAN_FILTER) {
+ i40evf_add_vlans(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_MAC_FILTER) {
+ i40evf_del_ether_addrs(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_VLAN_FILTER) {
+ i40evf_del_vlans(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) {
+ i40evf_disable_queues(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_QUEUES) {
+ i40evf_configure_queues(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_QUEUES) {
+ i40evf_enable_queues(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->state == __I40EVF_RUNNING)
+ i40evf_request_stats(adapter);
+
+ i40evf_irq_enable(adapter, true);
+ i40evf_fire_sw_int(adapter, 0xFF);
+watchdog_done:
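+	/* poll again in 20ms while AQ requests are outstanding, otherwise
+	 * back off to a two second cadence
+	 */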
+ if (adapter->aq_required)
+ mod_timer(&adapter->watchdog_timer,
+ jiffies + msecs_to_jiffies(20));
+ else
+ mod_timer(&adapter->watchdog_timer, jiffies + (HZ * 2));
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ schedule_work(&adapter->adminq_task);
+}
+
+/**
+ * i40evf_configure_rss - Prepare for RSS if used
+ * @adapter: board private structure
+ **/
+static void i40evf_configure_rss(struct i40evf_adapter *adapter)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ u32 lut = 0;
+ int i, j;
+ u64 hena;
+
+ /* Set of random keys generated using kernel random number generator */
+ static const u32 seed[I40E_VFQF_HKEY_MAX_INDEX + 1] = {
+ 0x794221b4, 0xbca0c5ab, 0x6cd5ebd9, 0x1ada6127,
+ 0x983b3aa1, 0x1c4e71eb, 0x7f6328b2, 0xfcdc0da0,
+ 0xc135cafa, 0x7a6f7e2d, 0xe7102d28, 0x163cd12e,
+ 0x4954b126 };
+
+ /* Hash type is configured by the PF - we just supply the key */
+
+ /* Fill out hash function seed */
+ for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
+ wr32(hw, I40E_VFQF_HKEY(i), seed[i]);
+
+ /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
+ hena = I40E_DEFAULT_RSS_HENA;
+ wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
+ wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
+
+ /* Populate the LUT with max no. of queues in round robin fashion */
+	for (i = 0, j = 0; i < (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4; i++, j++) {
+		if (j == adapter->vsi_res->num_queue_pairs)
+			j = 0;
+		/* lut = 4-byte sliding window of 4 lut entries */
+		lut = (lut << 8) | (j & 0xff);
+ /* On i = 3, we have 4 entries in lut; write to the register */
+ if ((i & 3) == 3)
+ wr32(hw, I40E_VFQF_HLUT(i >> 2), lut);
+ }
+ i40e_flush(hw);
+}
+
+/**
+ * i40evf_reset_task - Call-back task to handle hardware reset
+ * @work: pointer to work_struct
+ *
+ * During reset we need to shut down and reinitialize the admin queue
+ * before we can use it to communicate with the PF again. We also clear
+ * and reinit the rings because that context is lost as well.
+ **/
+static void i40evf_reset_task(struct work_struct *work)
+{
+ struct i40evf_adapter *adapter =
+ container_of(work, struct i40evf_adapter, reset_task);
+ struct i40e_hw *hw = &adapter->hw;
+ int i = 0, err;
+	u32 rstat_val;
+
+ while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section))
+ udelay(500);
+
+ /* wait until the reset is complete */
+ for (i = 0; i < 20; i++) {
+ rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
+ I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+		if (rstat_val == I40E_VFR_COMPLETED)
+			break;
+		mdelay(100);
+ }
+ if (i == 20) {
+ /* reset never finished */
+ dev_info(&adapter->pdev->dev, "%s: reset never finished: %x\n",
+ __func__, rstat_val);
+ /* carry on anyway */
+ }
+ i40evf_down(adapter);
+ adapter->state = __I40EVF_RESETTING;
+
+ /* kill and reinit the admin queue */
+ if (i40evf_shutdown_adminq(hw))
+ dev_warn(&adapter->pdev->dev,
+ "%s: Failed to destroy the Admin Queue resources\n",
+ __func__);
+ err = i40evf_init_adminq(hw);
+ if (err)
+ dev_info(&adapter->pdev->dev, "%s: init_adminq failed: %d\n",
+ __func__, err);
+
+ adapter->aq_pending = 0;
+ adapter->aq_required = 0;
+ i40evf_map_queues(adapter);
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
+ mod_timer(&adapter->watchdog_timer, jiffies + 2);
+
+ if (netif_running(adapter->netdev)) {
+ /* allocate transmit descriptors */
+ err = i40evf_setup_all_tx_resources(adapter);
+ if (err)
+ goto reset_err;
+
+ /* allocate receive descriptors */
+ err = i40evf_setup_all_rx_resources(adapter);
+ if (err)
+ goto reset_err;
+
+ i40evf_configure(adapter);
+
+ err = i40evf_up_complete(adapter);
+ if (err)
+ goto reset_err;
+
+ i40evf_irq_enable(adapter, true);
+ }
+ return;
+reset_err:
+ dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit.\n");
+ i40evf_close(adapter->netdev);
+}
+
+/**
+ * i40evf_adminq_task - worker thread to clean the admin queue
+ * @work: pointer to work_struct containing our data
+ **/
+static void i40evf_adminq_task(struct work_struct *work)
+{
+ struct i40evf_adapter *adapter =
+ container_of(work, struct i40evf_adapter, adminq_task);
+ struct i40e_hw *hw = &adapter->hw;
+ struct i40e_arq_event_info event;
+ struct i40e_virtchnl_msg *v_msg;
+ i40e_status ret;
+ u16 pending;
+
+ event.msg_size = I40EVF_MAX_AQ_BUF_SIZE;
+ event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
+ if (!event.msg_buf) {
+ dev_info(&adapter->pdev->dev, "%s: no memory for ARQ clean\n",
+ __func__);
+ return;
+ }
+ v_msg = (struct i40e_virtchnl_msg *)&event.desc;
+ do {
+ ret = i40evf_clean_arq_element(hw, &event, &pending);
+ if (ret)
+ break; /* No event to process or error cleaning ARQ */
+
+ i40evf_virtchnl_completion(adapter, v_msg->v_opcode,
+ v_msg->v_retval, event.msg_buf,
+ event.msg_size);
+ if (pending != 0) {
+ dev_info(&adapter->pdev->dev,
+ "%s: ARQ: Pending events %d\n",
+ __func__, pending);
+ memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE);
+ }
+ } while (pending);
+
+ /* re-enable Admin queue interrupt cause */
+ i40evf_misc_irq_enable(adapter);
+
+ kfree(event.msg_buf);
+}
+
+/**
+ * i40evf_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+static void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+ if (adapter->tx_rings[i]->desc)
+ i40evf_free_tx_resources(adapter->tx_rings[i]);
+}
+
+/**
+ * i40evf_setup_all_tx_resources - allocate all queues Tx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * callers duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ err = i40evf_setup_tx_descriptors(adapter->tx_rings[i]);
+ if (!err)
+ continue;
+ dev_err(&adapter->pdev->dev,
+ "%s: Allocation for Tx Queue %u failed\n",
+ __func__, i);
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * i40evf_setup_all_rx_resources - allocate all queues Rx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * callers duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ err = i40evf_setup_rx_descriptors(adapter->rx_rings[i]);
+ if (!err)
+ continue;
+ dev_err(&adapter->pdev->dev,
+ "%s: Allocation for Rx Queue %u failed\n",
+ __func__, i);
+ break;
+ }
+ return err;
+}
+
+/**
+ * i40evf_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+static void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+ if (adapter->rx_rings[i]->desc)
+ i40evf_free_rx_resources(adapter->rx_rings[i]);
+}
+
+/**
+ * i40evf_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+static int i40evf_open(struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ int err;
+
+ if (adapter->state != __I40EVF_DOWN)
+ return -EBUSY;
+
+ /* allocate transmit descriptors */
+ err = i40evf_setup_all_tx_resources(adapter);
+ if (err)
+ goto err_setup_tx;
+
+ /* allocate receive descriptors */
+ err = i40evf_setup_all_rx_resources(adapter);
+ if (err)
+ goto err_setup_rx;
+
+	/* register handlers for all of the traffic interrupt vectors */
+ err = i40evf_request_traffic_irqs(adapter, netdev->name);
+ if (err)
+ goto err_req_irq;
+
+ i40evf_configure(adapter);
+
+ err = i40evf_up_complete(adapter);
+ if (err)
+ goto err_req_irq;
+
+ i40evf_irq_enable(adapter, true);
+
+ return 0;
+
+err_req_irq:
+ i40evf_down(adapter);
+ i40evf_free_traffic_irqs(adapter);
+err_setup_rx:
+ i40evf_free_all_rx_resources(adapter);
+err_setup_tx:
+ i40evf_free_all_tx_resources(adapter);
+
+ return err;
+}
+
+/**
+ * i40evf_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the drivers control, but
+ * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
+ * are freed, along with all transmit and receive resources.
+ **/
+static int i40evf_close(struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ /* signal that we are down to the interrupt handler */
+ adapter->state = __I40EVF_DOWN;
+ set_bit(__I40E_DOWN, &adapter->vsi.state);
+
+ i40evf_down(adapter);
+ i40evf_free_traffic_irqs(adapter);
+
+ i40evf_free_all_tx_resources(adapter);
+ i40evf_free_all_rx_resources(adapter);
+
+ return 0;
+}
+
+/**
+ * i40evf_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the timer callback.
+ **/
+static struct net_device_stats *i40evf_get_stats(struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ /* only return the current stats */
+ return &adapter->net_stats;
+}
+
+/**
+ * i40evf_reinit_locked - Software reinit
+ * @adapter: board private structure
+ *
+ * Reinitializes the ring structures in response to a software configuration
+ * change. Roughly the same as close followed by open, but skips releasing
+ * and reallocating the interrupts.
+ **/
+void i40evf_reinit_locked(struct i40evf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ WARN_ON(in_interrupt());
+
+ adapter->state = __I40EVF_RESETTING;
+
+ i40evf_down(adapter);
+
+ /* allocate transmit descriptors */
+ err = i40evf_setup_all_tx_resources(adapter);
+ if (err)
+ goto err_reinit;
+
+ /* allocate receive descriptors */
+ err = i40evf_setup_all_rx_resources(adapter);
+ if (err)
+ goto err_reinit;
+
+ i40evf_configure(adapter);
+
+ err = i40evf_up_complete(adapter);
+ if (err)
+ goto err_reinit;
+
+ i40evf_irq_enable(adapter, true);
+ return;
+
+err_reinit:
+ dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit.\n");
+ i40evf_close(netdev);
+}
+
+/**
+ * i40evf_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40evf_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+
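+	/* 68 bytes is the minimum IPv4 MTU (RFC 791) */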
+ if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
+ return -EINVAL;
+
+ /* must set new MTU before calling down or up */
+ netdev->mtu = new_mtu;
+ i40evf_reinit_locked(adapter);
+ return 0;
+}
+
+static const struct net_device_ops i40evf_netdev_ops = {
+ .ndo_open = i40evf_open,
+ .ndo_stop = i40evf_close,
+ .ndo_start_xmit = i40evf_xmit_frame,
+ .ndo_get_stats = i40evf_get_stats,
+ .ndo_set_rx_mode = i40evf_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = i40evf_set_mac,
+ .ndo_change_mtu = i40evf_change_mtu,
+ .ndo_tx_timeout = i40evf_tx_timeout,
+ .ndo_vlan_rx_add_vid = i40evf_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = i40evf_vlan_rx_kill_vid,
+};
+
+/**
+ * i40evf_check_reset_complete - check that VF reset is complete
+ * @hw: pointer to hw struct
+ *
+ * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
+ **/
+static int i40evf_check_reset_complete(struct i40e_hw *hw)
+{
+ u32 rstat;
+ int i;
+
+ for (i = 0; i < 100; i++) {
+		rstat = rd32(hw, I40E_VFGEN_RSTAT) &
+			I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+ if (rstat == I40E_VFR_VFACTIVE)
+ return 0;
+ udelay(10);
+ }
+ return -EBUSY;
+}
+
+/**
+ * i40evf_init_task - worker thread to perform delayed initialization
+ * @work: pointer to work_struct containing our data
+ *
+ * This task completes the work that was begun in probe. Due to the nature
+ * of VF-PF communications, we may need to wait tens of milliseconds to get
+ * responses back from the PF. Rather than busy-wait in probe and bog down the
+ * whole system, we'll do it in a task so we can sleep.
+ * This task only runs during driver init. Once we've established
+ * communications with the PF driver and set up our netdev, the watchdog
+ * takes over.
+ **/
+static void i40evf_init_task(struct work_struct *work)
+{
+ struct i40evf_adapter *adapter = container_of(work,
+ struct i40evf_adapter,
+ init_task.work);
+ struct net_device *netdev = adapter->netdev;
+ struct i40evf_mac_filter *f;
+ struct i40e_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+ int i, err, bufsz;
+
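+	/* Walk the init state machine. Each state sends one AQ request to
+	 * the PF and reschedules this task until the reply arrives.
+	 */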
+ switch (adapter->state) {
+ case __I40EVF_STARTUP:
+ /* driver loaded, probe complete */
+ err = i40e_set_mac_type(hw);
+ if (err) {
+ dev_info(&pdev->dev, "%s: set_mac_type failed: %d\n",
+ __func__, err);
+ goto err;
+ }
+ err = i40evf_check_reset_complete(hw);
+ if (err) {
+ dev_info(&pdev->dev, "%s: device is still in reset (%d).\n",
+ __func__, err);
+ goto err;
+ }
+ hw->aq.num_arq_entries = I40EVF_AQ_LEN;
+ hw->aq.num_asq_entries = I40EVF_AQ_LEN;
+ hw->aq.arq_buf_size = I40EVF_MAX_AQ_BUF_SIZE;
+ hw->aq.asq_buf_size = I40EVF_MAX_AQ_BUF_SIZE;
+
+ err = i40evf_init_adminq(hw);
+ if (err) {
+ dev_info(&pdev->dev, "%s: init_adminq failed: %d\n",
+ __func__, err);
+ goto err;
+ }
+ err = i40evf_send_api_ver(adapter);
+ if (err) {
+ dev_info(&pdev->dev, "%s: unable to send to PF (%d)\n",
+ __func__, err);
+ i40evf_shutdown_adminq(hw);
+ goto err;
+ }
+ adapter->state = __I40EVF_INIT_VERSION_CHECK;
+ goto restart;
+ case __I40EVF_INIT_VERSION_CHECK:
+ if (!i40evf_asq_done(hw))
+ goto err;
+
+ /* aq msg sent, awaiting reply */
+ err = i40evf_verify_api_ver(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to verify API version, error %d\n",
+ err);
+ goto err;
+ }
+ err = i40evf_send_vf_config_msg(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Unable send config request, error %d\n",
+ err);
+ goto err;
+ }
+ adapter->state = __I40EVF_INIT_GET_RESOURCES;
+ goto restart;
+ case __I40EVF_INIT_GET_RESOURCES:
+ /* aq msg sent, awaiting reply */
+ if (!adapter->vf_res) {
+ bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
+ (I40E_MAX_VF_VSI *
+ sizeof(struct i40e_virtchnl_vsi_resource));
+ adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
+ if (!adapter->vf_res) {
+ dev_err(&pdev->dev, "%s: unable to allocate memory\n",
+ __func__);
+ goto err;
+ }
+ }
+ err = i40evf_get_vf_config(adapter);
+ if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
+ goto restart;
+ if (err) {
+ dev_info(&pdev->dev, "%s: unable to get VF config (%d)\n",
+ __func__, err);
+ goto err_alloc;
+ }
+ adapter->state = __I40EVF_INIT_SW;
+ break;
+ default:
+ goto err_alloc;
+ }
+ /* got VF config message back from PF, now we can parse it */
+ for (i = 0; i < adapter->vf_res->num_vsis; i++) {
+ if (adapter->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
+ adapter->vsi_res = &adapter->vf_res->vsi_res[i];
+ }
+ if (!adapter->vsi_res) {
+ dev_info(&pdev->dev, "%s: no LAN VSI found\n", __func__);
+ goto err_alloc;
+ }
+
+ adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
+
+ adapter->txd_count = I40EVF_DEFAULT_TXD;
+ adapter->rxd_count = I40EVF_DEFAULT_RXD;
+
+ netdev->netdev_ops = &i40evf_netdev_ops;
+ i40evf_set_ethtool_ops(netdev);
+ netdev->watchdog_timeo = 5 * HZ;
+
+ netdev->features |= NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_SCTP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_GRO;
+
+ if (adapter->vf_res->vf_offload_flags
+ & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) {
+ netdev->vlan_features = netdev->features;
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER;
+ }
+
+ /* The HW MAC address was set and/or determined in sw_init */
+ if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
+ dev_info(&pdev->dev,
+ "Invalid MAC address %pMAC, using random\n",
+ adapter->hw.mac.addr);
+ random_ether_addr(adapter->hw.mac.addr);
+ }
+ memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
+ memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
+
+ INIT_LIST_HEAD(&adapter->mac_filter_list);
+ INIT_LIST_HEAD(&adapter->vlan_filter_list);
+ f = kzalloc(sizeof(*f), GFP_ATOMIC);
+	if (!f)
+ goto err_sw_init;
+
+ memcpy(f->macaddr, adapter->hw.mac.addr, ETH_ALEN);
+ f->add = true;
+ adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+
+ list_add(&f->list, &adapter->mac_filter_list);
+
+ init_timer(&adapter->watchdog_timer);
+ adapter->watchdog_timer.function = &i40evf_watchdog_timer;
+ adapter->watchdog_timer.data = (unsigned long)adapter;
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+
+ err = i40evf_init_interrupt_scheme(adapter);
+ if (err)
+ goto err_sw_init;
+ i40evf_map_rings_to_vectors(adapter);
+ i40evf_configure_rss(adapter);
+ err = i40evf_request_misc_irq(adapter);
+ if (err)
+ goto err_sw_init;
+
+ netif_carrier_off(netdev);
+
+ strcpy(netdev->name, "eth%d");
+
+ adapter->vsi.id = adapter->vsi_res->vsi_id;
+ adapter->vsi.seid = adapter->vsi_res->vsi_id; /* dummy */
+ adapter->vsi.back = adapter;
+ adapter->vsi.base_vector = 1;
+ adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
+ adapter->vsi.rx_itr_setting = I40E_ITR_DYNAMIC;
+ adapter->vsi.tx_itr_setting = I40E_ITR_DYNAMIC;
+ adapter->vsi.netdev = adapter->netdev;
+
+ err = register_netdev(netdev);
+ if (err)
+ goto err_register;
+
+ adapter->netdev_registered = true;
+
+ netif_tx_stop_all_queues(netdev);
+
+ dev_info(&pdev->dev, "MAC address: %pMAC\n", adapter->hw.mac.addr);
+ if (netdev->features & NETIF_F_GRO)
+ dev_info(&pdev->dev, "GRO is enabled\n");
+
+ dev_info(&pdev->dev, "%s\n", i40evf_driver_string);
+ adapter->state = __I40EVF_DOWN;
+ set_bit(__I40E_DOWN, &adapter->vsi.state);
+ i40evf_misc_irq_enable(adapter);
+ return;
+restart:
+ schedule_delayed_work(&adapter->init_task,
+ msecs_to_jiffies(50));
+ return;
+
+err_register:
+ i40evf_free_misc_irq(adapter);
+err_sw_init:
+ i40evf_reset_interrupt_capability(adapter);
+ adapter->state = __I40EVF_FAILED;
+err_alloc:
+ kfree(adapter->vf_res);
+ adapter->vf_res = NULL;
+err:
+ /* Things went into the weeds, so try again later */
+ if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) {
+ dev_err(&pdev->dev, "Failed to communicate with PF; giving up.\n");
+ if (hw->aq.asq.count)
+ i40evf_shutdown_adminq(hw); /* ignore error */
+ adapter->state = __I40EVF_FAILED;
+ return; /* do not reschedule */
+ }
+ schedule_delayed_work(&adapter->init_task, HZ * 3);
+}
+
+/**
+ * i40evf_shutdown - Shutdown the device in preparation for a reboot
+ * @pdev: pci device structure
+ **/
+static void i40evf_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ i40evf_close(netdev);
+
+#ifdef CONFIG_PM
+ pci_save_state(pdev);
+
+#endif
+ pci_disable_device(pdev);
+}
+
+/**
+ * i40evf_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in i40evf_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * i40evf_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct net_device *netdev;
+ struct i40evf_adapter *adapter = NULL;
+ struct i40e_hw *hw = NULL;
+ int err, pci_using_dac;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ pci_using_dac = true;
+ /* coherent mask for the same size will always succeed if
+ * dma_set_mask does
+ */
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
+ pci_using_dac = false;
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ } else {
+ dev_err(&pdev->dev, "%s: DMA configuration failed: %d\n",
+ __func__, err);
+ err = -EIO;
+ goto err_dma;
+ }
+
+ err = pci_request_regions(pdev, i40evf_driver_name);
+ if (err) {
+ dev_err(&pdev->dev,
+ "pci_request_regions failed 0x%x\n", err);
+ goto err_pci_reg;
+ }
+
+ pci_enable_pcie_error_reporting(pdev);
+
+ pci_set_master(pdev);
+
+ netdev = alloc_etherdev_mq(sizeof(struct i40evf_adapter),
+ MAX_TX_QUEUES);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_alloc_etherdev;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ pci_set_drvdata(pdev, netdev);
+ adapter = netdev_priv(netdev);
+ if (pci_using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
+
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+
+ hw = &adapter->hw;
+ hw->back = adapter;
+
+ adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
+ adapter->state = __I40EVF_STARTUP;
+
+ /* Call save state here because it relies on the adapter struct. */
+ pci_save_state(pdev);
+
+ hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!hw->hw_addr) {
+ err = -EIO;
+ goto err_ioremap;
+ }
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_device_id = pdev->subsystem_device;
+ hw->bus.device = PCI_SLOT(pdev->devfn);
+ hw->bus.func = PCI_FUNC(pdev->devfn);
+
+ INIT_WORK(&adapter->reset_task, i40evf_reset_task);
+ INIT_WORK(&adapter->adminq_task, i40evf_adminq_task);
+ INIT_WORK(&adapter->watchdog_task, i40evf_watchdog_task);
+ INIT_DELAYED_WORK(&adapter->init_task, i40evf_init_task);
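+	/* kick off the init task; it re-arms itself until the PF responds */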
+ schedule_delayed_work(&adapter->init_task, 10);
+
+ return 0;
+
+err_ioremap:
+ free_netdev(netdev);
+err_alloc_etherdev:
+ pci_release_regions(pdev);
+err_pci_reg:
+err_dma:
+ pci_disable_device(pdev);
+ return err;
+}
+
+#ifdef CONFIG_PM
+/**
+ * i40evf_suspend - Power management suspend routine
+ * @pdev: PCI device information struct
+ * @state: unused
+ *
+ * Called when the system (VM) is entering sleep/suspend.
+ **/
+static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ int retval = 0;
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev)) {
+ rtnl_lock();
+ i40evf_down(adapter);
+ rtnl_unlock();
+ }
+ i40evf_free_misc_irq(adapter);
+ i40evf_reset_interrupt_capability(adapter);
+
+ retval = pci_save_state(pdev);
+ if (retval)
+ return retval;
+
+ pci_disable_device(pdev);
+
+ return 0;
+}
+
+/**
+ * i40evf_resume - Power management resume routine
+ * @pdev: PCI device information struct
+ *
+ * Called when the system (VM) is resumed from sleep/suspend.
+ **/
+static int i40evf_resume(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	int err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ /* pci_restore_state clears dev->state_saved so call
+ * pci_save_state to restore it.
+ */
+ pci_save_state(pdev);
+
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot enable PCI device from suspend.\n");
+ return err;
+ }
+ pci_set_master(pdev);
+
+ rtnl_lock();
+ err = i40evf_set_interrupt_capability(adapter);
+	if (err) {
+		rtnl_unlock();
+		dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
+		return err;
+	}
+ err = i40evf_request_misc_irq(adapter);
+ rtnl_unlock();
+ if (err) {
+ dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
+ return err;
+ }
+
+ schedule_work(&adapter->reset_task);
+
+ netif_device_attach(netdev);
+
+ return err;
+}
+
+#endif /* CONFIG_PM */
+/**
+ * i40evf_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * i40evf_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void i40evf_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_hw *hw = &adapter->hw;
+
+ cancel_delayed_work_sync(&adapter->init_task);
+
+ if (adapter->netdev_registered) {
+ unregister_netdev(netdev);
+ adapter->netdev_registered = false;
+ }
+ adapter->state = __I40EVF_REMOVE;
+
+ if (adapter->num_msix_vectors) {
+ i40evf_misc_irq_disable(adapter);
+ del_timer_sync(&adapter->watchdog_timer);
+
+ flush_scheduled_work();
+
+ i40evf_free_misc_irq(adapter);
+
+ i40evf_reset_interrupt_capability(adapter);
+ }
+
+ if (hw->aq.asq.count)
+ i40evf_shutdown_adminq(hw);
+
+ iounmap(hw->hw_addr);
+ pci_release_regions(pdev);
+
+ i40evf_free_queues(adapter);
+ kfree(adapter->vf_res);
+
+ free_netdev(netdev);
+
+ pci_disable_pcie_error_reporting(pdev);
+
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver i40evf_driver = {
+ .name = i40evf_driver_name,
+ .id_table = i40evf_pci_tbl,
+ .probe = i40evf_probe,
+ .remove = i40evf_remove,
+#ifdef CONFIG_PM
+ .suspend = i40evf_suspend,
+ .resume = i40evf_resume,
+#endif
+ .shutdown = i40evf_shutdown,
+};
+
+/**
+ * i40evf_init_module - Driver Registration Routine
+ *
+ * i40evf_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+static int __init i40evf_init_module(void)
+{
+	int ret;
+
+ pr_info("i40evf: %s - version %s\n", i40evf_driver_string,
+ i40evf_driver_version);
+
+ pr_info("%s\n", i40evf_copyright);
+
+ ret = pci_register_driver(&i40evf_driver);
+ return ret;
+}
+
+module_init(i40evf_init_module);
+
+/**
+ * i40evf_exit_module - Driver Exit Cleanup Routine
+ *
+ * i40evf_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+static void __exit i40evf_exit_module(void)
+{
+ pci_unregister_driver(&i40evf_driver);
+}
+
+module_exit(i40evf_exit_module);
+
+/* i40evf_main.c */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
new file mode 100644
index 000000000000..e6978d79e62b
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -0,0 +1,772 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40evf.h"
+#include "i40e_prototype.h"
+
+/* busy wait delay in msec */
+#define I40EVF_BUSY_WAIT_DELAY 10
+#define I40EVF_BUSY_WAIT_COUNT 50
+
+/**
+ * i40evf_send_pf_msg
+ * @adapter: adapter structure
+ * @op: virtual channel opcode
+ * @msg: pointer to message buffer
+ * @len: message length
+ *
+ * Send message to PF and print status if failure.
+ **/
+static int i40evf_send_pf_msg(struct i40evf_adapter *adapter,
+ enum i40e_virtchnl_ops op, u8 *msg, u16 len)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ i40e_status err;
+
+ err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
+ if (err)
+ dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, error %d, aq status %d\n",
+ op, err, hw->aq.asq_last_status);
+ return err;
+}
+
+/**
+ * i40evf_send_api_ver
+ * @adapter: adapter structure
+ *
+ * Send API version admin queue message to the PF. The reply is not checked
+ * in this function. Returns 0 if the message was successfully
+ * sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
+ **/
+int i40evf_send_api_ver(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_version_info vvi;
+
+ vvi.major = I40E_VIRTCHNL_VERSION_MAJOR;
+ vvi.minor = I40E_VIRTCHNL_VERSION_MINOR;
+
+ return i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_VERSION, (u8 *)&vvi,
+ sizeof(vvi));
+}
+
+/**
+ * i40evf_verify_api_ver
+ * @adapter: adapter structure
+ *
+ * Compare API versions with the PF. Must be called after admin queue is
+ * initialized. Returns 0 if API versions match, -EIO if
+ * they do not, or I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty.
+ **/
+int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_version_info *pf_vvi;
+ struct i40e_hw *hw = &adapter->hw;
+ struct i40e_arq_event_info event;
+ i40e_status err;
+
+ event.msg_size = I40EVF_MAX_AQ_BUF_SIZE;
+ event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
+ if (!event.msg_buf) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = i40evf_clean_arq_element(hw, &event, NULL);
+ if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
+ goto out_alloc;
+
+ err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
+ if (err) {
+ err = -EIO;
+ goto out_alloc;
+ }
+
+ if ((enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high) !=
+ I40E_VIRTCHNL_OP_VERSION) {
+ err = -EIO;
+ goto out_alloc;
+ }
+
+ pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf;
+ if ((pf_vvi->major != I40E_VIRTCHNL_VERSION_MAJOR) ||
+ (pf_vvi->minor != I40E_VIRTCHNL_VERSION_MINOR))
+ err = -EIO;
+
+out_alloc:
+ kfree(event.msg_buf);
+out:
+ return err;
+}
+
+/**
+ * i40evf_send_vf_config_msg
+ * @adapter: adapter structure
+ *
+ * Send VF configuration request admin queue message to the PF. The reply
+ * is not checked in this function. Returns 0 if the message was
+ * successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
+ **/
+int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
+{
+ return i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ NULL, 0);
+}
+
+/**
+ * i40evf_get_vf_config
+ * @adapter: private adapter structure
+ *
+ * Get VF configuration from PF and populate hw structure. Must be called after
+ * admin queue is initialized. Busy waits until response is received from PF,
+ * with maximum timeout. Response from PF is returned in the buffer for further
+ * processing by the caller.
+ **/
+int i40evf_get_vf_config(struct i40evf_adapter *adapter)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ struct i40e_arq_event_info event;
+ u16 len;
+ i40e_status err;
+
+ len = sizeof(struct i40e_virtchnl_vf_resource) +
+ I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource);
+ event.msg_size = len;
+ event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
+ if (!event.msg_buf) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = i40evf_clean_arq_element(hw, &event, NULL);
+ if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
+ goto out_alloc;
+
+ err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Error returned from PF, %d, %d\n", __func__,
+ le32_to_cpu(event.desc.cookie_high),
+ le32_to_cpu(event.desc.cookie_low));
+ err = -EIO;
+ goto out_alloc;
+ }
+
+ if ((enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high) !=
+ I40E_VIRTCHNL_OP_GET_VF_RESOURCES) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Invalid response from PF, %d, %d\n", __func__,
+ le32_to_cpu(event.desc.cookie_high),
+ le32_to_cpu(event.desc.cookie_low));
+ err = -EIO;
+ goto out_alloc;
+ }
+ memcpy(adapter->vf_res, event.msg_buf, min(event.msg_size, len));
+
+ i40e_vf_parse_hw_config(hw, adapter->vf_res);
+out_alloc:
+ kfree(event.msg_buf);
+out:
+ return err;
+}
+
+/**
+ * i40evf_configure_queues
+ * @adapter: adapter structure
+ *
+ * Request that the PF set up our (previously allocated) queues.
+ **/
+void i40evf_configure_queues(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_vsi_queue_config_info *vqci;
+ struct i40e_virtchnl_queue_pair_info *vqpi;
+ int pairs = adapter->vsi_res->num_queue_pairs;
+ int i, len;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES;
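+ /* vqci ends in a flexible array of queue pair info, so size the
+ * buffer for the header plus one entry per queue pair
+ */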
+ len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
+ (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs);
+ vqci = kzalloc(len, GFP_ATOMIC);
+ if (!vqci) {
+ dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
+ __func__);
+ return;
+ }
+ vqci->vsi_id = adapter->vsi_res->vsi_id;
+ vqci->num_queue_pairs = pairs;
+ vqpi = vqci->qpair;
+ /* Size check is not needed here - HW max is 16 queue pairs, and we
+ * can fit info for 31 of them into the AQ buffer before it overflows.
+ */
+ for (i = 0; i < pairs; i++) {
+ vqpi->txq.vsi_id = vqci->vsi_id;
+ vqpi->txq.queue_id = i;
+ vqpi->txq.ring_len = adapter->tx_rings[i]->count;
+ vqpi->txq.dma_ring_addr = adapter->tx_rings[i]->dma;
+
+ vqpi->rxq.vsi_id = vqci->vsi_id;
+ vqpi->rxq.queue_id = i;
+ vqpi->rxq.ring_len = adapter->rx_rings[i]->count;
+ vqpi->rxq.dma_ring_addr = adapter->rx_rings[i]->dma;
+ vqpi->rxq.max_pkt_size = adapter->netdev->mtu
+ + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
+ vqpi->rxq.databuffer_size = adapter->rx_rings[i]->rx_buf_len;
+ vqpi++;
+ }
+
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ (u8 *)vqci, len);
+ kfree(vqci);
+ adapter->aq_pending |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
+}
+
+/**
+ * i40evf_enable_queues
+ * @adapter: adapter structure
+ *
+ * Request that the PF enable all of our queues.
+ **/
+void i40evf_enable_queues(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_queue_select vqs;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
+ vqs.vsi_id = adapter->vsi_res->vsi_id;
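+ /* set a bit for every queue pair, e.g. 4 pairs yields a mask of 0xf */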
+ vqs.tx_queues = (1 << adapter->vsi_res->num_queue_pairs) - 1;
+ vqs.rx_queues = vqs.tx_queues;
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ (u8 *)&vqs, sizeof(vqs));
+ adapter->aq_pending |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES;
+}
+
+/**
+ * i40evf_disable_queues
+ * @adapter: adapter structure
+ *
+ * Request that the PF disable all of our queues.
+ **/
+void i40evf_disable_queues(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_queue_select vqs;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
+ vqs.vsi_id = adapter->vsi_res->vsi_id;
+ vqs.tx_queues = (1 << adapter->vsi_res->num_queue_pairs) - 1;
+ vqs.rx_queues = vqs.tx_queues;
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ (u8 *)&vqs, sizeof(vqs));
+ adapter->aq_pending |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES;
+}
+
+/**
+ * i40evf_map_queues
+ * @adapter: adapter structure
+ *
+ * Request that the PF map queues to interrupt vectors. Misc causes, including
+ * admin queue, are always mapped to vector 0.
+ **/
+void i40evf_map_queues(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_irq_map_info *vimi;
+ int v_idx, q_vectors, len;
+ struct i40e_q_vector *q_vector;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP;
+
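+ /* vector 0 is reserved for misc causes such as the AdminQ, so only
+ * the remaining vectors carry queue traffic
+ */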
+ q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+ len = sizeof(struct i40e_virtchnl_irq_map_info) +
+ (adapter->num_msix_vectors *
+ sizeof(struct i40e_virtchnl_vector_map));
+ vimi = kzalloc(len, GFP_ATOMIC);
+ if (!vimi) {
+ dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
+ __func__);
+ return;
+ }
+
+ vimi->num_vectors = adapter->num_msix_vectors;
+ /* Queue vectors first */
+ for (v_idx = 0; v_idx < q_vectors; v_idx++) {
+ q_vector = adapter->q_vector[v_idx];
+ vimi->vecmap[v_idx].vsi_id = adapter->vsi_res->vsi_id;
+ vimi->vecmap[v_idx].vector_id = v_idx + NONQ_VECS;
+ vimi->vecmap[v_idx].txq_map = q_vector->ring_mask;
+ vimi->vecmap[v_idx].rxq_map = q_vector->ring_mask;
+ }
+ /* Misc vector last - this is only for AdminQ messages */
+ vimi->vecmap[v_idx].vsi_id = adapter->vsi_res->vsi_id;
+ vimi->vecmap[v_idx].vector_id = 0;
+ vimi->vecmap[v_idx].txq_map = 0;
+ vimi->vecmap[v_idx].rxq_map = 0;
+
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ (u8 *)vimi, len);
+ kfree(vimi);
+ adapter->aq_pending |= I40EVF_FLAG_AQ_MAP_VECTORS;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS;
+}
+
+/**
+ * i40evf_add_ether_addrs
+ * @adapter: adapter structure
+ *
+ * Request that the PF add one or more addresses to our filters.
+ **/
+void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_ether_addr_list *veal;
+ int len, i = 0, count = 0;
+ struct i40evf_mac_filter *f;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+ list_for_each_entry(f, &adapter->mac_filter_list, list) {
+ if (f->add)
+ count++;
+ }
+ if (!count) {
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS;
+
+ len = sizeof(struct i40e_virtchnl_ether_addr_list) +
+ (count * sizeof(struct i40e_virtchnl_ether_addr));
+ if (len > I40EVF_MAX_AQ_BUF_SIZE) {
+ dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request.\n",
+ __func__);
+ count = (I40EVF_MAX_AQ_BUF_SIZE -
+ sizeof(struct i40e_virtchnl_ether_addr_list)) /
+ sizeof(struct i40e_virtchnl_ether_addr);
+ len = I40EVF_MAX_AQ_BUF_SIZE;
+ }
+
+ veal = kzalloc(len, GFP_ATOMIC);
+ if (!veal) {
+ dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
+ __func__);
+ return;
+ }
+ veal->vsi_id = adapter->vsi_res->vsi_id;
+ veal->num_elements = count;
+ list_for_each_entry(f, &adapter->mac_filter_list, list) {
+ if (f->add) {
+ memcpy(veal->list[i].addr, f->macaddr, ETH_ALEN);
+ i++;
+ f->add = false;
+ /* don't write past the buffer if count was truncated */
+ if (i == count)
+ break;
+ }
+ }
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ (u8 *)veal, len);
+ kfree(veal);
+ adapter->aq_pending |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+}
+
+/**
+ * i40evf_del_ether_addrs
+ * @adapter: adapter structure
+ *
+ * Request that the PF remove one or more addresses from our filters.
+ **/
+void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_ether_addr_list *veal;
+ struct i40evf_mac_filter *f, *ftmp;
+ int len, i = 0, count = 0;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+ list_for_each_entry(f, &adapter->mac_filter_list, list) {
+ if (f->remove)
+ count++;
+ }
+ if (!count) {
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS;
+
+ len = sizeof(struct i40e_virtchnl_ether_addr_list) +
+ (count * sizeof(struct i40e_virtchnl_ether_addr));
+ if (len > I40EVF_MAX_AQ_BUF_SIZE) {
+ dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request.\n",
+ __func__);
+ count = (I40EVF_MAX_AQ_BUF_SIZE -
+ sizeof(struct i40e_virtchnl_ether_addr_list)) /
+ sizeof(struct i40e_virtchnl_ether_addr);
+ len = I40EVF_MAX_AQ_BUF_SIZE;
+ }
+ veal = kzalloc(len, GFP_ATOMIC);
+ if (!veal) {
+ dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
+ __func__);
+ return;
+ }
+ veal->vsi_id = adapter->vsi_res->vsi_id;
+ veal->num_elements = count;
+ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
+ if (f->remove) {
+ memcpy(veal->list[i].addr, f->macaddr, ETH_ALEN);
+ i++;
+ list_del(&f->list);
+ kfree(f);
+ /* don't write past the buffer if count was truncated */
+ if (i == count)
+ break;
+ }
+ }
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+ (u8 *)veal, len);
+ kfree(veal);
+ adapter->aq_pending |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+}
+
+/**
+ * i40evf_add_vlans
+ * @adapter: adapter structure
+ *
+ * Request that the PF add one or more VLAN filters to our VSI.
+ **/
+void i40evf_add_vlans(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_vlan_filter_list *vvfl;
+ int len, i = 0, count = 0;
+ struct i40evf_vlan_filter *f;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+
+ list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+ if (f->add)
+ count++;
+ }
+ if (!count) {
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_ADD_VLAN;
+
+ len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
+ (count * sizeof(u16));
+ if (len > I40EVF_MAX_AQ_BUF_SIZE) {
+ dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request.\n",
+ __func__);
+ count = (I40EVF_MAX_AQ_BUF_SIZE -
+ sizeof(struct i40e_virtchnl_vlan_filter_list)) /
+ sizeof(u16);
+ len = I40EVF_MAX_AQ_BUF_SIZE;
+ }
+ vvfl = kzalloc(len, GFP_ATOMIC);
+ if (!vvfl) {
+ dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
+ __func__);
+ return;
+ }
+ vvfl->vsi_id = adapter->vsi_res->vsi_id;
+ vvfl->num_elements = count;
+ list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+ if (f->add) {
+ vvfl->vlan_id[i] = f->vlan;
+ i++;
+ f->add = false;
+ /* don't write past the buffer if count was truncated */
+ if (i == count)
+ break;
+ }
+ }
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
+ kfree(vvfl);
+ adapter->aq_pending |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+}
+
+/**
+ * i40evf_del_vlans
+ * @adapter: adapter structure
+ *
+ * Request that the PF remove one or more VLAN filters from our VSI.
+ **/
+void i40evf_del_vlans(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_vlan_filter_list *vvfl;
+ struct i40evf_vlan_filter *f, *ftmp;
+ int len, i = 0, count = 0;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+
+ list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+ if (f->remove)
+ count++;
+ }
+ if (!count) {
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_DEL_VLAN;
+
+ len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
+ (count * sizeof(u16));
+ if (len > I40EVF_MAX_AQ_BUF_SIZE) {
+ dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request.\n",
+ __func__);
+ count = (I40EVF_MAX_AQ_BUF_SIZE -
+ sizeof(struct i40e_virtchnl_vlan_filter_list)) /
+ sizeof(u16);
+ len = I40EVF_MAX_AQ_BUF_SIZE;
+ }
+ vvfl = kzalloc(len, GFP_ATOMIC);
+ if (!vvfl) {
+ dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
+ __func__);
+ return;
+ }
+ vvfl->vsi_id = adapter->vsi_res->vsi_id;
+ vvfl->num_elements = count;
+ list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
+ if (f->remove) {
+ vvfl->vlan_id[i] = f->vlan;
+ i++;
+ list_del(&f->list);
+ kfree(f);
+ /* don't write past the buffer if count was truncated */
+ if (i == count)
+ break;
+ }
+ }
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
+ kfree(vvfl);
+ adapter->aq_pending |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+}
+
+/**
+ * i40evf_set_promiscuous
+ * @adapter: adapter structure
+ * @flags: bitmask to control unicast/multicast promiscuous.
+ *
+ * Request that the PF enable promiscuous mode for our VSI.
+ **/
+void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
+{
+ struct i40e_virtchnl_promisc_info vpi;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
+ vpi.vsi_id = adapter->vsi_res->vsi_id;
+ vpi.flags = flags;
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+ (u8 *)&vpi, sizeof(vpi));
+}
+
+/**
+ * i40evf_request_stats
+ * @adapter: adapter structure
+ *
+ * Request VSI statistics from PF.
+ **/
+void i40evf_request_stats(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_queue_select vqs;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* no error message, this isn't crucial */
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_GET_STATS;
+ vqs.vsi_id = adapter->vsi_res->vsi_id;
+ /* queue maps are ignored for this message - only the vsi is used */
+ if (i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_STATS,
+ (u8 *)&vqs, sizeof(vqs)))
+ /* if the request failed, don't lock out others */
+ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+}
+
+/**
+ * i40evf_virtchnl_completion
+ * @adapter: adapter structure
+ * @v_opcode: opcode sent by PF
+ * @v_retval: retval sent by PF
+ * @msg: message sent by PF
+ * @msglen: message length
+ *
+ * Asynchronous completion function for admin queue messages. Rather than busy
+ * wait, we fire off our requests and assume that no errors will be returned.
+ * This function handles the reply messages.
+ **/
+void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
+ enum i40e_virtchnl_ops v_opcode,
+ i40e_status v_retval,
+ u8 *msg, u16 msglen)
+{
+ struct net_device *netdev = adapter->netdev;
+
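+ /* PF-initiated events arrive unsolicited, so handle them before
+ * checking for command completions
+ */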
+ if (v_opcode == I40E_VIRTCHNL_OP_EVENT) {
+ struct i40e_virtchnl_pf_event *vpe =
+ (struct i40e_virtchnl_pf_event *)msg;
+ switch (vpe->event) {
+ case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
+ adapter->link_up =
+ vpe->event_data.link_event.link_status;
+ if (adapter->link_up && !netif_carrier_ok(netdev)) {
+ dev_info(&adapter->pdev->dev, "NIC Link is Up\n");
+ netif_carrier_on(netdev);
+ netif_tx_wake_all_queues(netdev);
+ } else if (!adapter->link_up) {
+ dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+ }
+ break;
+ case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
+ adapter->state = __I40EVF_RESETTING;
+ schedule_work(&adapter->reset_task);
+ dev_info(&adapter->pdev->dev,
+ "%s: hardware reset pending\n", __func__);
+ break;
+ default:
+ dev_err(&adapter->pdev->dev,
+ "%s: Unknown event %d from pf\n",
+ __func__, vpe->event);
+ break;
+ }
+ return;
+ }
+ if (v_opcode != adapter->current_op) {
+ dev_err(&adapter->pdev->dev, "%s: Pending op is %d, received %d.\n",
+ __func__, adapter->current_op, v_opcode);
+ /* We're probably completely screwed at this point, but clear
+ * the current op and try to carry on....
+ */
+ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+ return;
+ }
+ if (v_retval) {
+ dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d!\n",
+ __func__, v_retval, v_opcode);
+ }
+ switch (v_opcode) {
+ case I40E_VIRTCHNL_OP_GET_STATS: {
+ struct i40e_eth_stats *stats =
+ (struct i40e_eth_stats *)msg;
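+ /* fold the unicast/multicast/broadcast HW counters into the
+ * aggregate netdev packet counts
+ */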
+ adapter->net_stats.rx_packets = stats->rx_unicast +
+ stats->rx_multicast +
+ stats->rx_broadcast;
+ adapter->net_stats.tx_packets = stats->tx_unicast +
+ stats->tx_multicast +
+ stats->tx_broadcast;
+ adapter->net_stats.rx_bytes = stats->rx_bytes;
+ adapter->net_stats.tx_bytes = stats->tx_bytes;
+ adapter->net_stats.rx_errors = stats->rx_errors;
+ adapter->net_stats.tx_errors = stats->tx_errors;
+ adapter->net_stats.rx_dropped = stats->rx_missed;
+ adapter->net_stats.tx_dropped = stats->tx_discards;
+ adapter->current_stats = *stats;
+ }
+ break;
+ case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
+ adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ADD_MAC_FILTER);
+ break;
+ case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
+ adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DEL_MAC_FILTER);
+ break;
+ case I40E_VIRTCHNL_OP_ADD_VLAN:
+ adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ADD_VLAN_FILTER);
+ break;
+ case I40E_VIRTCHNL_OP_DEL_VLAN:
+ adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DEL_VLAN_FILTER);
+ break;
+ case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
+ adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ENABLE_QUEUES);
+ /* enable transmits */
+ i40evf_irq_enable(adapter, true);
+ netif_tx_start_all_queues(adapter->netdev);
+ netif_carrier_on(adapter->netdev);
+ break;
+ case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
+ adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DISABLE_QUEUES);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ adapter->aq_pending &= ~(I40EVF_FLAG_AQ_CONFIGURE_QUEUES);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ adapter->aq_pending &= ~(I40EVF_FLAG_AQ_MAP_VECTORS);
+ break;
+ default:
+ dev_warn(&adapter->pdev->dev, "%s: Received unexpected message %d from PF.\n",
+ __func__, v_opcode);
+ break;
+ } /* switch v_opcode */
+ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+}
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 47c2d10df826..06df6928f44c 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -113,6 +113,59 @@ static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)
}
/**
+ * igb_check_for_link_media_swap - Check which M88E1112 interface has link
+ * @hw: pointer to the HW structure
+ *
+ * Poll the M88E1112 interfaces to see which interface achieved link.
+ **/
+static s32 igb_check_for_link_media_swap(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+ u8 port = 0;
+
+ /* Check the copper medium. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
+ if (ret_val)
+ return ret_val;
+
+ if (data & E1000_M88E1112_STATUS_LINK)
+ port = E1000_MEDIA_PORT_COPPER;
+
+ /* Check the other medium. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
+ if (ret_val)
+ return ret_val;
+
+ /* reset page to 0 */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+ if (ret_val)
+ return ret_val;
+
+ if (data & E1000_M88E1112_STATUS_LINK)
+ port = E1000_MEDIA_PORT_OTHER;
+
+ /* Determine if a swap needs to happen. */
+ if (port && (hw->dev_spec._82575.media_port != port)) {
+ hw->dev_spec._82575.media_port = port;
+ hw->dev_spec._82575.media_changed = true;
+ } else {
+ ret_val = igb_check_for_link_82575(hw);
+ }
+
+ return ret_val;
+}
+
+/**
* igb_init_phy_params_82575 - Init PHY func ptrs.
* @hw: pointer to the HW structure
**/
@@ -189,6 +242,29 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
else
phy->ops.get_cable_length = igb_get_cable_length_m88;
phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
+ /* Check if this PHY is configured for media swap. */
+ if (phy->id == M88E1112_E_PHY_ID) {
+ u16 data;
+
+ ret_val = phy->ops.write_reg(hw,
+ E1000_M88E1112_PAGE_ADDR,
+ 2);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.read_reg(hw,
+ E1000_M88E1112_MAC_CTRL_1,
+ &data);
+ if (ret_val)
+ goto out;
+
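+ /* the mode select field indicates whether the PHY auto-switches
+ * between copper and SGMII/1000BASE-X media
+ */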
+ data = (data & E1000_M88E1112_MAC_CTRL_1_MODE_MASK) >>
+ E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT;
+ if (data == E1000_M88E1112_AUTO_COPPER_SGMII ||
+ data == E1000_M88E1112_AUTO_COPPER_BASEX)
+ hw->mac.ops.check_for_link =
+ igb_check_for_link_media_swap;
+ }
break;
case IGP03E1000_E_PHY_ID:
phy->type = e1000_phy_igp_3;
@@ -365,6 +441,19 @@ static s32 igb_init_mac_params_82575(struct e1000_hw *hw)
? igb_setup_copper_link_82575
: igb_setup_serdes_link_82575;
+ if (mac->type == e1000_82580) {
+ switch (hw->device_id) {
+ /* feature not supported on these id's */
+ case E1000_DEV_ID_DH89XXCC_SGMII:
+ case E1000_DEV_ID_DH89XXCC_SERDES:
+ case E1000_DEV_ID_DH89XXCC_BACKPLANE:
+ case E1000_DEV_ID_DH89XXCC_SFP:
+ break;
+ default:
+ hw->dev_spec._82575.mas_capable = true;
+ break;
+ }
+ }
return 0;
}
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 978eca31ceda..0571b973be80 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -205,6 +205,11 @@
*/
#define E1000_CONNSW_ENRGSRC 0x4
+#define E1000_CONNSW_PHYSD 0x400
+#define E1000_CONNSW_PHY_PDN 0x800
+#define E1000_CONNSW_SERDESD 0x200
+#define E1000_CONNSW_AUTOSENSE_CONF 0x2
+#define E1000_CONNSW_AUTOSENSE_EN 0x1
#define E1000_PCS_CFG_PCS_EN 8
#define E1000_PCS_LCTL_FLV_LINK_UP 1
#define E1000_PCS_LCTL_FSV_100 2
@@ -532,6 +537,17 @@
#define E1000_MDICNFG_PHY_MASK 0x03E00000
#define E1000_MDICNFG_PHY_SHIFT 21
+#define E1000_MEDIA_PORT_COPPER 1
+#define E1000_MEDIA_PORT_OTHER 2
+#define E1000_M88E1112_AUTO_COPPER_SGMII 0x2
+#define E1000_M88E1112_AUTO_COPPER_BASEX 0x3
+#define E1000_M88E1112_STATUS_LINK 0x0004 /* Interface Link Bit */
+#define E1000_M88E1112_MAC_CTRL_1 0x10
+#define E1000_M88E1112_MAC_CTRL_1_MODE_MASK 0x0380 /* Mode Select */
+#define E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT 7
+#define E1000_M88E1112_PAGE_ADDR 0x16
+#define E1000_M88E1112_STATUS 0x01
+
/* PCI Express Control */
#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000
#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index 2e166b22d52b..ab99e2b582a8 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -533,6 +533,9 @@ struct e1000_dev_spec_82575 {
bool clear_semaphore_once;
struct e1000_sfp_flags eth_flags;
bool module_plugged;
+ u8 media_port;
+ bool media_changed;
+ bool mas_capable;
};
struct e1000_hw {
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 5e9ed89403aa..ccf472f073dd 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -41,6 +41,7 @@
#include <linux/if_vlan.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
+#include <linux/pci.h>
struct igb_adapter;
@@ -67,6 +68,7 @@ struct igb_adapter;
#define IGB_MIN_ITR_USECS 10
#define NON_Q_VECTORS 1
#define MAX_Q_VECTORS 8
+#define MAX_MSIX_ENTRIES 10
/* Transmit and receive queues */
#define IGB_MAX_RX_QUEUES 8
@@ -127,9 +129,9 @@ struct vf_data_storage {
#define IGB_TX_PTHRESH ((hw->mac.type == e1000_i354) ? 20 : 8)
#define IGB_TX_HTHRESH 1
#define IGB_RX_WTHRESH ((hw->mac.type == e1000_82576 && \
- adapter->msix_entries) ? 1 : 4)
+ (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 4)
#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \
- adapter->msix_entries) ? 1 : 16)
+ (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 16)
/* this is the size past which hardware will drop packets when setting LPE=0 */
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
@@ -337,8 +339,10 @@ struct hwmon_attr {
};
struct hwmon_buff {
- struct device *device;
- struct hwmon_attr *hwmon_list;
+ struct attribute_group group;
+ const struct attribute_group *groups[2];
+ struct attribute *attrs[E1000_MAX_SENSORS * 4 + 1];
+ struct hwmon_attr hwmon_list[E1000_MAX_SENSORS * 4];
unsigned int n_hwmon;
};
#endif
@@ -355,7 +359,7 @@ struct igb_adapter {
unsigned int flags;
unsigned int num_q_vectors;
- struct msix_entry *msix_entries;
+ struct msix_entry msix_entries[MAX_MSIX_ENTRIES];
/* Interrupt Throttle Rate */
u32 rx_itr_setting;
@@ -440,7 +444,7 @@ struct igb_adapter {
char fw_version[32];
#ifdef CONFIG_IGB_HWMON
- struct hwmon_buff igb_hwmon_buff;
+ struct hwmon_buff *igb_hwmon_buff;
bool ets;
#endif
struct i2c_algo_bit_data i2c_algo;
@@ -450,6 +454,8 @@ struct igb_adapter {
u8 rss_indir_tbl[IGB_RETA_SIZE];
unsigned long link_check_timeout;
+ int copper_tries;
+ struct e1000_info ei;
};
#define IGB_FLAG_HAS_MSI (1 << 0)
@@ -462,6 +468,16 @@ struct igb_adapter {
#define IGB_FLAG_RSS_FIELD_IPV6_UDP (1 << 7)
#define IGB_FLAG_WOL_SUPPORTED (1 << 8)
#define IGB_FLAG_NEED_LINK_UPDATE (1 << 9)
+#define IGB_FLAG_MEDIA_RESET (1 << 10)
+#define IGB_FLAG_MAS_CAPABLE (1 << 11)
+#define IGB_FLAG_MAS_ENABLE (1 << 12)
+#define IGB_FLAG_HAS_MSIX (1 << 13)
+
+/* Media Auto Sense */
+#define IGB_MAS_ENABLE_0 0X0001
+#define IGB_MAS_ENABLE_1 0X0002
+#define IGB_MAS_ENABLE_2 0X0004
+#define IGB_MAS_ENABLE_3 0X0008
/* DMA Coalescing defines */
#define IGB_MIN_TXPBSIZE 20408
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index c3143da497c8..1df02378de69 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1386,7 +1386,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
*data = 0;
/* Hook up test interrupt handler just for this test */
- if (adapter->msix_entries) {
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
if (request_irq(adapter->msix_entries[0].vector,
igb_test_intr, 0, netdev->name, adapter)) {
*data = 1;
@@ -1519,7 +1519,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
msleep(10);
/* Unhook test interrupt handler */
- if (adapter->msix_entries)
+ if (adapter->flags & IGB_FLAG_HAS_MSIX)
free_irq(adapter->msix_entries[0].vector, adapter);
else
free_irq(irq, adapter);
@@ -1983,6 +1983,10 @@ static void igb_diag_test(struct net_device *netdev,
bool if_running = netif_running(netdev);
set_bit(__IGB_TESTING, &adapter->state);
+
+ /* can't do offline tests on media switching devices */
+ if (adapter->hw.dev_spec._82575.mas_capable)
+ eth_test->flags &= ~ETH_TEST_FL_OFFLINE;
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
/* Offline tests */
@@ -2929,7 +2933,7 @@ static void igb_get_channels(struct net_device *netdev,
ch->max_combined = igb_max_channels(adapter);
/* Report info for other vector */
- if (adapter->msix_entries) {
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
ch->max_other = NON_Q_VECTORS;
ch->other_count = NON_Q_VECTORS;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c
index 58f1ce967aeb..e0af5bc61613 100644
--- a/drivers/net/ethernet/intel/igb/igb_hwmon.c
+++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c
@@ -117,29 +117,29 @@ static int igb_add_hwmon_attr(struct igb_adapter *adapter,
unsigned int n_attr;
struct hwmon_attr *igb_attr;
- n_attr = adapter->igb_hwmon_buff.n_hwmon;
- igb_attr = &adapter->igb_hwmon_buff.hwmon_list[n_attr];
+ n_attr = adapter->igb_hwmon_buff->n_hwmon;
+ igb_attr = &adapter->igb_hwmon_buff->hwmon_list[n_attr];
switch (type) {
case IGB_HWMON_TYPE_LOC:
igb_attr->dev_attr.show = igb_hwmon_show_location;
snprintf(igb_attr->name, sizeof(igb_attr->name),
- "temp%u_label", offset);
+ "temp%u_label", offset + 1);
break;
case IGB_HWMON_TYPE_TEMP:
igb_attr->dev_attr.show = igb_hwmon_show_temp;
snprintf(igb_attr->name, sizeof(igb_attr->name),
- "temp%u_input", offset);
+ "temp%u_input", offset + 1);
break;
case IGB_HWMON_TYPE_CAUTION:
igb_attr->dev_attr.show = igb_hwmon_show_cautionthresh;
snprintf(igb_attr->name, sizeof(igb_attr->name),
- "temp%u_max", offset);
+ "temp%u_max", offset + 1);
break;
case IGB_HWMON_TYPE_MAX:
igb_attr->dev_attr.show = igb_hwmon_show_maxopthresh;
snprintf(igb_attr->name, sizeof(igb_attr->name),
- "temp%u_crit", offset);
+ "temp%u_crit", offset + 1);
break;
default:
rc = -EPERM;
@@ -154,30 +154,16 @@ static int igb_add_hwmon_attr(struct igb_adapter *adapter,
igb_attr->dev_attr.attr.mode = S_IRUGO;
igb_attr->dev_attr.attr.name = igb_attr->name;
sysfs_attr_init(&igb_attr->dev_attr.attr);
- rc = device_create_file(&adapter->pdev->dev,
- &igb_attr->dev_attr);
- if (rc == 0)
- ++adapter->igb_hwmon_buff.n_hwmon;
- return rc;
+ adapter->igb_hwmon_buff->attrs[n_attr] = &igb_attr->dev_attr.attr;
+
+ ++adapter->igb_hwmon_buff->n_hwmon;
+
+ return 0;
}
static void igb_sysfs_del_adapter(struct igb_adapter *adapter)
{
- int i;
-
- if (adapter == NULL)
- return;
-
- for (i = 0; i < adapter->igb_hwmon_buff.n_hwmon; i++) {
- device_remove_file(&adapter->pdev->dev,
- &adapter->igb_hwmon_buff.hwmon_list[i].dev_attr);
- }
-
- kfree(adapter->igb_hwmon_buff.hwmon_list);
-
- if (adapter->igb_hwmon_buff.device)
- hwmon_device_unregister(adapter->igb_hwmon_buff.device);
}
/* called from igb_main.c */
@@ -189,11 +175,11 @@ void igb_sysfs_exit(struct igb_adapter *adapter)
/* called from igb_main.c */
int igb_sysfs_init(struct igb_adapter *adapter)
{
- struct hwmon_buff *igb_hwmon = &adapter->igb_hwmon_buff;
+ struct hwmon_buff *igb_hwmon;
+ struct i2c_client *client;
+ struct device *hwmon_dev;
unsigned int i;
- int n_attrs;
int rc = 0;
- struct i2c_client *client = NULL;
/* If this method isn't defined we don't support thermals */
if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL)
@@ -201,34 +187,16 @@ int igb_sysfs_init(struct igb_adapter *adapter)
/* Don't create thermal hwmon interface if no sensors present */
rc = (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw));
- if (rc)
- goto exit;
-
- /* init i2c_client */
- client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info);
- if (client == NULL) {
- dev_info(&adapter->pdev->dev,
- "Failed to create new i2c device..\n");
+ if (rc)
goto exit;
- }
- adapter->i2c_client = client;
- /* Allocation space for max attributes
- * max num sensors * values (loc, temp, max, caution)
- */
- n_attrs = E1000_MAX_SENSORS * 4;
- igb_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr),
- GFP_KERNEL);
- if (!igb_hwmon->hwmon_list) {
+ igb_hwmon = devm_kzalloc(&adapter->pdev->dev, sizeof(*igb_hwmon),
+ GFP_KERNEL);
+ if (!igb_hwmon) {
rc = -ENOMEM;
- goto err;
- }
-
- igb_hwmon->device = hwmon_device_register(&adapter->pdev->dev);
- if (IS_ERR(igb_hwmon->device)) {
- rc = PTR_ERR(igb_hwmon->device);
- goto err;
+ goto exit;
}
+ adapter->igb_hwmon_buff = igb_hwmon;
for (i = 0; i < E1000_MAX_SENSORS; i++) {
@@ -240,11 +208,39 @@ int igb_sysfs_init(struct igb_adapter *adapter)
/* Bail if any hwmon attr struct fails to initialize */
rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_CAUTION);
- rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_LOC);
- rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_TEMP);
- rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_MAX);
if (rc)
- goto err;
+ goto exit;
+ rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_LOC);
+ if (rc)
+ goto exit;
+ rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_TEMP);
+ if (rc)
+ goto exit;
+ rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_MAX);
+ if (rc)
+ goto exit;
+ }
+
+ /* init i2c_client */
+ client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info);
+ if (client == NULL) {
+ dev_info(&adapter->pdev->dev,
+ "Failed to create new i2c device.\n");
+ rc = -ENODEV;
+ goto exit;
+ }
+ adapter->i2c_client = client;
+
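+ /* publish every attribute through a single sysfs attribute group */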
+ igb_hwmon->groups[0] = &igb_hwmon->group;
+ igb_hwmon->group.attrs = igb_hwmon->attrs;
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(&adapter->pdev->dev,
+ client->name,
+ igb_hwmon,
+ igb_hwmon->groups);
+ if (IS_ERR(hwmon_dev)) {
+ rc = PTR_ERR(hwmon_dev);
+ goto err;
}
goto exit;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 025e5f4b7481..46d31a49f5ea 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -803,7 +803,7 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
if (tx_queue > IGB_N0_QUEUE)
msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
- if (!adapter->msix_entries && msix_vector == 0)
+ if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0)
msixbm |= E1000_EIMS_OTHER;
array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
q_vector->eims_value = msixbm;
@@ -983,43 +983,58 @@ err_out:
return err;
}
-static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
-{
- if (adapter->msix_entries) {
- pci_disable_msix(adapter->pdev);
- kfree(adapter->msix_entries);
- adapter->msix_entries = NULL;
- } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
- pci_disable_msi(adapter->pdev);
- }
-}
-
/**
* igb_free_q_vector - Free memory allocated for specific interrupt vector
* @adapter: board private structure to initialize
* @v_idx: Index of vector to be freed
*
- * This function frees the memory allocated to the q_vector. In addition if
- * NAPI is enabled it will delete any references to the NAPI struct prior
- * to freeing the q_vector.
+ * This function frees the memory allocated to the q_vector.
**/
static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
{
struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+ adapter->q_vector[v_idx] = NULL;
+
+ /* igb_get_stats64() might access the rings on this vector,
+ * we must wait a grace period before freeing it.
+ */
+ kfree_rcu(q_vector, rcu);
+}
+
+/**
+ * igb_reset_q_vector - Reset config for interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_idx: Index of vector to be reset
+ *
+ * If NAPI is enabled it will delete any references to the
+ * NAPI struct. This is preparation for igb_free_q_vector.
+ **/
+static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx)
+{
+ struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+
if (q_vector->tx.ring)
adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
if (q_vector->rx.ring)
adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
- adapter->q_vector[v_idx] = NULL;
netif_napi_del(&q_vector->napi);
- /* igb_get_stats64() might access the rings on this vector,
- * we must wait a grace period before freeing it.
- */
- kfree_rcu(q_vector, rcu);
+}
+
+static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
+{
+ int v_idx = adapter->num_q_vectors;
+
+ if (adapter->flags & IGB_FLAG_HAS_MSIX)
+ pci_disable_msix(adapter->pdev);
+ else if (adapter->flags & IGB_FLAG_HAS_MSI)
+ pci_disable_msi(adapter->pdev);
+
+ while (v_idx--)
+ igb_reset_q_vector(adapter, v_idx);
}
/**
@@ -1038,8 +1053,10 @@ static void igb_free_q_vectors(struct igb_adapter *adapter)
adapter->num_rx_queues = 0;
adapter->num_q_vectors = 0;
- while (v_idx--)
+ while (v_idx--) {
+ igb_reset_q_vector(adapter, v_idx);
igb_free_q_vector(adapter, v_idx);
+ }
}
/**
@@ -1070,6 +1087,7 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
if (!msix)
goto msi_only;
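+ /* msix_entries is now a fixed-size array, so MSI-X usage is tracked
+ * with a flag rather than a pointer test
+ */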
+ adapter->flags |= IGB_FLAG_HAS_MSIX;
/* Number of supported queues. */
adapter->num_rx_queues = adapter->rss_queues;
@@ -1090,12 +1108,6 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
/* add 1 vector for link status interrupts */
numvecs++;
- adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
- GFP_KERNEL);
-
- if (!adapter->msix_entries)
- goto msi_only;
-
for (i = 0; i < numvecs; i++)
adapter->msix_entries[i].entry = i;
@@ -1172,7 +1184,9 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
(sizeof(struct igb_ring) * ring_count);
/* allocate q_vector and rings */
- q_vector = kzalloc(size, GFP_KERNEL);
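+ /* reuse a q_vector left over from a previous configuration, if any */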
+ q_vector = adapter->q_vector[v_idx];
+ if (!q_vector)
+ q_vector = kzalloc(size, GFP_KERNEL);
if (!q_vector)
return -ENOMEM;
@@ -1370,7 +1384,7 @@ static int igb_request_irq(struct igb_adapter *adapter)
struct pci_dev *pdev = adapter->pdev;
int err = 0;
- if (adapter->msix_entries) {
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
err = igb_request_msix(adapter);
if (!err)
goto request_done;
@@ -1414,7 +1428,7 @@ request_done:
static void igb_free_irq(struct igb_adapter *adapter)
{
- if (adapter->msix_entries) {
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
int vector = 0, i;
free_irq(adapter->msix_entries[vector++].vector, adapter);
@@ -1439,7 +1453,7 @@ static void igb_irq_disable(struct igb_adapter *adapter)
* mapped into these registers and so clearing the bits can cause
* issues on the VF drivers so we only need to clear what we set
*/
- if (adapter->msix_entries) {
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
u32 regval = rd32(E1000_EIAM);
wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
wr32(E1000_EIMC, adapter->eims_enable_mask);
@@ -1450,7 +1464,7 @@ static void igb_irq_disable(struct igb_adapter *adapter)
wr32(E1000_IAM, 0);
wr32(E1000_IMC, ~0);
wrfl();
- if (adapter->msix_entries) {
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
int i;
for (i = 0; i < adapter->num_q_vectors; i++)
synchronize_irq(adapter->msix_entries[i].vector);
@@ -1467,7 +1481,7 @@ static void igb_irq_enable(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- if (adapter->msix_entries) {
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
u32 regval = rd32(E1000_EIAC);
wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
@@ -1607,6 +1621,73 @@ static void igb_power_down_link(struct igb_adapter *adapter)
}
/**
+ * igb_check_swap_media - Detect and switch function for Media Auto Sense
+ * @adapter: address of the board private structure
+ **/
+static void igb_check_swap_media(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl_ext, connsw;
+ bool swap_now = false;
+
+ ctrl_ext = rd32(E1000_CTRL_EXT);
+ connsw = rd32(E1000_CONNSW);
+
+ /* need to live swap if current media is copper and we have fiber/serdes
+ * to go to.
+ */
+
+ if ((hw->phy.media_type == e1000_media_type_copper) &&
+ (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) {
+ swap_now = true;
+ } else if (!(connsw & E1000_CONNSW_SERDESD)) {
+ /* copper signal takes time to appear */
+ if (adapter->copper_tries < 4) {
+ adapter->copper_tries++;
+ connsw |= E1000_CONNSW_AUTOSENSE_CONF;
+ wr32(E1000_CONNSW, connsw);
+ return;
+ } else {
+ adapter->copper_tries = 0;
+ if ((connsw & E1000_CONNSW_PHYSD) &&
+ (!(connsw & E1000_CONNSW_PHY_PDN))) {
+ swap_now = true;
+ connsw &= ~E1000_CONNSW_AUTOSENSE_CONF;
+ wr32(E1000_CONNSW, connsw);
+ }
+ }
+ }
+
+ if (!swap_now)
+ return;
+
+ switch (hw->phy.media_type) {
+ case e1000_media_type_copper:
+ netdev_info(adapter->netdev,
+ "MAS: changing media to fiber/serdes\n");
+ ctrl_ext |=
+ E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
+ adapter->flags |= IGB_FLAG_MEDIA_RESET;
+ adapter->copper_tries = 0;
+ break;
+ case e1000_media_type_internal_serdes:
+ case e1000_media_type_fiber:
+ netdev_info(adapter->netdev,
+ "MAS: changing media to copper\n");
+ ctrl_ext &=
+ ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
+ adapter->flags |= IGB_FLAG_MEDIA_RESET;
+ break;
+ default:
+ /* shouldn't get here during regular operation */
+ netdev_err(adapter->netdev,
+ "AMS: Invalid media type found, returning\n");
+ break;
+ }
+ wr32(E1000_CTRL_EXT, ctrl_ext);
+}
+
+/**
* igb_up - Open the interface and prepare it to handle traffic
* @adapter: board private structure
**/
@@ -1623,7 +1704,7 @@ int igb_up(struct igb_adapter *adapter)
for (i = 0; i < adapter->num_q_vectors; i++)
napi_enable(&(adapter->q_vector[i]->napi));
- if (adapter->msix_entries)
+ if (adapter->flags & IGB_FLAG_HAS_MSIX)
igb_configure_msix(adapter);
else
igb_assign_vector(adapter->q_vector[0], 0);
@@ -1719,6 +1800,37 @@ void igb_reinit_locked(struct igb_adapter *adapter)
clear_bit(__IGB_RESETTING, &adapter->state);
}
+/**
+ * igb_enable_mas - Media Autosense re-enable after swap
+ * @adapter: adapter struct
+ **/
+static s32 igb_enable_mas(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 connsw;
+ s32 ret_val = 0;
+
+ connsw = rd32(E1000_CONNSW);
+ if (hw->phy.media_type != e1000_media_type_copper)
+ return ret_val;
+
+ /* configure for SerDes media detect */
+ if (!(connsw & E1000_CONNSW_SERDESD)) {
+ connsw |= E1000_CONNSW_ENRGSRC;
+ connsw |= E1000_CONNSW_AUTOSENSE_EN;
+ wr32(E1000_CONNSW, connsw);
+ wrfl();
+ } else if (connsw & E1000_CONNSW_SERDESD) {
+ /* already SerDes, no need to enable anything */
+ return ret_val;
+ } else {
+ netdev_info(adapter->netdev,
+ "MAS: Unable to configure feature, disabling..\n");
+ adapter->flags &= ~IGB_FLAG_MAS_ENABLE;
+ }
+ return ret_val;
+}
+
void igb_reset(struct igb_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
@@ -1830,6 +1942,16 @@ void igb_reset(struct igb_adapter *adapter)
hw->mac.ops.reset_hw(hw);
wr32(E1000_WUC, 0);
+ if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
+ /* need to resetup here after media swap */
+ adapter->ei.get_invariants(hw);
+ adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
+ }
+ if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
+ if (igb_enable_mas(adapter))
+ dev_err(&pdev->dev,
+ "Error enabling Media Auto Sense\n");
+ }
if (hw->mac.ops.init_hw(hw))
dev_err(&pdev->dev, "Hardware Error\n");
@@ -1976,6 +2098,58 @@ void igb_set_fw_version(struct igb_adapter *adapter)
}
/**
+ * igb_init_mas - init Media Autosense feature if enabled in the NVM
+ *
+ * @adapter: adapter struct
+ **/
+static void igb_init_mas(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 eeprom_data;
+
+ hw->nvm.ops.read(hw, NVM_COMPAT, 1, &eeprom_data);
+ switch (hw->bus.func) {
+ case E1000_FUNC_0:
+ if (eeprom_data & IGB_MAS_ENABLE_0) {
+ adapter->flags |= IGB_FLAG_MAS_ENABLE;
+ netdev_info(adapter->netdev,
+ "MAS: Enabling Media Autosense for port %d\n",
+ hw->bus.func);
+ }
+ break;
+ case E1000_FUNC_1:
+ if (eeprom_data & IGB_MAS_ENABLE_1) {
+ adapter->flags |= IGB_FLAG_MAS_ENABLE;
+ netdev_info(adapter->netdev,
+ "MAS: Enabling Media Autosense for port %d\n",
+ hw->bus.func);
+ }
+ break;
+ case E1000_FUNC_2:
+ if (eeprom_data & IGB_MAS_ENABLE_2) {
+ adapter->flags |= IGB_FLAG_MAS_ENABLE;
+ netdev_info(adapter->netdev,
+ "MAS: Enabling Media Autosense for port %d\n",
+ hw->bus.func);
+ }
+ break;
+ case E1000_FUNC_3:
+ if (eeprom_data & IGB_MAS_ENABLE_3) {
+ adapter->flags |= IGB_FLAG_MAS_ENABLE;
+ netdev_info(adapter->netdev,
+ "MAS: Enabling Media Autosense for port %d\n",
+ hw->bus.func);
+ }
+ break;
+ default:
+ /* Shouldn't get here */
+ netdev_err(adapter->netdev,
+ "MAS: Invalid port configuration, returning\n");
+ break;
+ }
+}
+
+/**
* igb_init_i2c - Init I2C interface
* @adapter: pointer to adapter structure
**/
@@ -2022,7 +2196,6 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
s32 ret_val;
static int global_quad_port_a; /* global quad port a indication */
const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
- unsigned long mmio_start, mmio_len;
int err, pci_using_dac;
u8 part_str[E1000_PBANUM_LENGTH];
@@ -2079,11 +2252,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->back = adapter;
adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
- mmio_start = pci_resource_start(pdev, 0);
- mmio_len = pci_resource_len(pdev, 0);
-
err = -EIO;
- hw->hw_addr = ioremap(mmio_start, mmio_len);
+ hw->hw_addr = pci_iomap(pdev, 0, 0);
if (!hw->hw_addr)
goto err_ioremap;
@@ -2093,8 +2263,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
- netdev->mem_start = mmio_start;
- netdev->mem_end = mmio_start + mmio_len;
+ netdev->mem_start = pci_resource_start(pdev, 0);
+ netdev->mem_end = pci_resource_end(pdev, 0);
/* PCI config space info */
hw->vendor_id = pdev->vendor;
@@ -2350,6 +2520,11 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->ets = false;
}
#endif
+ /* Check if Media Autosense is enabled */
+ adapter->ei = *ei;
+ if (hw->dev_spec._82575.mas_capable)
+ igb_init_mas(adapter);
+
/* do hw tstamp init after resetting */
igb_ptp_init(adapter);
@@ -2382,7 +2557,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
dev_info(&pdev->dev,
"Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
- adapter->msix_entries ? "MSI-X" :
+ (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" :
(adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
adapter->num_rx_queues, adapter->num_tx_queues);
switch (hw->mac.type) {
@@ -2470,7 +2645,7 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
int err = 0;
int i;
- if (!adapter->msix_entries || num_vfs > 7) {
+ if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) {
err = -EPERM;
goto out;
}
@@ -3935,6 +4110,7 @@ static void igb_watchdog_task(struct work_struct *work)
struct net_device *netdev = adapter->netdev;
u32 link;
int i;
+ u32 connsw;
link = igb_has_link(adapter);
@@ -3945,7 +4121,21 @@ static void igb_watchdog_task(struct work_struct *work)
link = false;
}
+ /* Force link down if we have fiber to swap to */
+ if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ connsw = rd32(E1000_CONNSW);
+ if (!(connsw & E1000_CONNSW_AUTOSENSE_EN))
+ link = 0;
+ }
+ }
if (link) {
+ /* Perform a reset if the media type changed. */
+ if (hw->dev_spec._82575.media_changed) {
+ hw->dev_spec._82575.media_changed = false;
+ adapter->flags |= IGB_FLAG_MEDIA_RESET;
+ igb_reset(adapter);
+ }
/* Cancel scheduled suspend requests. */
pm_runtime_resume(netdev->dev.parent);
@@ -4026,8 +4216,27 @@ static void igb_watchdog_task(struct work_struct *work)
mod_timer(&adapter->phy_info_timer,
round_jiffies(jiffies + 2 * HZ));
+ /* link is down, time to check for alternate media */
+ if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
+ igb_check_swap_media(adapter);
+ if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
+ schedule_work(&adapter->reset_task);
+ /* return immediately */
+ return;
+ }
+ }
pm_schedule_suspend(netdev->dev.parent,
MSEC_PER_SEC * 5);
+
+ /* also check for alternate media here */
+ } else if (!netif_carrier_ok(netdev) &&
+ (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
+ igb_check_swap_media(adapter);
+ if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
+ schedule_work(&adapter->reset_task);
+ /* return immediately */
+ return;
+ }
}
}
@@ -4056,7 +4265,7 @@ static void igb_watchdog_task(struct work_struct *work)
}
/* Cause software interrupt to ensure Rx ring is cleaned */
- if (adapter->msix_entries) {
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
u32 eics = 0;
for (i = 0; i < adapter->num_q_vectors; i++)
eics |= adapter->q_vector[i]->eims_value;
@@ -5977,7 +6186,7 @@ static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
}
if (!test_bit(__IGB_DOWN, &adapter->state)) {
- if (adapter->msix_entries)
+ if (adapter->flags & IGB_FLAG_HAS_MSIX)
wr32(E1000_EIMS, q_vector->eims_value);
else
igb_irq_enable(adapter);
@@ -7344,7 +7553,7 @@ static void igb_netpoll(struct net_device *netdev)
for (i = 0; i < adapter->num_q_vectors; i++) {
q_vector = adapter->q_vector[i];
- if (adapter->msix_entries)
+ if (adapter->flags & IGB_FLAG_HAS_MSIX)
wr32(E1000_EIMC, q_vector->eims_value);
else
igb_irq_disable(adapter);
@@ -7842,7 +8051,7 @@ int igb_reinit_queues(struct igb_adapter *adapter)
if (netif_running(netdev))
igb_close(netdev);
- igb_clear_interrupt_scheme(adapter);
+ igb_reset_interrupt_capability(adapter);
if (igb_init_interrupt_scheme(adapter, true)) {
dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 04bf22e5ee31..675435fc2e53 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1745,7 +1745,7 @@ static int igbvf_set_mac(struct net_device *netdev, void *p)
hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
- if (memcmp(addr->sa_data, hw->mac.addr, 6))
+ if (!ether_addr_equal(addr->sa_data, hw->mac.addr))
return -EADDRNOTAVAIL;
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb.h b/drivers/net/ethernet/intel/ixgb/ixgb.h
index 2224cc2edf13..1180cd59b570 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb.h
@@ -33,7 +33,6 @@
#include <linux/module.h>
#include <linux/types.h>
#include <asm/byteorder.h>
-#include <linux/init.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ioport.h>
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index f38fc0a343a2..0186ea2969fe 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -424,9 +424,10 @@ static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
#ifdef BP_EXTENDED_STATS
q_vector->tx.ring->stats.yields++;
#endif
- } else
+ } else {
/* we don't care if someone yielded */
q_vector->state = IXGBE_QV_STATE_NAPI;
+ }
spin_unlock_bh(&q_vector->lock);
return rc;
}
@@ -458,9 +459,10 @@ static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector)
#ifdef BP_EXTENDED_STATS
q_vector->rx.ring->stats.yields++;
#endif
- } else
+ } else {
/* preserve yield marks */
q_vector->state |= IXGBE_QV_STATE_POLL;
+ }
spin_unlock_bh(&q_vector->lock);
return rc;
}
@@ -552,8 +554,10 @@ struct hwmon_attr {
};
struct hwmon_buff {
- struct device *device;
- struct hwmon_attr *hwmon_list;
+ struct attribute_group group;
+ const struct attribute_group *groups[2];
+ struct attribute *attrs[IXGBE_MAX_SENSORS * 4 + 1];
+ struct hwmon_attr hwmon_list[IXGBE_MAX_SENSORS * 4];
unsigned int n_hwmon;
};
#endif /* CONFIG_IXGBE_HWMON */
@@ -583,6 +587,11 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}
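+/* funnel all tail register writes through one helper */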
+static inline void ixgbe_write_tail(struct ixgbe_ring *ring, u32 value)
+{
+ writel(value, ring->tail);
+}
+
#define IXGBE_RX_DESC(R, i) \
(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
#define IXGBE_TX_DESC(R, i) \
@@ -740,6 +749,7 @@ struct ixgbe_adapter {
#ifdef IXGBE_FCOE
struct ixgbe_fcoe fcoe;
#endif /* IXGBE_FCOE */
+ u8 __iomem *io_addr; /* Mainly for iounmap use */
u32 wol;
u16 bd_number;
@@ -775,7 +785,7 @@ struct ixgbe_adapter {
u32 vferr_refcount;
struct kobject *info_kobj;
#ifdef CONFIG_IXGBE_HWMON
- struct hwmon_buff ixgbe_hwmon_buff;
+ struct hwmon_buff *ixgbe_hwmon_buff;
#endif /* CONFIG_IXGBE_HWMON */
#ifdef CONFIG_DEBUG_FS
struct dentry *ixgbe_dbg_adapter;
@@ -796,6 +806,7 @@ enum ixgbe_state_t {
__IXGBE_TESTING,
__IXGBE_RESETTING,
__IXGBE_DOWN,
+ __IXGBE_REMOVING,
__IXGBE_SERVICE_SCHED,
__IXGBE_IN_SFP_INIT,
__IXGBE_PTP_RUNNING,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 007a0083a636..edda6814108c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -626,7 +626,7 @@ static void ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw,
goto out;
}
- eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs;
+ eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
IXGBE_I2C_EEPROM_DEV_ADDR2,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index d259dc76604e..f2e3919750ec 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -124,24 +124,65 @@ s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
-#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
+#define IXGBE_FAILED_READ_REG 0xffffffffU
-#ifndef writeq
-#define writeq(val, addr) writel((u32) (val), addr); \
- writel((u32) (val >> 32), (addr + 4));
-#endif
+static inline bool ixgbe_removed(void __iomem *addr)
+{
+ return unlikely(!addr);
+}
-#define IXGBE_WRITE_REG64(a, reg, value) writeq((value), ((a)->hw_addr + (reg)))
+void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg);
-#define IXGBE_READ_REG(a, reg) readl((a)->hw_addr + (reg))
+static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value)
+{
+ u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
-#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) (\
- writel((value), ((a)->hw_addr + (reg) + ((offset) << 2))))
+ if (ixgbe_removed(reg_addr))
+ return;
+ writel(value, reg_addr + reg);
+}
+#define IXGBE_WRITE_REG(a, reg, value) ixgbe_write_reg((a), (reg), (value))
-#define IXGBE_READ_REG_ARRAY(a, reg, offset) (\
- readl((a)->hw_addr + (reg) + ((offset) << 2)))
+#ifndef writeq
+#define writeq writeq
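+/* emulate a 64-bit MMIO write as two 32-bit writes, low dword first */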
+static inline void writeq(u64 val, void __iomem *addr)
+{
+ writel((u32)val, addr);
+ writel((u32)(val >> 32), addr + 4);
+}
+#endif
-#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
+static inline void ixgbe_write_reg64(struct ixgbe_hw *hw, u32 reg, u64 value)
+{
+ u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+
+ if (ixgbe_removed(reg_addr))
+ return;
+ writeq(value, reg_addr + reg);
+}
+#define IXGBE_WRITE_REG64(a, reg, value) ixgbe_write_reg64((a), (reg), (value))
+
+static inline u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
+{
+ u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+ u32 value;
+
+ if (ixgbe_removed(reg_addr))
+ return IXGBE_FAILED_READ_REG;
+ value = readl(reg_addr + reg);
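+ /* reads from a surprise-removed PCI device return all ones, so
+ * double-check before trusting the value
+ */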
+ if (unlikely(value == IXGBE_FAILED_READ_REG))
+ ixgbe_check_remove(hw, reg);
+ return value;
+}
+#define IXGBE_READ_REG(a, reg) ixgbe_read_reg((a), (reg))
+
+#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) \
+ ixgbe_write_reg((a), (reg) + ((offset) << 2), (value))
+
+#define IXGBE_READ_REG_ARRAY(a, reg, offset) \
+ ixgbe_read_reg((a), (reg) + ((offset) << 2))
+
+#define IXGBE_WRITE_FLUSH(a) ixgbe_read_reg((a), IXGBE_STATUS)
#define ixgbe_hw_to_netdev(hw) (((struct ixgbe_adapter *)(hw)->back)->netdev)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 4e7c9b098b58..043307024c4a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1342,61 +1342,61 @@ static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
static const u32 test_pattern[] = {
0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+ if (ixgbe_removed(adapter->hw.hw_addr)) {
+ *data = 1;
+ return 1;
+ }
for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
- before = readl(adapter->hw.hw_addr + reg);
- writel((test_pattern[pat] & write),
- (adapter->hw.hw_addr + reg));
- val = readl(adapter->hw.hw_addr + reg);
+ before = ixgbe_read_reg(&adapter->hw, reg);
+ ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write);
+ val = ixgbe_read_reg(&adapter->hw, reg);
if (val != (test_pattern[pat] & write & mask)) {
e_err(drv, "pattern test reg %04X failed: got "
"0x%08X expected 0x%08X\n",
reg, val, (test_pattern[pat] & write & mask));
*data = reg;
- writel(before, adapter->hw.hw_addr + reg);
- return 1;
+ ixgbe_write_reg(&adapter->hw, reg, before);
+ return true;
}
- writel(before, adapter->hw.hw_addr + reg);
+ ixgbe_write_reg(&adapter->hw, reg, before);
}
- return 0;
+ return false;
}
static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
u32 mask, u32 write)
{
u32 val, before;
- before = readl(adapter->hw.hw_addr + reg);
- writel((write & mask), (adapter->hw.hw_addr + reg));
- val = readl(adapter->hw.hw_addr + reg);
+
+ if (ixgbe_removed(adapter->hw.hw_addr)) {
+ *data = 1;
+ return 1;
+ }
+ before = ixgbe_read_reg(&adapter->hw, reg);
+ ixgbe_write_reg(&adapter->hw, reg, write & mask);
+ val = ixgbe_read_reg(&adapter->hw, reg);
if ((write & mask) != (val & mask)) {
e_err(drv, "set/check reg %04X test failed: got 0x%08X "
"expected 0x%08X\n", reg, (val & mask), (write & mask));
*data = reg;
- writel(before, (adapter->hw.hw_addr + reg));
- return 1;
+ ixgbe_write_reg(&adapter->hw, reg, before);
+ return true;
}
- writel(before, (adapter->hw.hw_addr + reg));
- return 0;
+ ixgbe_write_reg(&adapter->hw, reg, before);
+ return false;
}
-#define REG_PATTERN_TEST(reg, mask, write) \
- do { \
- if (reg_pattern_test(adapter, data, reg, mask, write)) \
- return 1; \
- } while (0) \
-
-
-#define REG_SET_AND_CHECK(reg, mask, write) \
- do { \
- if (reg_set_and_check(adapter, data, reg, mask, write)) \
- return 1; \
- } while (0) \
-
static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
{
const struct ixgbe_reg_test *test;
u32 value, before, after;
u32 i, toggle;
+ if (ixgbe_removed(adapter->hw.hw_addr)) {
+ e_err(drv, "Adapter removed - register test blocked\n");
+ *data = 1;
+ return 1;
+ }
switch (adapter->hw.mac.type) {
case ixgbe_mac_82598EB:
toggle = 0x7FFFF3FF;
@@ -1419,10 +1419,10 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
* tests. Some bits are read-only, some toggle, and some
* are writable on newer MACs.
*/
- before = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS);
- value = (IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle);
- after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle;
+ before = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS);
+ value = (ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle);
+ ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle);
+ after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle;
if (value != after) {
e_err(drv, "failed STATUS register test got: 0x%08X "
"expected: 0x%08X\n", after, value);
@@ -1430,7 +1430,7 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
return 1;
}
/* restore previous status */
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, before);
+ ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, before);
/*
* Perform the remainder of the register test, looping through
@@ -1438,38 +1438,47 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
*/
while (test->reg) {
for (i = 0; i < test->array_len; i++) {
+ bool b = false;
+
switch (test->test_type) {
case PATTERN_TEST:
- REG_PATTERN_TEST(test->reg + (i * 0x40),
- test->mask,
- test->write);
+ b = reg_pattern_test(adapter, data,
+ test->reg + (i * 0x40),
+ test->mask,
+ test->write);
break;
case SET_READ_TEST:
- REG_SET_AND_CHECK(test->reg + (i * 0x40),
- test->mask,
- test->write);
+ b = reg_set_and_check(adapter, data,
+ test->reg + (i * 0x40),
+ test->mask,
+ test->write);
break;
case WRITE_NO_TEST:
- writel(test->write,
- (adapter->hw.hw_addr + test->reg)
- + (i * 0x40));
+ ixgbe_write_reg(&adapter->hw,
+ test->reg + (i * 0x40),
+ test->write);
break;
case TABLE32_TEST:
- REG_PATTERN_TEST(test->reg + (i * 4),
- test->mask,
- test->write);
+ b = reg_pattern_test(adapter, data,
+ test->reg + (i * 4),
+ test->mask,
+ test->write);
break;
case TABLE64_TEST_LO:
- REG_PATTERN_TEST(test->reg + (i * 8),
- test->mask,
- test->write);
+ b = reg_pattern_test(adapter, data,
+ test->reg + (i * 8),
+ test->mask,
+ test->write);
break;
case TABLE64_TEST_HI:
- REG_PATTERN_TEST((test->reg + 4) + (i * 8),
- test->mask,
- test->write);
+ b = reg_pattern_test(adapter, data,
+ (test->reg + 4) + (i * 8),
+ test->mask,
+ test->write);
break;
}
+ if (b)
+ return 1;
}
test++;
}
@@ -1954,6 +1963,15 @@ static void ixgbe_diag_test(struct net_device *netdev,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
bool if_running = netif_running(netdev);
+ if (ixgbe_removed(adapter->hw.hw_addr)) {
+ e_err(hw, "Adapter removed - test blocked\n");
+ data[0] = 1;
+ data[1] = 1;
+ data[2] = 1;
+ data[3] = 1;
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ return;
+ }
set_bit(__IXGBE_TESTING, &adapter->state);
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
struct ixgbe_hw *hw = &adapter->hw;
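Dropping the REG_PATTERN_TEST/REG_SET_AND_CHECK macros matters because each one hid a return statement inside the caller; the bool-returning helpers make failure propagation explicit at every call site, as the per-case "b = ..." assignments above show. A reduced, hardware-free sketch of the helper shape (hypothetical names, plain hosted C):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool pattern_test(volatile uint32_t *reg, uint64_t *data,
                         uint32_t mask, uint32_t write)
{
        static const uint32_t patterns[] = {
                0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF };
        uint32_t before = *reg; /* save so the register can be restored */

        for (size_t i = 0; i < sizeof(patterns) / sizeof(patterns[0]); i++) {
                *reg = patterns[i] & write;
                if (*reg != (patterns[i] & write & mask)) {
                        *data = 1;
                        *reg = before;
                        return true;    /* failed - the caller decides what to do */
                }
        }
        *reg = before;
        return false;
}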
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 5bcc870f8367..6d4ada72dfd0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -64,7 +64,7 @@ char ixgbe_default_device_descr[] =
static char ixgbe_default_device_descr[] =
"Intel(R) 10 Gigabit Network Connection";
#endif
-#define DRV_VERSION "3.15.1-k"
+#define DRV_VERSION "3.19.1-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
"Copyright (c) 1999-2013 Intel Corporation.";
@@ -278,10 +278,41 @@ static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
{
if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
+ !test_bit(__IXGBE_REMOVING, &adapter->state) &&
!test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
schedule_work(&adapter->service_task);
}
+static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
+{
+ struct ixgbe_adapter *adapter = hw->back;
+
+ if (!hw->hw_addr)
+ return;
+ hw->hw_addr = NULL;
+ e_dev_err("Adapter removed\n");
+ ixgbe_service_event_schedule(adapter);
+}
+
+void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
+{
+ u32 value;
+
+ /* The following check avoids a redundant status register read
+ * when the register just read was itself the status register and
+ * returned IXGBE_FAILED_READ_REG; it also blocks any potential
+ * recursion.
+ */
+ if (reg == IXGBE_STATUS) {
+ ixgbe_remove_adapter(hw);
+ return;
+ }
+ value = ixgbe_read_reg(hw, IXGBE_STATUS);
+ if (value == IXGBE_FAILED_READ_REG)
+ ixgbe_remove_adapter(hw);
+}
+
static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
{
BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));
@@ -1314,7 +1345,7 @@ static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
* such as IA-64).
*/
wmb();
- writel(val, rx_ring->tail);
+ ixgbe_write_tail(rx_ring, val);
}
static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
@@ -2969,7 +3000,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
ring->count * sizeof(union ixgbe_adv_tx_desc));
IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
- ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx);
+ ring->tail = adapter->io_addr + IXGBE_TDT(reg_idx);
/*
* set WTHRESH to encourage burst writeback, it should not be set
@@ -3308,6 +3339,8 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
u32 rxdctl;
u8 reg_idx = ring->reg_idx;
+ if (ixgbe_removed(hw->hw_addr))
+ return;
/* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
if (hw->mac.type == ixgbe_mac_82598EB &&
!(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
@@ -3332,6 +3365,8 @@ void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
u32 rxdctl;
u8 reg_idx = ring->reg_idx;
+ if (ixgbe_removed(hw->hw_addr))
+ return;
rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
rxdctl &= ~IXGBE_RXDCTL_ENABLE;
@@ -3372,7 +3407,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
ring->count * sizeof(union ixgbe_adv_rx_desc));
IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
- ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx);
+ ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx);
ixgbe_configure_srrctl(adapter, ring);
ixgbe_configure_rscctl(adapter, ring);
@@ -4572,6 +4607,7 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
if (hw->mac.ops.enable_tx_laser)
hw->mac.ops.enable_tx_laser(hw);
+ smp_mb__before_clear_bit();
clear_bit(__IXGBE_DOWN, &adapter->state);
ixgbe_napi_enable_all(adapter);
@@ -4656,6 +4692,8 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
int err;
+ if (ixgbe_removed(hw->hw_addr))
+ return;
/* lock SFP init bit to prevent race conditions with the watchdog */
while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
usleep_range(1000, 2000);
@@ -4783,7 +4821,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
int i;
/* signal that we are down to the interrupt handler */
- set_bit(__IXGBE_DOWN, &adapter->state);
+ if (test_and_set_bit(__IXGBE_DOWN, &adapter->state))
+ return; /* do nothing if already down */
/* disable receives */
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
@@ -5028,7 +5067,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
/* assign number of SR-IOV VFs */
if (hw->mac.type != ixgbe_mac_82598EB) {
- if (max_vfs > 63) {
+ if (max_vfs > IXGBE_MAX_VFS_DRV_LIMIT) {
adapter->num_vfs = 0;
e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n");
} else {
@@ -5874,8 +5913,9 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
u64 eics = 0;
int i;
- /* If we're down or resetting, just bail */
+ /* If we're down, removing or resetting, just bail */
if (test_bit(__IXGBE_DOWN, &adapter->state) ||
+ test_bit(__IXGBE_REMOVING, &adapter->state) ||
test_bit(__IXGBE_RESETTING, &adapter->state))
return;
@@ -6122,8 +6162,9 @@ static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
**/
static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
{
- /* if interface is down do nothing */
+ /* if interface is down, removing or resetting, do nothing */
if (test_bit(__IXGBE_DOWN, &adapter->state) ||
+ test_bit(__IXGBE_REMOVING, &adapter->state) ||
test_bit(__IXGBE_RESETTING, &adapter->state))
return;
@@ -6341,8 +6382,9 @@ static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
adapter->flags2 &= ~IXGBE_FLAG2_RESET_REQUESTED;
- /* If we're already down or resetting, just bail */
+ /* If we're already down, removing or resetting, just bail */
if (test_bit(__IXGBE_DOWN, &adapter->state) ||
+ test_bit(__IXGBE_REMOVING, &adapter->state) ||
test_bit(__IXGBE_RESETTING, &adapter->state))
return;
@@ -6350,7 +6392,9 @@ static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
netdev_err(adapter->netdev, "Reset adapter\n");
adapter->tx_timeout_count++;
+ rtnl_lock();
ixgbe_reinit_locked(adapter);
+ rtnl_unlock();
}
/**
@@ -6362,6 +6406,15 @@ static void ixgbe_service_task(struct work_struct *work)
struct ixgbe_adapter *adapter = container_of(work,
struct ixgbe_adapter,
service_task);
+ if (ixgbe_removed(adapter->hw.hw_addr)) {
+ if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
+ rtnl_lock();
+ ixgbe_down(adapter);
+ rtnl_unlock();
+ }
+ ixgbe_service_event_complete(adapter);
+ return;
+ }
ixgbe_reset_subtask(adapter);
ixgbe_sfp_detection_subtask(adapter);
ixgbe_sfp_link_config_subtask(adapter);
@@ -6693,7 +6746,7 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
tx_ring->next_to_use = i;
/* notify HW of packet */
- writel(i, tx_ring->tail);
+ ixgbe_write_tail(tx_ring, i);
return;
dma_error:
@@ -7874,6 +7927,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
+ adapter->io_addr = hw->hw_addr;
if (!hw->hw_addr) {
err = -EIO;
goto err_ioremap;
@@ -7965,8 +8019,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Mailbox */
ixgbe_init_mbx_params_pf(hw);
memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops));
+ pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT);
ixgbe_enable_sriov(adapter);
- pci_sriov_set_totalvfs(pdev, 63);
skip_sriov:
#endif
@@ -8182,7 +8236,7 @@ err_register:
err_sw_init:
ixgbe_disable_sriov(adapter);
adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
- iounmap(hw->hw_addr);
+ iounmap(adapter->io_addr);
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
@@ -8210,7 +8264,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
ixgbe_dbg_adapter_exit(adapter);
- set_bit(__IXGBE_DOWN, &adapter->state);
+ set_bit(__IXGBE_REMOVING, &adapter->state);
cancel_work_sync(&adapter->service_task);
@@ -8249,7 +8303,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
kfree(adapter->ixgbe_ieee_ets);
#endif
- iounmap(adapter->hw.hw_addr);
+ iounmap(adapter->io_addr);
pci_release_selected_regions(pdev, pci_select_bars(pdev,
IORESOURCE_MEM));
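The hw_addr/io_addr split above is what makes the removal logic safe: ixgbe_remove_adapter() NULLs hw_addr to fence off all further register access, so teardown needs a second, untouched copy of the mapping for iounmap(). A hedged sketch of the idea with illustrative names:

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/pci.h>

struct bar_ctx {
        u8 __iomem *hw_addr;    /* NULLed on surprise removal */
        u8 __iomem *io_addr;    /* stable copy, used only for iounmap() */
};

static int bar_map(struct bar_ctx *ctx, struct pci_dev *pdev)
{
        ctx->hw_addr = ioremap(pci_resource_start(pdev, 0),
                               pci_resource_len(pdev, 0));
        ctx->io_addr = ctx->hw_addr;    /* remember the real mapping */
        return ctx->hw_addr ? 0 : -EIO;
}

static void bar_unmap(struct bar_ctx *ctx)
{
        iounmap(ctx->io_addr);  /* valid even after hw_addr was cleared */
}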
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index d4a64e665398..cc3101afd29f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -27,8 +27,7 @@
#include <linux/pci.h>
#include <linux/delay.h>
-#include "ixgbe_type.h"
-#include "ixgbe_common.h"
+#include "ixgbe.h"
#include "ixgbe_mbx.h"
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 39217e5ff7dc..132557c318f8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -29,7 +29,7 @@
#include <linux/delay.h>
#include <linux/sched.h>
-#include "ixgbe_common.h"
+#include "ixgbe.h"
#include "ixgbe_phy.h"
static void ixgbe_i2c_start(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 72084f70adbb..dff0977876f7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -148,7 +148,7 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
* physical function. If the user requests greater than
* 63 VFs then it is an error - reset to default of zero.
*/
- adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, 63);
+ adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, IXGBE_MAX_VFS_DRV_LIMIT);
err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
if (err) {
@@ -257,7 +257,7 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
* PF. The PCI bus driver already checks for other values out of
* range.
*/
- if (num_vfs > 63) {
+ if (num_vfs > IXGBE_MAX_VFS_DRV_LIMIT) {
err = -EPERM;
goto err_out;
}
@@ -631,11 +631,14 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
{
+ struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
struct ixgbe_hw *hw = &adapter->hw;
unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
u32 reg, reg_offset, vf_shift;
u32 msgbuf[4] = {0, 0, 0, 0};
u8 *addr = (u8 *)(&msgbuf[1]);
+ u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
+ int i;
e_info(probe, "VF Reset msg received from vf %d\n", vf);
@@ -654,6 +657,17 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
reg |= 1 << vf_shift;
IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
+ /* force drop enable for all VF Rx queues */
+ for (i = vf * q_per_pool; i < ((vf + 1) * q_per_pool); i++) {
+ /* flush previous write */
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* indicate to hardware that we want to set drop enable */
+ reg = IXGBE_QDE_WRITE | IXGBE_QDE_ENABLE;
+ reg |= i << IXGBE_QDE_IDX_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_QDE, reg);
+ }
+
/* enable receive for vf */
reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
reg |= 1 << vf_shift;
@@ -684,6 +698,15 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
reg |= (1 << vf_shift);
IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
+ /*
+ * Reset the VF's TDWBAL and TDWBAH registers,
+ * which are not cleared by an FLR
+ */
+ for (i = 0; i < q_per_pool; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBAHn(q_per_pool, vf, i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBALn(q_per_pool, vf, i), 0);
+ }
+
/* reply to reset with ack and vf mac address */
msgbuf[0] = IXGBE_VF_RESET;
if (!is_zero_ether_addr(vf_mac)) {
@@ -717,8 +740,7 @@ static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
}
if (adapter->vfinfo[vf].pf_set_mac &&
- memcmp(adapter->vfinfo[vf].vf_mac_addresses, new_mac,
- ETH_ALEN)) {
+ !ether_addr_equal(adapter->vfinfo[vf].vf_mac_addresses, new_mac)) {
e_warn(drv,
"VF %d attempted to override administratively set MAC address\n"
"Reload the VF driver to resume operations\n",
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 4713f9fc7f46..8bd29190514e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -28,6 +28,11 @@
#ifndef _IXGBE_SRIOV_H_
#define _IXGBE_SRIOV_H_
+/* The ixgbe driver limits the max number of VFs that can be enabled
+ * to 63 (IXGBE_MAX_VF_FUNCTIONS - 1)
+ */
+#define IXGBE_MAX_VFS_DRV_LIMIT (IXGBE_MAX_VF_FUNCTIONS - 1)
+
void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
void ixgbe_msg_task(struct ixgbe_adapter *adapter);
int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
index d118def16f35..e74ae3682733 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
@@ -111,29 +111,29 @@ static int ixgbe_add_hwmon_attr(struct ixgbe_adapter *adapter,
unsigned int n_attr;
struct hwmon_attr *ixgbe_attr;
- n_attr = adapter->ixgbe_hwmon_buff.n_hwmon;
- ixgbe_attr = &adapter->ixgbe_hwmon_buff.hwmon_list[n_attr];
+ n_attr = adapter->ixgbe_hwmon_buff->n_hwmon;
+ ixgbe_attr = &adapter->ixgbe_hwmon_buff->hwmon_list[n_attr];
switch (type) {
case IXGBE_HWMON_TYPE_LOC:
ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_location;
snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
- "temp%u_label", offset);
+ "temp%u_label", offset + 1);
break;
case IXGBE_HWMON_TYPE_TEMP:
ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_temp;
snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
- "temp%u_input", offset);
+ "temp%u_input", offset + 1);
break;
case IXGBE_HWMON_TYPE_CAUTION:
ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_cautionthresh;
snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
- "temp%u_max", offset);
+ "temp%u_max", offset + 1);
break;
case IXGBE_HWMON_TYPE_MAX:
ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_maxopthresh;
snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
- "temp%u_crit", offset);
+ "temp%u_crit", offset + 1);
break;
default:
rc = -EPERM;
@@ -147,32 +147,17 @@ static int ixgbe_add_hwmon_attr(struct ixgbe_adapter *adapter,
ixgbe_attr->dev_attr.store = NULL;
ixgbe_attr->dev_attr.attr.mode = S_IRUGO;
ixgbe_attr->dev_attr.attr.name = ixgbe_attr->name;
+ sysfs_attr_init(&ixgbe_attr->dev_attr.attr);
- rc = device_create_file(&adapter->pdev->dev,
- &ixgbe_attr->dev_attr);
+ adapter->ixgbe_hwmon_buff->attrs[n_attr] = &ixgbe_attr->dev_attr.attr;
- if (rc == 0)
- ++adapter->ixgbe_hwmon_buff.n_hwmon;
+ ++adapter->ixgbe_hwmon_buff->n_hwmon;
- return rc;
+ return 0;
}
static void ixgbe_sysfs_del_adapter(struct ixgbe_adapter *adapter)
{
- int i;
-
- if (adapter == NULL)
- return;
-
- for (i = 0; i < adapter->ixgbe_hwmon_buff.n_hwmon; i++) {
- device_remove_file(&adapter->pdev->dev,
- &adapter->ixgbe_hwmon_buff.hwmon_list[i].dev_attr);
- }
-
- kfree(adapter->ixgbe_hwmon_buff.hwmon_list);
-
- if (adapter->ixgbe_hwmon_buff.device)
- hwmon_device_unregister(adapter->ixgbe_hwmon_buff.device);
}
/* called from ixgbe_main.c */
@@ -184,9 +169,9 @@ void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter)
/* called from ixgbe_main.c */
int ixgbe_sysfs_init(struct ixgbe_adapter *adapter)
{
- struct hwmon_buff *ixgbe_hwmon = &adapter->ixgbe_hwmon_buff;
+ struct hwmon_buff *ixgbe_hwmon;
+ struct device *hwmon_dev;
unsigned int i;
- int n_attrs;
int rc = 0;
/* If this method isn't defined we don't support thermals */
@@ -198,23 +183,13 @@ int ixgbe_sysfs_init(struct ixgbe_adapter *adapter)
if (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw))
goto exit;
- /*
- * Allocation space for max attributs
- * max num sensors * values (loc, temp, max, caution)
- */
- n_attrs = IXGBE_MAX_SENSORS * 4;
- ixgbe_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr),
- GFP_KERNEL);
- if (!ixgbe_hwmon->hwmon_list) {
+ ixgbe_hwmon = devm_kzalloc(&adapter->pdev->dev, sizeof(*ixgbe_hwmon),
+ GFP_KERNEL);
+ if (ixgbe_hwmon == NULL) {
rc = -ENOMEM;
- goto err;
- }
-
- ixgbe_hwmon->device = hwmon_device_register(&adapter->pdev->dev);
- if (IS_ERR(ixgbe_hwmon->device)) {
- rc = PTR_ERR(ixgbe_hwmon->device);
- goto err;
+ goto exit;
}
+ adapter->ixgbe_hwmon_buff = ixgbe_hwmon;
for (i = 0; i < IXGBE_MAX_SENSORS; i++) {
/*
@@ -226,17 +201,28 @@ int ixgbe_sysfs_init(struct ixgbe_adapter *adapter)
/* Bail if any hwmon attr struct fails to initialize */
rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_CAUTION);
- rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_LOC);
- rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_TEMP);
- rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_MAX);
if (rc)
- goto err;
+ goto exit;
+ rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_LOC);
+ if (rc)
+ goto exit;
+ rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_TEMP);
+ if (rc)
+ goto exit;
+ rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_MAX);
+ if (rc)
+ goto exit;
}
- goto exit;
+ ixgbe_hwmon->groups[0] = &ixgbe_hwmon->group;
+ ixgbe_hwmon->group.attrs = ixgbe_hwmon->attrs;
-err:
- ixgbe_sysfs_del_adapter(adapter);
+ hwmon_dev = devm_hwmon_device_register_with_groups(&adapter->pdev->dev,
+ "ixgbe",
+ ixgbe_hwmon,
+ ixgbe_hwmon->groups);
+ if (IS_ERR(hwmon_dev))
+ rc = PTR_ERR(hwmon_dev);
exit:
return rc;
}
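The sysfs rework trades N device_create_file() calls (each needing manual cleanup) for one attribute group registered through devm_hwmon_device_register_with_groups(), which is why ixgbe_sysfs_del_adapter() above becomes empty. A reduced sketch of that registration pattern; the struct is illustrative, but note the group and attribute array must live as long as the device, just as hwmon_buff does here:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/sysfs.h>

struct hwmon_sketch {
        struct attribute_group group;
        const struct attribute_group *groups[2];
        struct attribute *attrs[5];     /* up to 4 attrs + NULL terminator */
};

static int sketch_register(struct device *parent, struct hwmon_sketch *s)
{
        struct device *hwmon_dev;

        s->group.attrs = s->attrs;      /* attrs[] filled in by the caller */
        s->groups[0] = &s->group;
        s->groups[1] = NULL;

        hwmon_dev = devm_hwmon_device_register_with_groups(parent, "sketch",
                                                           s, s->groups);
        return PTR_ERR_OR_ZERO(hwmon_dev);
}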
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 7c19e969576f..0d39cfc4a3bf 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1980,9 +1980,10 @@ enum {
#define IXGBE_FWSM_TS_ENABLED 0x1
/* Queue Drop Enable */
-#define IXGBE_QDE_ENABLE 0x00000001
-#define IXGBE_QDE_IDX_MASK 0x00007F00
-#define IXGBE_QDE_IDX_SHIFT 8
+#define IXGBE_QDE_ENABLE 0x00000001
+#define IXGBE_QDE_IDX_MASK 0x00007F00
+#define IXGBE_QDE_IDX_SHIFT 8
+#define IXGBE_QDE_WRITE 0x00010000
#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
@@ -2173,6 +2174,14 @@ enum {
#define IXGBE_MBVFICR(_i) (0x00710 + ((_i) * 4))
#define IXGBE_VFLRE(_i) ((((_i) & 1) ? 0x001C0 : 0x00600))
#define IXGBE_VFLREC(_i) (0x00700 + ((_i) * 4))
+/* Translated register #defines */
+#define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * (P)))
+#define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * (P)))
+
+#define IXGBE_PVFTDWBALn(q_per_pool, vf_number, vf_q_index) \
+ (IXGBE_PVFTDWBAL((q_per_pool)*(vf_number) + (vf_q_index)))
+#define IXGBE_PVFTDWBAHn(q_per_pool, vf_number, vf_q_index) \
+ (IXGBE_PVFTDWBAH((q_per_pool)*(vf_number) + (vf_q_index)))
enum ixgbe_fdir_pballoc_type {
IXGBE_FDIR_PBALLOC_NONE = 0,
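The PVFTDWBALn/PVFTDWBAHn macros translate a (VF, queue) pair into the flat per-queue register file: pools are laid out back to back, so the absolute queue index is q_per_pool * vf + queue, and each queue's registers sit 0x40 apart. A worked sketch of the same arithmetic (the helper name is hypothetical):

#include <stdint.h>

#define PVFTDWBAL_BASE          0x06038u
#define QUEUE_REG_STRIDE        0x40u

static uint32_t pvftdwbal_offset(unsigned int q_per_pool, unsigned int vf,
                                 unsigned int queue)
{
        unsigned int abs_queue = q_per_pool * vf + queue;

        return PVFTDWBAL_BASE + QUEUE_REG_STRIDE * abs_queue;
}
/* e.g. 2 queues per pool, VF 3, queue 1 -> index 7 -> 0x06038 + 7 * 0x40 */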
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 3147795bd135..05e4f32d84f7 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -183,6 +183,7 @@ typedef u32 ixgbe_link_speed;
#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */
+#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS)
/* Transmit Descriptor - Advanced */
union ixgbe_adv_tx_desc {
@@ -277,4 +278,21 @@ struct ixgbe_adv_tx_context_desc {
#define IXGBE_ERR_RESET_FAILED -2
#define IXGBE_ERR_INVALID_ARGUMENT -3
+/* Transmit Config masks */
+#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Ena specific Tx Queue */
+#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. wr-bk flushing */
+#define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */
+
+#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* Rx Desc enable */
+#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* Rx Desc header ena */
+#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* Rx Desc payload ena */
+#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* Rx rd Desc Relax Order */
+#define IXGBE_DCA_RXCTRL_DATA_WRO_EN (1 << 13) /* Rx wr data Relax Order */
+#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN (1 << 15) /* Rx wr header RO */
+
+#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
+#define IXGBE_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
+#define IXGBE_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
+
#endif /* _IXGBEVF_DEFINES_H_ */
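The TXDCTL fields added above share one register: PTHRESH occupies bits 6:0, HTHRESH bits 14:8, and WTHRESH bits 22:16 (hence the WTHRESH shift of 16). A sketch of packing the three thresholds, matching the values the ring-configuration code later in this patch uses; the helper is illustrative:

#include <stdint.h>

#define TXDCTL_ENABLE           0x02000000u
#define TXDCTL_WTHRESH_SHIFT    16

static uint32_t txdctl_pack(uint32_t pthresh, uint32_t hthresh,
                            uint32_t wthresh)
{
        return TXDCTL_ENABLE | pthresh | (hthresh << 8) |
               (wthresh << TXDCTL_WTHRESH_SHIFT);
}
/* ixgbevf_configure_tx_ring() below effectively uses
 * txdctl_pack(32, 1, 8): PTHRESH = 32, HTHRESH = 1, WTHRESH = 8.
 */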
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 54d9acef9c4e..f68b78c732a8 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -77,11 +77,11 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
{"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc,
stats.saved_reset_vfgotc)},
{"tx_busy", IXGBEVF_ZSTAT(tx_busy)},
+ {"tx_restart_queue", IXGBEVF_ZSTAT(restart_queue)},
+ {"tx_timeout_count", IXGBEVF_ZSTAT(tx_timeout_count)},
{"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc,
stats.saved_reset_vfmprc)},
- {"rx_csum_offload_good", IXGBEVF_ZSTAT(hw_csum_rx_good)},
{"rx_csum_offload_errors", IXGBEVF_ZSTAT(hw_csum_rx_error)},
- {"tx_csum_offload_ctxt", IXGBEVF_ZSTAT(hw_csum_tx_good)},
#ifdef BP_EXTENDED_STATS
{"rx_bp_poll_yield", IXGBEVF_ZSTAT(bp_rx_yields)},
{"rx_bp_cleaned", IXGBEVF_ZSTAT(bp_rx_cleaned)},
@@ -286,9 +286,9 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
if (!netif_running(adapter->netdev)) {
for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i].count = new_tx_count;
+ adapter->tx_ring[i]->count = new_tx_count;
for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i].count = new_rx_count;
+ adapter->rx_ring[i]->count = new_rx_count;
adapter->tx_ring_count = new_tx_count;
adapter->rx_ring_count = new_rx_count;
goto clear_reset;
@@ -303,20 +303,20 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
for (i = 0; i < adapter->num_tx_queues; i++) {
/* clone ring and setup updated count */
- tx_ring[i] = adapter->tx_ring[i];
+ tx_ring[i] = *adapter->tx_ring[i];
tx_ring[i].count = new_tx_count;
- err = ixgbevf_setup_tx_resources(adapter, &tx_ring[i]);
- if (!err)
- continue;
- while (i) {
- i--;
- ixgbevf_free_tx_resources(adapter, &tx_ring[i]);
- }
+ err = ixgbevf_setup_tx_resources(&tx_ring[i]);
+ if (err) {
+ while (i) {
+ i--;
+ ixgbevf_free_tx_resources(&tx_ring[i]);
+ }
- vfree(tx_ring);
- tx_ring = NULL;
+ vfree(tx_ring);
+ tx_ring = NULL;
- goto clear_reset;
+ goto clear_reset;
+ }
}
}
@@ -329,20 +329,20 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
for (i = 0; i < adapter->num_rx_queues; i++) {
/* clone ring and setup updated count */
- rx_ring[i] = adapter->rx_ring[i];
+ rx_ring[i] = *adapter->rx_ring[i];
rx_ring[i].count = new_rx_count;
- err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
- if (!err)
- continue;
- while (i) {
- i--;
- ixgbevf_free_rx_resources(adapter, &rx_ring[i]);
- }
+ err = ixgbevf_setup_rx_resources(&rx_ring[i]);
+ if (err) {
+ while (i) {
+ i--;
+ ixgbevf_free_rx_resources(&rx_ring[i]);
+ }
- vfree(rx_ring);
- rx_ring = NULL;
+ vfree(rx_ring);
+ rx_ring = NULL;
- goto clear_reset;
+ goto clear_reset;
+ }
}
}
@@ -352,9 +352,8 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
/* Tx */
if (tx_ring) {
for (i = 0; i < adapter->num_tx_queues; i++) {
- ixgbevf_free_tx_resources(adapter,
- &adapter->tx_ring[i]);
- adapter->tx_ring[i] = tx_ring[i];
+ ixgbevf_free_tx_resources(adapter->tx_ring[i]);
+ *adapter->tx_ring[i] = tx_ring[i];
}
adapter->tx_ring_count = new_tx_count;
@@ -365,9 +364,8 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
/* Rx */
if (rx_ring) {
for (i = 0; i < adapter->num_rx_queues; i++) {
- ixgbevf_free_rx_resources(adapter,
- &adapter->rx_ring[i]);
- adapter->rx_ring[i] = rx_ring[i];
+ ixgbevf_free_rx_resources(adapter->rx_ring[i]);
+ *adapter->rx_ring[i] = rx_ring[i];
}
adapter->rx_ring_count = new_rx_count;
@@ -382,7 +380,7 @@ clear_reset:
/* free Tx resources if Rx error is encountered */
if (tx_ring) {
for (i = 0; i < adapter->num_tx_queues; i++)
- ixgbevf_free_tx_resources(adapter, &tx_ring[i]);
+ ixgbevf_free_tx_resources(&tx_ring[i]);
vfree(tx_ring);
}
@@ -413,15 +411,15 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
tx_yields = 0, tx_cleaned = 0, tx_missed = 0;
for (i = 0; i < adapter->num_rx_queues; i++) {
- rx_yields += adapter->rx_ring[i].bp_yields;
- rx_cleaned += adapter->rx_ring[i].bp_cleaned;
- rx_missed += adapter->rx_ring[i].bp_misses;
+ rx_yields += adapter->rx_ring[i]->stats.yields;
+ rx_cleaned += adapter->rx_ring[i]->stats.cleaned;
+ rx_missed += adapter->rx_ring[i]->stats.misses;
}
for (i = 0; i < adapter->num_tx_queues; i++) {
- tx_yields += adapter->tx_ring[i].bp_yields;
- tx_cleaned += adapter->tx_ring[i].bp_cleaned;
- tx_missed += adapter->tx_ring[i].bp_misses;
+ tx_yields += adapter->tx_ring[i]->stats.yields;
+ tx_cleaned += adapter->tx_ring[i]->stats.cleaned;
+ tx_missed += adapter->tx_ring[i]->stats.misses;
}
adapter->bp_rx_yields = rx_yields;
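The per-ring stats this loop now reads are updated under u64_stats_update_begin/end on the write side (see the Tx clean path later in this patch); on 32-bit kernels a reader should pair that with the fetch/retry sequence. A hedged sketch of the reader side:

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct ring_stats {
        u64 packets;
        u64 bytes;
        struct u64_stats_sync syncp;
};

/* Retry the snapshot if a writer updated the counters mid-read. */
static void ring_stats_read(struct ring_stats *s, u64 *packets, u64 *bytes)
{
        unsigned int start;

        do {
                start = u64_stats_fetch_begin(&s->syncp);
                *packets = s->packets;
                *bytes = s->bytes;
        } while (u64_stats_fetch_retry(&s->syncp, start));
}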
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 8971e2d0a984..54829326bb09 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -46,12 +46,15 @@
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer */
struct ixgbevf_tx_buffer {
- struct sk_buff *skb;
- dma_addr_t dma;
- unsigned long time_stamp;
union ixgbe_adv_tx_desc *next_to_watch;
- u16 length;
- u16 mapped_as_page;
+ unsigned long time_stamp;
+ struct sk_buff *skb;
+ unsigned int bytecount;
+ unsigned short gso_segs;
+ __be16 protocol;
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(len);
+ u32 tx_flags;
};
struct ixgbevf_rx_buffer {
@@ -59,6 +62,29 @@ struct ixgbevf_rx_buffer {
dma_addr_t dma;
};
+struct ixgbevf_stats {
+ u64 packets;
+ u64 bytes;
+#ifdef BP_EXTENDED_STATS
+ u64 yields;
+ u64 misses;
+ u64 cleaned;
+#endif
+};
+
+struct ixgbevf_tx_queue_stats {
+ u64 restart_queue;
+ u64 tx_busy;
+ u64 tx_done_old;
+};
+
+struct ixgbevf_rx_queue_stats {
+ u64 non_eop_descs;
+ u64 alloc_rx_page_failed;
+ u64 alloc_rx_buff_failed;
+ u64 csum_err;
+};
+
struct ixgbevf_ring {
struct ixgbevf_ring *next;
struct net_device *netdev;
@@ -70,31 +96,27 @@ struct ixgbevf_ring {
unsigned int next_to_use;
unsigned int next_to_clean;
- int queue_index; /* needed for multiqueue queue management */
union {
struct ixgbevf_tx_buffer *tx_buffer_info;
struct ixgbevf_rx_buffer *rx_buffer_info;
};
- u64 total_bytes;
- u64 total_packets;
- struct u64_stats_sync syncp;
- u64 hw_csum_rx_error;
- u64 hw_csum_rx_good;
-#ifdef BP_EXTENDED_STATS
- u64 bp_yields;
- u64 bp_misses;
- u64 bp_cleaned;
-#endif
+ struct ixgbevf_stats stats;
+ struct u64_stats_sync syncp;
+ union {
+ struct ixgbevf_tx_queue_stats tx_stats;
+ struct ixgbevf_rx_queue_stats rx_stats;
+ };
- u16 head;
- u16 tail;
+ u64 hw_csum_rx_error;
+ u8 __iomem *tail;
u16 reg_idx; /* holds the special value that gets the hardware register
* offset associated with this ring, which is different
* for DCB and RSS modes */
u16 rx_buf_len;
+ int queue_index; /* needed for multiqueue queue management */
};
/* How many Rx Buffers do we bundle into one write to the hardware ? */
@@ -125,8 +147,6 @@ struct ixgbevf_ring {
#define IXGBE_TX_FLAGS_VLAN (u32)(1 << 1)
#define IXGBE_TX_FLAGS_TSO (u32)(1 << 2)
#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 3)
-#define IXGBE_TX_FLAGS_FCOE (u32)(1 << 4)
-#define IXGBE_TX_FLAGS_FSO (u32)(1 << 5)
#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
@@ -188,7 +208,7 @@ static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector)
q_vector->state |= IXGBEVF_QV_STATE_NAPI_YIELD;
rc = false;
#ifdef BP_EXTENDED_STATS
- q_vector->tx.ring->bp_yields++;
+ q_vector->tx.ring->stats.yields++;
#endif
} else {
/* we don't care if someone yielded */
@@ -223,7 +243,7 @@ static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector)
q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD;
rc = false;
#ifdef BP_EXTENDED_STATS
- q_vector->rx.ring->bp_yields++;
+ q_vector->rx.ring->stats.yields++;
#endif
} else {
/* preserve yield marks */
@@ -262,6 +282,7 @@ static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector)
spin_lock_bh(&q_vector->lock);
if (q_vector->state & IXGBEVF_QV_OWNED)
rc = false;
+ q_vector->state |= IXGBEVF_QV_STATE_DISABLED;
spin_unlock_bh(&q_vector->lock);
return rc;
}
@@ -315,7 +336,6 @@ static inline u16 ixgbevf_desc_unused(struct ixgbevf_ring *ring)
struct ixgbevf_adapter {
struct timer_list watchdog_timer;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
- u16 bd_number;
struct work_struct reset_task;
struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
@@ -328,25 +348,18 @@ struct ixgbevf_adapter {
u32 eims_other;
/* TX */
- struct ixgbevf_ring *tx_ring; /* One per active queue */
int num_tx_queues;
+ struct ixgbevf_ring *tx_ring[MAX_TX_QUEUES]; /* One per active queue */
u64 restart_queue;
- u64 hw_csum_tx_good;
- u64 lsc_int;
- u64 hw_tso_ctxt;
- u64 hw_tso6_ctxt;
u32 tx_timeout_count;
/* RX */
- struct ixgbevf_ring *rx_ring; /* One per active queue */
int num_rx_queues;
+ struct ixgbevf_ring *rx_ring[MAX_TX_QUEUES]; /* One per active queue */
u64 hw_csum_rx_error;
u64 hw_rx_no_dma_resources;
- u64 hw_csum_rx_good;
u64 non_eop_descs;
int num_msix_vectors;
- struct msix_entry *msix_entries;
-
u32 alloc_rx_page_failed;
u32 alloc_rx_buff_failed;
@@ -356,6 +369,9 @@ struct ixgbevf_adapter {
u32 flags;
#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1)
#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 1)
+#define IXGBEVF_FLAG_QUEUE_RESET_REQUESTED (u32)(1 << 2)
+
+ struct msix_entry *msix_entries;
/* OS defined structs */
struct net_device *netdev;
@@ -364,10 +380,12 @@ struct ixgbevf_adapter {
/* structs defined in ixgbe_vf.h */
struct ixgbe_hw hw;
u16 msg_enable;
- struct ixgbevf_hw_stats stats;
+ u16 bd_number;
/* Interrupt Throttle Rate */
u32 eitr_param;
+ struct ixgbevf_hw_stats stats;
+
unsigned long state;
u64 tx_busy;
unsigned int tx_ring_count;
@@ -386,9 +404,9 @@ struct ixgbevf_adapter {
u32 link_speed;
bool link_up;
- struct work_struct watchdog_task;
-
spinlock_t mbx_lock;
+
+ struct work_struct watchdog_task;
};
enum ixbgevf_state_t {
@@ -420,10 +438,10 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter);
void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
void ixgbevf_reset(struct ixgbevf_adapter *adapter);
void ixgbevf_set_ethtool_ops(struct net_device *netdev);
-int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
-int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
-void ixgbevf_free_rx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
-void ixgbevf_free_tx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
+int ixgbevf_setup_rx_resources(struct ixgbevf_ring *);
+int ixgbevf_setup_tx_resources(struct ixgbevf_ring *);
+void ixgbevf_free_rx_resources(struct ixgbevf_ring *);
+void ixgbevf_free_tx_resources(struct ixgbevf_ring *);
void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
int ethtool_ioctl(struct ifreq *ifr);
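The reworked ixgbevf_tx_buffer above stores its DMA handle through DEFINE_DMA_UNMAP_ADDR/LEN, so the fields (and the unmap bookkeeping) compile away on architectures where unmap is a no-op. A minimal sketch of the store/fetch pairing, with a hypothetical buffer struct:

#include <linux/device.h>
#include <linux/dma-mapping.h>

struct buf_meta {
        DEFINE_DMA_UNMAP_ADDR(dma);     /* may occupy zero bytes on some arches */
        DEFINE_DMA_UNMAP_LEN(len);
};

static void buf_save_mapping(struct buf_meta *m, dma_addr_t dma, size_t len)
{
        dma_unmap_addr_set(m, dma, dma);
        dma_unmap_len_set(m, len, len);
}

static void buf_unmap(struct device *dev, struct buf_meta *m)
{
        if (dma_unmap_len(m, len)) {
                dma_unmap_single(dev, dma_unmap_addr(m, dma),
                                 dma_unmap_len(m, len), DMA_TO_DEVICE);
                dma_unmap_len_set(m, len, 0);   /* mark as unmapped */
        }
}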
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 92ef4cb5a8e8..9df28985eba7 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -58,7 +58,7 @@ const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
-#define DRV_VERSION "2.11.3-k"
+#define DRV_VERSION "2.12.1-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
"Copyright (c) 2009 - 2012 Intel Corporation.";
@@ -95,13 +95,15 @@ module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
/* forward decls */
+static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
-static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
- struct ixgbevf_ring *rx_ring,
+static inline void ixgbevf_release_rx_desc(struct ixgbevf_ring *rx_ring,
u32 val)
{
+ rx_ring->next_to_use = val;
+
/*
* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
@@ -109,7 +111,7 @@ static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
* such as IA-64).
*/
wmb();
- IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
+ writel(val, rx_ring->tail);
}
/**
@@ -143,28 +145,25 @@ static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
}
static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
- struct ixgbevf_tx_buffer
- *tx_buffer_info)
-{
- if (tx_buffer_info->dma) {
- if (tx_buffer_info->mapped_as_page)
- dma_unmap_page(tx_ring->dev,
- tx_buffer_info->dma,
- tx_buffer_info->length,
- DMA_TO_DEVICE);
- else
+ struct ixgbevf_tx_buffer *tx_buffer)
+{
+ if (tx_buffer->skb) {
+ dev_kfree_skb_any(tx_buffer->skb);
+ if (dma_unmap_len(tx_buffer, len))
dma_unmap_single(tx_ring->dev,
- tx_buffer_info->dma,
- tx_buffer_info->length,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE);
- tx_buffer_info->dma = 0;
+ } else if (dma_unmap_len(tx_buffer, len)) {
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
}
- if (tx_buffer_info->skb) {
- dev_kfree_skb_any(tx_buffer_info->skb);
- tx_buffer_info->skb = NULL;
- }
- tx_buffer_info->time_stamp = 0;
- /* tx_buffer_info must be completely set up in the transmit path */
+ tx_buffer->next_to_watch = NULL;
+ tx_buffer->skb = NULL;
+ dma_unmap_len_set(tx_buffer, len, 0);
+ /* tx_buffer must be completely set up in the transmit path */
}
#define IXGBE_MAX_TXD_PWR 14
@@ -185,20 +184,21 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
struct ixgbevf_ring *tx_ring)
{
struct ixgbevf_adapter *adapter = q_vector->adapter;
- union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
- struct ixgbevf_tx_buffer *tx_buffer_info;
- unsigned int i, count = 0;
+ struct ixgbevf_tx_buffer *tx_buffer;
+ union ixgbe_adv_tx_desc *tx_desc;
unsigned int total_bytes = 0, total_packets = 0;
+ unsigned int budget = tx_ring->count / 2;
+ unsigned int i = tx_ring->next_to_clean;
if (test_bit(__IXGBEVF_DOWN, &adapter->state))
return true;
- i = tx_ring->next_to_clean;
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- eop_desc = tx_buffer_info->next_to_watch;
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
+ i -= tx_ring->count;
do {
- bool cleaned = false;
+ union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
/* if next_to_watch is not set then there is no work pending */
if (!eop_desc)
@@ -212,67 +212,90 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
break;
/* clear next_to_watch to prevent false hangs */
- tx_buffer_info->next_to_watch = NULL;
+ tx_buffer->next_to_watch = NULL;
- for ( ; !cleaned; count++) {
- struct sk_buff *skb;
- tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
- cleaned = (tx_desc == eop_desc);
- skb = tx_buffer_info->skb;
-
- if (cleaned && skb) {
- unsigned int segs, bytecount;
-
- /* gso_segs is currently only valid for tcp */
- segs = skb_shinfo(skb)->gso_segs ?: 1;
- /* multiply data chunks by size of headers */
- bytecount = ((segs - 1) * skb_headlen(skb)) +
- skb->len;
- total_packets += segs;
- total_bytes += bytecount;
- }
+ /* update the statistics for this packet */
+ total_bytes += tx_buffer->bytecount;
+ total_packets += tx_buffer->gso_segs;
- ixgbevf_unmap_and_free_tx_resource(tx_ring,
- tx_buffer_info);
+ /* free the skb */
+ dev_kfree_skb_any(tx_buffer->skb);
- tx_desc->wb.status = 0;
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ /* clear tx_buffer data */
+ tx_buffer->skb = NULL;
+ dma_unmap_len_set(tx_buffer, len, 0);
+
+ /* unmap remaining buffers */
+ while (tx_desc != eop_desc) {
+ tx_buffer++;
+ tx_desc++;
i++;
- if (i == tx_ring->count)
- i = 0;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
+ }
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ /* unmap any remaining paged data */
+ if (dma_unmap_len(tx_buffer, len)) {
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
+ }
+ }
+
+ /* move us one more past the eop_desc for start of next pkt */
+ tx_buffer++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
}
- eop_desc = tx_buffer_info->next_to_watch;
- } while (count < tx_ring->count);
+ /* issue prefetch for next Tx descriptor */
+ prefetch(tx_desc);
+ /* update budget accounting */
+ budget--;
+ } while (likely(budget));
+
+ i += tx_ring->count;
tx_ring->next_to_clean = i;
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->stats.bytes += total_bytes;
+ tx_ring->stats.packets += total_packets;
+ u64_stats_update_end(&tx_ring->syncp);
+ q_vector->tx.total_bytes += total_bytes;
+ q_vector->tx.total_packets += total_packets;
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
- if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
+ if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
(ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
/* Make sure that anybody stopping the queue after this
* sees the new next_to_clean.
*/
smp_mb();
+
if (__netif_subqueue_stopped(tx_ring->netdev,
tx_ring->queue_index) &&
!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
netif_wake_subqueue(tx_ring->netdev,
tx_ring->queue_index);
- ++adapter->restart_queue;
+ ++tx_ring->tx_stats.restart_queue;
}
}
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->total_bytes += total_bytes;
- tx_ring->total_packets += total_packets;
- u64_stats_update_end(&tx_ring->syncp);
- q_vector->tx.total_bytes += total_bytes;
- q_vector->tx.total_packets += total_packets;
-
- return count < tx_ring->count;
+ return !!budget;
}
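The rewritten clean loop biases the ring index into [-count, 0) (the i -= tx_ring->count above) so the wrap test is a cheap zero check instead of a compare against the ring size. A tiny standalone demonstration of the trick:

#include <stdio.h>

int main(void)
{
        const int count = 4;    /* ring size */
        int i = 2;              /* next_to_clean */

        i -= count;             /* bias into [-count, 0) */
        for (int step = 0; step < 6; step++) {
                printf("cleaning descriptor %d\n", i + count);
                i++;
                if (!i)         /* wrapped past the last descriptor */
                        i -= count;
        }
        return 0;               /* the real code finishes with i += count */
}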
/**
@@ -341,7 +364,7 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
/* if IP and error */
if ((status_err & IXGBE_RXD_STAT_IPCS) &&
(status_err & IXGBE_RXDADV_ERR_IPE)) {
- ring->hw_csum_rx_error++;
+ ring->rx_stats.csum_err++;
return;
}
@@ -349,51 +372,46 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
return;
if (status_err & IXGBE_RXDADV_ERR_TCPE) {
- ring->hw_csum_rx_error++;
+ ring->rx_stats.csum_err++;
return;
}
/* It must be a TCP or UDP packet with a valid checksum */
skb->ip_summed = CHECKSUM_UNNECESSARY;
- ring->hw_csum_rx_good++;
}
/**
* ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
- * @adapter: address of board private structure
+ * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
**/
-static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *rx_ring,
+static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
int cleaned_count)
{
- struct pci_dev *pdev = adapter->pdev;
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbevf_rx_buffer *bi;
unsigned int i = rx_ring->next_to_use;
- bi = &rx_ring->rx_buffer_info[i];
-
while (cleaned_count--) {
rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
+ bi = &rx_ring->rx_buffer_info[i];
if (!bi->skb) {
struct sk_buff *skb;
skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
rx_ring->rx_buf_len);
if (!skb) {
- adapter->alloc_rx_buff_failed++;
+ rx_ring->rx_stats.alloc_rx_buff_failed++;
goto no_buffers;
}
+
bi->skb = skb;
- bi->dma = dma_map_single(&pdev->dev, skb->data,
+ bi->dma = dma_map_single(rx_ring->dev, skb->data,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
- if (dma_mapping_error(&pdev->dev, bi->dma)) {
+ if (dma_mapping_error(rx_ring->dev, bi->dma)) {
dev_kfree_skb(skb);
bi->skb = NULL;
- dev_err(&pdev->dev, "RX DMA map failed\n");
+ dev_err(rx_ring->dev, "Rx DMA map failed\n");
break;
}
}
@@ -402,14 +420,12 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
i++;
if (i == rx_ring->count)
i = 0;
- bi = &rx_ring->rx_buffer_info[i];
}
no_buffers:
- if (rx_ring->next_to_use != i) {
- rx_ring->next_to_use = i;
- ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
- }
+ if (rx_ring->next_to_use != i)
+ ixgbevf_release_rx_desc(rx_ring, i);
}
static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
@@ -424,8 +440,6 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
struct ixgbevf_ring *rx_ring,
int budget)
{
- struct ixgbevf_adapter *adapter = q_vector->adapter;
- struct pci_dev *pdev = adapter->pdev;
union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
struct sk_buff *skb;
@@ -451,7 +465,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
rx_buffer_info->skb = NULL;
if (rx_buffer_info->dma) {
- dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
+ dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
rx_buffer_info->dma = 0;
@@ -471,7 +485,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
if (!(staterr & IXGBE_RXD_STAT_EOP)) {
skb->next = next_buffer->skb;
IXGBE_CB(skb->next)->prev = skb;
- adapter->non_eop_descs++;
+ rx_ring->rx_stats.non_eop_descs++;
goto next_desc;
}
@@ -503,7 +517,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
* source pruning.
*/
if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
- ether_addr_equal(adapter->netdev->dev_addr,
+ ether_addr_equal(rx_ring->netdev->dev_addr,
eth_hdr(skb)->h_source)) {
dev_kfree_skb_irq(skb);
goto next_desc;
@@ -516,8 +530,7 @@ next_desc:
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
- ixgbevf_alloc_rx_buffers(adapter, rx_ring,
- cleaned_count);
+ ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
cleaned_count = 0;
}
@@ -532,11 +545,11 @@ next_desc:
cleaned_count = ixgbevf_desc_unused(rx_ring);
if (cleaned_count)
- ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
+ ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->total_packets += total_rx_packets;
- rx_ring->total_bytes += total_rx_bytes;
+ rx_ring->stats.packets += total_rx_packets;
+ rx_ring->stats.bytes += total_rx_bytes;
u64_stats_update_end(&rx_ring->syncp);
q_vector->rx.total_packets += total_rx_packets;
q_vector->rx.total_bytes += total_rx_bytes;
@@ -641,9 +654,9 @@ static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
#ifdef BP_EXTENDED_STATS
if (found)
- ring->bp_cleaned += found;
+ ring->stats.cleaned += found;
else
- ring->bp_misses++;
+ ring->stats.misses++;
#endif
if (found)
break;
@@ -848,8 +861,8 @@ static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
{
struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
- a->rx_ring[r_idx].next = q_vector->rx.ring;
- q_vector->rx.ring = &a->rx_ring[r_idx];
+ a->rx_ring[r_idx]->next = q_vector->rx.ring;
+ q_vector->rx.ring = a->rx_ring[r_idx];
q_vector->rx.count++;
}
@@ -858,8 +871,8 @@ static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
{
struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
- a->tx_ring[t_idx].next = q_vector->tx.ring;
- q_vector->tx.ring = &a->tx_ring[t_idx];
+ a->tx_ring[t_idx]->next = q_vector->tx.ring;
+ q_vector->tx.ring = a->tx_ring[t_idx];
q_vector->tx.count++;
}
@@ -1087,6 +1100,70 @@ static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
}
/**
+ * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
+ * @adapter: board private structure
+ * @ring: structure containing ring specific data
+ *
+ * Configure the Tx descriptor ring after a reset.
+ **/
+static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u64 tdba = ring->dma;
+ int wait_loop = 10;
+ u32 txdctl = IXGBE_TXDCTL_ENABLE;
+ u8 reg_idx = ring->reg_idx;
+
+ /* disable queue to avoid issues while updating state */
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
+ IXGBE_WRITE_FLUSH(hw);
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
+ ring->count * sizeof(union ixgbe_adv_tx_desc));
+
+ /* disable head writeback */
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);
+
+ /* enable relaxed ordering */
+ IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
+ (IXGBE_DCA_TXCTRL_DESC_RRO_EN |
+ IXGBE_DCA_TXCTRL_DATA_RRO_EN));
+
+ /* reset head and tail pointers */
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
+ ring->tail = hw->hw_addr + IXGBE_VFTDT(reg_idx);
+
+ /* reset ntu and ntc to place SW in sync with hardware */
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
+
+ /* To avoid issues, WTHRESH + PTHRESH should always be less than
+ * or equal to the number of on-chip descriptors, which is
+ * currently 40.
+ */
+ txdctl |= (8 << 16); /* WTHRESH = 8 */
+
+ /* Setting PTHRESH to 32 improves performance */
+ txdctl |= (1 << 8) | /* HTHRESH = 1 */
+ 32; /* PTHRESH = 32 */
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
+
+ /* poll to verify queue is enabled */
+ do {
+ usleep_range(1000, 2000);
+ txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
+ } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
+ if (!wait_loop)
+ pr_err("Could not enable Tx Queue %d\n", reg_idx);
+}
+
+/**
* ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
* @adapter: board private structure
*
@@ -1094,31 +1171,11 @@ static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
**/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
- u64 tdba;
- struct ixgbe_hw *hw = &adapter->hw;
- u32 i, j, tdlen, txctrl;
+ u32 i;
/* Setup the HW Tx Head and Tail descriptor pointers */
- for (i = 0; i < adapter->num_tx_queues; i++) {
- struct ixgbevf_ring *ring = &adapter->tx_ring[i];
- j = ring->reg_idx;
- tdba = ring->dma;
- tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
- IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
- (tdba & DMA_BIT_MASK(32)));
- IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
- IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
- IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
- IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
- adapter->tx_ring[i].head = IXGBE_VFTDH(j);
- adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
- /* Disable Tx Head Writeback RO bit, since this hoses
- * bookkeeping if things aren't delivered in order.
- */
- txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
- txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
- IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
- }
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
}
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
@@ -1129,7 +1186,7 @@ static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
struct ixgbe_hw *hw = &adapter->hw;
u32 srrctl;
- rx_ring = &adapter->rx_ring[index];
+ rx_ring = adapter->rx_ring[index];
srrctl = IXGBE_SRRCTL_DROP_EN;
@@ -1187,7 +1244,93 @@ static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
rx_buf_len = IXGBEVF_RXBUFFER_10K;
for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i].rx_buf_len = rx_buf_len;
+ adapter->rx_ring[i]->rx_buf_len = rx_buf_len;
+}
+
+#define IXGBEVF_MAX_RX_DESC_POLL 10
+static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
+ u32 rxdctl;
+ u8 reg_idx = ring->reg_idx;
+
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+ rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+
+ /* write value back with RXDCTL.ENABLE bit cleared */
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
+
+ /* the hardware may take up to 100us to really disable the rx queue */
+ do {
+ udelay(10);
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+ } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
+
+ if (!wait_loop)
+ pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
+ reg_idx);
+}
+
+static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
+ u32 rxdctl;
+ u8 reg_idx = ring->reg_idx;
+
+ do {
+ usleep_range(1000, 2000);
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+ } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
+
+ if (!wait_loop)
+ pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
+ reg_idx);
+}
+
+static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u64 rdba = ring->dma;
+ u32 rxdctl;
+ u8 reg_idx = ring->reg_idx;
+
+ /* disable queue to avoid issues while updating state */
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+ ixgbevf_disable_rx_queue(adapter, ring);
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
+ ring->count * sizeof(union ixgbe_adv_rx_desc));
+
+ /* enable relaxed ordering */
+ IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
+ IXGBE_DCA_RXCTRL_DESC_RRO_EN);
+
+ /* reset head and tail pointers */
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
+ ring->tail = hw->hw_addr + IXGBE_VFRDT(reg_idx);
+
+ /* reset ntu and ntc to place SW in sync with hardware */
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
+
+ ixgbevf_configure_srrctl(adapter, reg_idx);
+
+ /* prevent DMA from exceeding buffer space available */
+ rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
+ rxdctl |= ring->rx_buf_len | IXGBE_RXDCTL_RLPML_EN;
+ rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
+
+ ixgbevf_rx_desc_queue_enable(adapter, ring);
+ ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
}
/**
@@ -1198,33 +1341,17 @@ static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
**/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
- u64 rdba;
- struct ixgbe_hw *hw = &adapter->hw;
- int i, j;
- u32 rdlen;
+ int i;
ixgbevf_setup_psrtype(adapter);
/* set_rx_buffer_len must be called before ring initialization */
ixgbevf_set_rx_buffer_len(adapter);
- rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
/* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring */
- for (i = 0; i < adapter->num_rx_queues; i++) {
- rdba = adapter->rx_ring[i].dma;
- j = adapter->rx_ring[i].reg_idx;
- IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
- (rdba & DMA_BIT_MASK(32)));
- IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
- IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
- IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
- IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
- adapter->rx_ring[i].head = IXGBE_VFRDH(j);
- adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
-
- ixgbevf_configure_srrctl(adapter, j);
- }
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
}
static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
@@ -1366,69 +1493,54 @@ static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
}
}
-static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
+static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
{
- struct net_device *netdev = adapter->netdev;
- int i;
+ struct ixgbe_hw *hw = &adapter->hw;
+ unsigned int def_q = 0;
+ unsigned int num_tcs = 0;
+ unsigned int num_rx_queues = 1;
+ int err;
- ixgbevf_set_rx_mode(netdev);
+ spin_lock_bh(&adapter->mbx_lock);
- ixgbevf_restore_vlan(adapter);
+ /* fetch queue configuration from the PF */
+ err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
- ixgbevf_configure_tx(adapter);
- ixgbevf_configure_rx(adapter);
- for (i = 0; i < adapter->num_rx_queues; i++) {
- struct ixgbevf_ring *ring = &adapter->rx_ring[i];
- ixgbevf_alloc_rx_buffers(adapter, ring,
- ixgbevf_desc_unused(ring));
- }
-}
+ spin_unlock_bh(&adapter->mbx_lock);
-#define IXGBEVF_MAX_RX_DESC_POLL 10
-static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
- int rxr)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
- u32 rxdctl;
- int j = adapter->rx_ring[rxr].reg_idx;
+ if (err)
+ return err;
- do {
- usleep_range(1000, 2000);
- rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
- } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
+ if (num_tcs > 1) {
+ /* update default Tx ring register index */
+ adapter->tx_ring[0]->reg_idx = def_q;
- if (!wait_loop)
- hw_dbg(hw, "RXDCTL.ENABLE queue %d not set while polling\n",
- rxr);
+ /* we need as many queues as traffic classes */
+ num_rx_queues = num_tcs;
+ }
+
+ /* if we have a bad config abort request queue reset */
+ if (adapter->num_rx_queues != num_rx_queues) {
+ /* force mailbox timeout to prevent further messages */
+ hw->mbx.timeout = 0;
+
+ /* wait for watchdog to come around and bail us out */
+ adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
+ }
- ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
- (adapter->rx_ring[rxr].count - 1));
+ return 0;
}
-static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *ring)
+static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
- struct ixgbe_hw *hw = &adapter->hw;
- int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
- u32 rxdctl;
- u8 reg_idx = ring->reg_idx;
-
- rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
- rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+ ixgbevf_configure_dcb(adapter);
- /* write value back with RXDCTL.ENABLE bit cleared */
- IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
+ ixgbevf_set_rx_mode(adapter->netdev);
- /* the hardware may take up to 100us to really disable the rx queue */
- do {
- udelay(10);
- rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
- } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
+ ixgbevf_restore_vlan(adapter);
- if (!wait_loop)
- hw_dbg(hw, "RXDCTL.ENABLE queue %d not cleared while polling\n",
- reg_idx);
+ ixgbevf_configure_tx(adapter);
+ ixgbevf_configure_rx(adapter);
}
static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
@@ -1493,37 +1605,6 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
- int i, j = 0;
- int num_rx_rings = adapter->num_rx_queues;
- u32 txdctl, rxdctl;
-
- for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i].reg_idx;
- txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
- /* enable WTHRESH=8 descriptors, to encourage burst writeback */
- txdctl |= (8 << 16);
- IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
- }
-
- for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i].reg_idx;
- txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
- txdctl |= IXGBE_TXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
- }
-
- for (i = 0; i < num_rx_rings; i++) {
- j = adapter->rx_ring[i].reg_idx;
- rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
- rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
- if (hw->mac.type == ixgbe_mac_X540_vf) {
- rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
- rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
- IXGBE_RXDCTL_RLPML_EN);
- }
- IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
- ixgbevf_rx_desc_queue_enable(adapter, i);
- }
ixgbevf_configure_msix(adapter);
@@ -1549,85 +1630,10 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
mod_timer(&adapter->watchdog_timer, jiffies);
}
-static int ixgbevf_reset_queues(struct ixgbevf_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- struct ixgbevf_ring *rx_ring;
- unsigned int def_q = 0;
- unsigned int num_tcs = 0;
- unsigned int num_rx_queues = 1;
- int err, i;
-
- spin_lock_bh(&adapter->mbx_lock);
-
- /* fetch queue configuration from the PF */
- err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
-
- spin_unlock_bh(&adapter->mbx_lock);
-
- if (err)
- return err;
-
- if (num_tcs > 1) {
- /* update default Tx ring register index */
- adapter->tx_ring[0].reg_idx = def_q;
-
- /* we need as many queues as traffic classes */
- num_rx_queues = num_tcs;
- }
-
- /* nothing to do if we have the correct number of queues */
- if (adapter->num_rx_queues == num_rx_queues)
- return 0;
-
- /* allocate new rings */
- rx_ring = kcalloc(num_rx_queues,
- sizeof(struct ixgbevf_ring), GFP_KERNEL);
- if (!rx_ring)
- return -ENOMEM;
-
- /* setup ring fields */
- for (i = 0; i < num_rx_queues; i++) {
- rx_ring[i].count = adapter->rx_ring_count;
- rx_ring[i].queue_index = i;
- rx_ring[i].reg_idx = i;
- rx_ring[i].dev = &adapter->pdev->dev;
- rx_ring[i].netdev = adapter->netdev;
-
- /* allocate resources on the ring */
- err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
- if (err) {
- while (i) {
- i--;
- ixgbevf_free_rx_resources(adapter, &rx_ring[i]);
- }
- kfree(rx_ring);
- return err;
- }
- }
-
- /* free the existing rings and queues */
- ixgbevf_free_all_rx_resources(adapter);
- adapter->num_rx_queues = 0;
- kfree(adapter->rx_ring);
-
- /* move new rings into position on the adapter struct */
- adapter->rx_ring = rx_ring;
- adapter->num_rx_queues = num_rx_queues;
-
- /* reset ring to vector mapping */
- ixgbevf_reset_q_vectors(adapter);
- ixgbevf_map_rings_to_vectors(adapter);
-
- return 0;
-}
-
void ixgbevf_up(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- ixgbevf_reset_queues(adapter);
-
ixgbevf_configure(adapter);
ixgbevf_up_complete(adapter);
@@ -1640,13 +1646,10 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter)
/**
* ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
- * @adapter: board private structure
* @rx_ring: ring to free buffers from
**/
-static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *rx_ring)
+static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
unsigned long size;
unsigned int i;
@@ -1659,7 +1662,7 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
rx_buffer_info = &rx_ring->rx_buffer_info[i];
if (rx_buffer_info->dma) {
- dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
+ dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
rx_buffer_info->dma = 0;
@@ -1680,23 +1683,13 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
/* Zero out the descriptor ring */
memset(rx_ring->desc, 0, rx_ring->size);
-
- rx_ring->next_to_clean = 0;
- rx_ring->next_to_use = 0;
-
- if (rx_ring->head)
- writel(0, adapter->hw.hw_addr + rx_ring->head);
- if (rx_ring->tail)
- writel(0, adapter->hw.hw_addr + rx_ring->tail);
}
/**
* ixgbevf_clean_tx_ring - Free Tx Buffers
- * @adapter: board private structure
* @tx_ring: ring to be cleaned
**/
-static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *tx_ring)
+static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
{
struct ixgbevf_tx_buffer *tx_buffer_info;
unsigned long size;
@@ -1715,14 +1708,6 @@ static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
memset(tx_ring->tx_buffer_info, 0, size);
memset(tx_ring->desc, 0, tx_ring->size);
-
- tx_ring->next_to_use = 0;
- tx_ring->next_to_clean = 0;
-
- if (tx_ring->head)
- writel(0, adapter->hw.hw_addr + tx_ring->head);
- if (tx_ring->tail)
- writel(0, adapter->hw.hw_addr + tx_ring->tail);
}
/**
@@ -1734,7 +1719,7 @@ static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
int i;
for (i = 0; i < adapter->num_rx_queues; i++)
- ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
+ ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
}
/**
@@ -1746,22 +1731,21 @@ static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
int i;
for (i = 0; i < adapter->num_tx_queues; i++)
- ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
+ ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
}
void ixgbevf_down(struct ixgbevf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
- u32 txdctl;
- int i, j;
+ int i;
/* signal that we are down to the interrupt handler */
set_bit(__IXGBEVF_DOWN, &adapter->state);
/* disable all enabled rx queues */
for (i = 0; i < adapter->num_rx_queues; i++)
- ixgbevf_disable_rx_queue(adapter, &adapter->rx_ring[i]);
+ ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
netif_tx_disable(netdev);
@@ -1782,10 +1766,10 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
/* disable transmits in the hardware now that interrupts are off */
for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i].reg_idx;
- txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
- IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
- (txdctl & ~IXGBE_TXDCTL_ENABLE));
+ u8 reg_idx = adapter->tx_ring[i]->reg_idx;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
+ IXGBE_TXDCTL_SWFLSH);
}
netif_carrier_off(netdev);
@@ -1889,9 +1873,28 @@ static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
**/
static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
{
+ struct ixgbe_hw *hw = &adapter->hw;
+ unsigned int def_q = 0;
+ unsigned int num_tcs = 0;
+ int err;
+
/* Start with base case */
adapter->num_rx_queues = 1;
adapter->num_tx_queues = 1;
+
+ spin_lock_bh(&adapter->mbx_lock);
+
+ /* fetch queue configuration from the PF */
+ err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
+
+ spin_unlock_bh(&adapter->mbx_lock);
+
+ if (err)
+ return;
+
+ /* we need as many queues as traffic classes */
+ if (num_tcs > 1)
+ adapter->num_rx_queues = num_tcs;
}
/**
@@ -1904,40 +1907,50 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
**/
static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
{
- int i;
+ struct ixgbevf_ring *ring;
+ int rx = 0, tx = 0;
- adapter->tx_ring = kcalloc(adapter->num_tx_queues,
- sizeof(struct ixgbevf_ring), GFP_KERNEL);
- if (!adapter->tx_ring)
- goto err_tx_ring_allocation;
+ for (; tx < adapter->num_tx_queues; tx++) {
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+ if (!ring)
+ goto err_allocation;
- adapter->rx_ring = kcalloc(adapter->num_rx_queues,
- sizeof(struct ixgbevf_ring), GFP_KERNEL);
- if (!adapter->rx_ring)
- goto err_rx_ring_allocation;
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
+ ring->count = adapter->tx_ring_count;
+ ring->queue_index = tx;
+ ring->reg_idx = tx;
- for (i = 0; i < adapter->num_tx_queues; i++) {
- adapter->tx_ring[i].count = adapter->tx_ring_count;
- adapter->tx_ring[i].queue_index = i;
- /* reg_idx may be remapped later by DCB config */
- adapter->tx_ring[i].reg_idx = i;
- adapter->tx_ring[i].dev = &adapter->pdev->dev;
- adapter->tx_ring[i].netdev = adapter->netdev;
+ adapter->tx_ring[tx] = ring;
}
- for (i = 0; i < adapter->num_rx_queues; i++) {
- adapter->rx_ring[i].count = adapter->rx_ring_count;
- adapter->rx_ring[i].queue_index = i;
- adapter->rx_ring[i].reg_idx = i;
- adapter->rx_ring[i].dev = &adapter->pdev->dev;
- adapter->rx_ring[i].netdev = adapter->netdev;
+ for (; rx < adapter->num_rx_queues; rx++) {
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+ if (!ring)
+ goto err_allocation;
+
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
+
+ ring->count = adapter->rx_ring_count;
+ ring->queue_index = rx;
+ ring->reg_idx = rx;
+
+ adapter->rx_ring[rx] = ring;
}
return 0;
-err_rx_ring_allocation:
- kfree(adapter->tx_ring);
-err_tx_ring_allocation:
+err_allocation:
+ while (tx) {
+ kfree(adapter->tx_ring[--tx]);
+ adapter->tx_ring[tx] = NULL;
+ }
+
+ while (rx) {
+ kfree(adapter->rx_ring[--rx]);
+ adapter->rx_ring[rx] = NULL;
+ }
return -ENOMEM;
}
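The error path above frees only what was actually allocated. A hedged, self-contained sketch of that allocate-then-unwind pattern, reduced to its essentials (all demo_* names are illustrative, not driver symbols):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_ring {
        u16 count;
        u8 reg_idx;
};

struct demo_adapter {
        struct demo_ring *tx_ring[8];   /* stand-in for the ring-pointer array */
        int num_tx_queues;
};

static int demo_alloc_rings(struct demo_adapter *a)
{
        int i;

        for (i = 0; i < a->num_tx_queues; i++) {
                struct demo_ring *ring = kzalloc(sizeof(*ring), GFP_KERNEL);

                if (!ring)
                        goto err;
                ring->reg_idx = i;
                a->tx_ring[i] = ring;
        }
        return 0;

err:
        /* unwind: free only the rings that were actually allocated */
        while (i--) {
                kfree(a->tx_ring[i]);
                a->tx_ring[i] = NULL;
        }
        return -ENOMEM;
}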
@@ -2128,6 +2141,17 @@ err_set_interrupt:
**/
static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ kfree(adapter->tx_ring[i]);
+ adapter->tx_ring[i] = NULL;
+ }
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ kfree(adapter->rx_ring[i]);
+ adapter->rx_ring[i] = NULL;
+ }
+
adapter->num_tx_queues = 0;
adapter->num_rx_queues = 0;
@@ -2258,11 +2282,8 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
for (i = 0; i < adapter->num_rx_queues; i++) {
adapter->hw_csum_rx_error +=
- adapter->rx_ring[i].hw_csum_rx_error;
- adapter->hw_csum_rx_good +=
- adapter->rx_ring[i].hw_csum_rx_good;
- adapter->rx_ring[i].hw_csum_rx_error = 0;
- adapter->rx_ring[i].hw_csum_rx_good = 0;
+ adapter->rx_ring[i]->hw_csum_rx_error;
+ adapter->rx_ring[i]->hw_csum_rx_error = 0;
}
}
@@ -2340,6 +2361,8 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
bool link_up = adapter->link_up;
s32 need_reset;
+ ixgbevf_queue_reset_subtask(adapter);
+
adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
/*
@@ -2408,22 +2431,22 @@ pf_has_reset:
/**
* ixgbevf_free_tx_resources - Free Tx Resources per Queue
- * @adapter: board private structure
* @tx_ring: Tx descriptor ring for a specific queue
*
* Free all transmit software resources
**/
-void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *tx_ring)
+void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
-
- ixgbevf_clean_tx_ring(adapter, tx_ring);
+ ixgbevf_clean_tx_ring(tx_ring);
vfree(tx_ring->tx_buffer_info);
tx_ring->tx_buffer_info = NULL;
- dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+ /* if not set, then don't free */
+ if (!tx_ring->desc)
+ return;
+
+ dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
tx_ring->dma);
tx_ring->desc = NULL;
@@ -2440,23 +2463,18 @@ static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
int i;
for (i = 0; i < adapter->num_tx_queues; i++)
- if (adapter->tx_ring[i].desc)
- ixgbevf_free_tx_resources(adapter,
- &adapter->tx_ring[i]);
-
+ if (adapter->tx_ring[i]->desc)
+ ixgbevf_free_tx_resources(adapter->tx_ring[i]);
}
/**
* ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
- * @adapter: board private structure
* @tx_ring: tx descriptor ring (for a specific queue) to setup
*
* Return 0 on success, negative on failure
**/
-int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *tx_ring)
+int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
int size;
size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
@@ -2468,13 +2486,11 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+ tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
&tx_ring->dma, GFP_KERNEL);
if (!tx_ring->desc)
goto err;
- tx_ring->next_to_use = 0;
- tx_ring->next_to_clean = 0;
return 0;
err:
@@ -2500,7 +2516,7 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
int i, err = 0;
for (i = 0; i < adapter->num_tx_queues; i++) {
- err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+ err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
if (!err)
continue;
hw_dbg(&adapter->hw,
@@ -2513,40 +2529,34 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
/**
* ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
- * @adapter: board private structure
* @rx_ring: rx descriptor ring (for a specific queue) to setup
*
* Returns 0 on success, negative on failure
**/
-int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *rx_ring)
+int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
int size;
size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
rx_ring->rx_buffer_info = vzalloc(size);
if (!rx_ring->rx_buffer_info)
- goto alloc_failed;
+ goto err;
/* Round up to nearest 4K */
rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
- rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+ rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
- if (!rx_ring->desc) {
- vfree(rx_ring->rx_buffer_info);
- rx_ring->rx_buffer_info = NULL;
- goto alloc_failed;
- }
-
- rx_ring->next_to_clean = 0;
- rx_ring->next_to_use = 0;
+ if (!rx_ring->desc)
+ goto err;
return 0;
-alloc_failed:
+err:
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+ dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
return -ENOMEM;
}
@@ -2565,7 +2575,7 @@ static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
int i, err = 0;
for (i = 0; i < adapter->num_rx_queues; i++) {
- err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+ err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]);
if (!err)
continue;
hw_dbg(&adapter->hw,
@@ -2577,22 +2587,18 @@ static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
/**
* ixgbevf_free_rx_resources - Free Rx Resources
- * @adapter: board private structure
* @rx_ring: ring to clean the resources from
*
* Free all receive software resources
**/
-void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *rx_ring)
+void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
-
- ixgbevf_clean_rx_ring(adapter, rx_ring);
+ ixgbevf_clean_rx_ring(rx_ring);
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
- dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+ dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
rx_ring->dma);
rx_ring->desc = NULL;
@@ -2609,66 +2615,8 @@ static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
int i;
for (i = 0; i < adapter->num_rx_queues; i++)
- if (adapter->rx_ring[i].desc)
- ixgbevf_free_rx_resources(adapter,
- &adapter->rx_ring[i]);
-}
-
-static int ixgbevf_setup_queues(struct ixgbevf_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- struct ixgbevf_ring *rx_ring;
- unsigned int def_q = 0;
- unsigned int num_tcs = 0;
- unsigned int num_rx_queues = 1;
- int err, i;
-
- spin_lock_bh(&adapter->mbx_lock);
-
- /* fetch queue configuration from the PF */
- err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
-
- spin_unlock_bh(&adapter->mbx_lock);
-
- if (err)
- return err;
-
- if (num_tcs > 1) {
- /* update default Tx ring register index */
- adapter->tx_ring[0].reg_idx = def_q;
-
- /* we need as many queues as traffic classes */
- num_rx_queues = num_tcs;
- }
-
- /* nothing to do if we have the correct number of queues */
- if (adapter->num_rx_queues == num_rx_queues)
- return 0;
-
- /* allocate new rings */
- rx_ring = kcalloc(num_rx_queues,
- sizeof(struct ixgbevf_ring), GFP_KERNEL);
- if (!rx_ring)
- return -ENOMEM;
-
- /* setup ring fields */
- for (i = 0; i < num_rx_queues; i++) {
- rx_ring[i].count = adapter->rx_ring_count;
- rx_ring[i].queue_index = i;
- rx_ring[i].reg_idx = i;
- rx_ring[i].dev = &adapter->pdev->dev;
- rx_ring[i].netdev = adapter->netdev;
- }
-
- /* free the existing ring and queues */
- adapter->num_rx_queues = 0;
- kfree(adapter->rx_ring);
-
- /* move new rings into position on the adapter struct */
- adapter->rx_ring = rx_ring;
- adapter->num_rx_queues = num_rx_queues;
-
- return 0;
+ if (adapter->rx_ring[i]->desc)
+ ixgbevf_free_rx_resources(adapter->rx_ring[i]);
}
/**
@@ -2714,11 +2662,6 @@ static int ixgbevf_open(struct net_device *netdev)
}
}
- /* setup queue reg_idx and Rx queue count */
- err = ixgbevf_setup_queues(adapter);
- if (err)
- goto err_setup_queues;
-
/* allocate transmit descriptors */
err = ixgbevf_setup_all_tx_resources(adapter);
if (err)
@@ -2756,7 +2699,6 @@ err_setup_rx:
ixgbevf_free_all_rx_resources(adapter);
err_setup_tx:
ixgbevf_free_all_tx_resources(adapter);
-err_setup_queues:
ixgbevf_reset(adapter);
err_setup_reset:
@@ -2788,6 +2730,34 @@ static int ixgbevf_close(struct net_device *netdev)
return 0;
}
+static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
+{
+ struct net_device *dev = adapter->netdev;
+
+ if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED))
+ return;
+
+ adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
+
+ /* if interface is down do nothing */
+ if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
+ test_bit(__IXGBEVF_RESETTING, &adapter->state))
+ return;
+
+ /* Hardware has to reinitialize queues and interrupts to
+ * match packet buffer alignment. Unfortunately, the
+ * hardware is not flexible enough to do this dynamically.
+ */
+ if (netif_running(dev))
+ ixgbevf_close(dev);
+
+ ixgbevf_clear_interrupt_scheme(adapter);
+ ixgbevf_init_interrupt_scheme(adapter);
+
+ if (netif_running(dev))
+ ixgbevf_open(dev);
+}
+
static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
u32 vlan_macip_lens, u32 type_tucmd,
u32 mss_l4len_idx)
@@ -2810,8 +2780,10 @@ static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
}
static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
+ struct ixgbevf_tx_buffer *first,
+ u8 *hdr_len)
{
+ struct sk_buff *skb = first->skb;
u32 vlan_macip_lens, type_tucmd;
u32 mss_l4len_idx, l4len;
@@ -2836,12 +2808,17 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
IPPROTO_TCP,
0);
type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+ first->tx_flags |= IXGBE_TX_FLAGS_TSO |
+ IXGBE_TX_FLAGS_CSUM |
+ IXGBE_TX_FLAGS_IPV4;
} else if (skb_is_gso_v6(skb)) {
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check =
~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
0, IPPROTO_TCP, 0);
+ first->tx_flags |= IXGBE_TX_FLAGS_TSO |
+ IXGBE_TX_FLAGS_CSUM;
}
/* compute header lengths */
@@ -2849,6 +2826,10 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
*hdr_len = skb_transport_offset(skb) + l4len;
+ /* update gso size and bytecount with header size */
+ first->gso_segs = skb_shinfo(skb)->gso_segs;
+ first->bytecount += (first->gso_segs - 1) * *hdr_len;
+
/* mss_l4len_id: use 1 as index for TSO */
mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
@@ -2857,7 +2838,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
vlan_macip_lens = skb_network_header_len(skb);
vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
- vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+ vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
type_tucmd, mss_l4len_idx);
@@ -2865,9 +2846,10 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
return 1;
}
-static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags)
+static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
+ struct ixgbevf_tx_buffer *first)
{
+ struct sk_buff *skb = first->skb;
u32 vlan_macip_lens = 0;
u32 mss_l4len_idx = 0;
u32 type_tucmd = 0;
@@ -2888,7 +2870,7 @@ static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
if (unlikely(net_ratelimit())) {
dev_warn(tx_ring->dev,
"partial checksum but proto=%x!\n",
- skb->protocol);
+ first->protocol);
}
break;
}
@@ -2916,184 +2898,190 @@ static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
}
break;
}
+
+ /* update TX checksum flag */
+ first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
}
/* vlan_macip_lens: MACLEN, VLAN tag */
vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
- vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+ vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
type_tucmd, mss_l4len_idx);
-
- return (skb->ip_summed == CHECKSUM_PARTIAL);
}
-static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags)
+static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
{
- struct ixgbevf_tx_buffer *tx_buffer_info;
- unsigned int len;
- unsigned int total = skb->len;
- unsigned int offset = 0, size;
- int count = 0;
- unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
- unsigned int f;
- int i;
+ /* set type for advanced descriptor with frame checksum insertion */
+ __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
+ IXGBE_ADVTXD_DCMD_IFCS |
+ IXGBE_ADVTXD_DCMD_DEXT);
- i = tx_ring->next_to_use;
+ /* set HW vlan bit if vlan is present */
+ if (tx_flags & IXGBE_TX_FLAGS_VLAN)
+ cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
- len = min(skb_headlen(skb), total);
- while (len) {
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
-
- tx_buffer_info->length = size;
- tx_buffer_info->mapped_as_page = false;
- tx_buffer_info->dma = dma_map_single(tx_ring->dev,
- skb->data + offset,
- size, DMA_TO_DEVICE);
- if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
- goto dma_error;
+ /* set segmentation enable bits for TSO/FSO */
+ if (tx_flags & IXGBE_TX_FLAGS_TSO)
+ cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
- len -= size;
- total -= size;
- offset += size;
- count++;
- i++;
- if (i == tx_ring->count)
- i = 0;
- }
+ return cmd_type;
+}
- for (f = 0; f < nr_frags; f++) {
- const struct skb_frag_struct *frag;
+static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
+ u32 tx_flags, unsigned int paylen)
+{
+ __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
- frag = &skb_shinfo(skb)->frags[f];
- len = min((unsigned int)skb_frag_size(frag), total);
- offset = 0;
+ /* enable L4 checksum for TSO and TX checksum offload */
+ if (tx_flags & IXGBE_TX_FLAGS_CSUM)
+ olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
- while (len) {
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
+ /* enable IPv4 checksum for TSO */
+ if (tx_flags & IXGBE_TX_FLAGS_IPV4)
+ olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
- tx_buffer_info->length = size;
- tx_buffer_info->dma =
- skb_frag_dma_map(tx_ring->dev, frag,
- offset, size, DMA_TO_DEVICE);
- if (dma_mapping_error(tx_ring->dev,
- tx_buffer_info->dma))
- goto dma_error;
- tx_buffer_info->mapped_as_page = true;
+ /* use index 1 context for TSO/FSO/FCOE */
+ if (tx_flags & IXGBE_TX_FLAGS_TSO)
+ olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);
- len -= size;
- total -= size;
- offset += size;
- count++;
- i++;
- if (i == tx_ring->count)
- i = 0;
- }
- if (total == 0)
- break;
- }
+ /* Check Context must be set if Tx switch is enabled, which it
+ * always is for the case where virtual functions are running
+ */
+ olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
- if (i == 0)
- i = tx_ring->count - 1;
- else
- i = i - 1;
- tx_ring->tx_buffer_info[i].skb = skb;
+ tx_desc->read.olinfo_status = olinfo_status;
+}
- return count;
+static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
+ struct ixgbevf_tx_buffer *first,
+ const u8 hdr_len)
+{
+ dma_addr_t dma;
+ struct sk_buff *skb = first->skb;
+ struct ixgbevf_tx_buffer *tx_buffer;
+ union ixgbe_adv_tx_desc *tx_desc;
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ unsigned int data_len = skb->data_len;
+ unsigned int size = skb_headlen(skb);
+ unsigned int paylen = skb->len - hdr_len;
+ u32 tx_flags = first->tx_flags;
+ __le32 cmd_type;
+ u16 i = tx_ring->next_to_use;
-dma_error:
- dev_err(tx_ring->dev, "TX DMA map failed\n");
+ tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
- /* clear timestamp and dma mappings for failed tx_buffer_info map */
- tx_buffer_info->dma = 0;
- count--;
+ ixgbevf_tx_olinfo_status(tx_desc, tx_flags, paylen);
+ cmd_type = ixgbevf_tx_cmd_type(tx_flags);
- /* clear timestamp and dma mappings for remaining portion of packet */
- while (count >= 0) {
- count--;
- i--;
- if (i < 0)
- i += tx_ring->count;
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
- }
+ dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto dma_error;
- return count;
-}
+ /* record length, and DMA address */
+ dma_unmap_len_set(first, len, size);
+ dma_unmap_addr_set(first, dma, dma);
-static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
- int count, unsigned int first, u32 paylen,
- u8 hdr_len)
-{
- union ixgbe_adv_tx_desc *tx_desc = NULL;
- struct ixgbevf_tx_buffer *tx_buffer_info;
- u32 olinfo_status = 0, cmd_type_len = 0;
- unsigned int i;
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
- u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
+ for (;;) {
+ while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
+ tx_desc->read.cmd_type_len =
+ cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
- cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
+ i++;
+ tx_desc++;
+ if (i == tx_ring->count) {
+ tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
- cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
+ dma += IXGBE_MAX_DATA_PER_TXD;
+ size -= IXGBE_MAX_DATA_PER_TXD;
- if (tx_flags & IXGBE_TX_FLAGS_VLAN)
- cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+ tx_desc->read.olinfo_status = 0;
+ }
- if (tx_flags & IXGBE_TX_FLAGS_CSUM)
- olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM;
+ if (likely(!data_len))
+ break;
- if (tx_flags & IXGBE_TX_FLAGS_TSO) {
- cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
+ tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
- /* use index 1 context for tso */
- olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
- if (tx_flags & IXGBE_TX_FLAGS_IPV4)
- olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
- }
+ i++;
+ tx_desc++;
+ if (i == tx_ring->count) {
+ tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
- /*
- * Check Context must be set if Tx switch is enabled, which it
- * always is for case where virtual functions are running
- */
- olinfo_status |= IXGBE_ADVTXD_CC;
+ size = skb_frag_size(frag);
+ data_len -= size;
- olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
+ dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto dma_error;
- i = tx_ring->next_to_use;
- while (count--) {
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
- tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
- tx_desc->read.cmd_type_len =
- cpu_to_le32(cmd_type_len | tx_buffer_info->length);
- tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
- i++;
- if (i == tx_ring->count)
- i = 0;
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ dma_unmap_len_set(tx_buffer, len, size);
+ dma_unmap_addr_set(tx_buffer, dma, dma);
+
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+ tx_desc->read.olinfo_status = 0;
+
+ frag++;
}
- tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
+ /* write last descriptor with RS and EOP bits */
+ cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
+ tx_desc->read.cmd_type_len = cmd_type;
- tx_ring->tx_buffer_info[first].time_stamp = jiffies;
+ /* set the timestamp */
+ first->time_stamp = jiffies;
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
+ /* Force memory writes to complete before letting h/w know there
+ * are new descriptors to fetch. (Only applicable for weak-ordered
+ * memory model archs, such as IA-64).
+ *
+ * We also need this memory barrier (wmb) to make certain all of the
+ * status bits have been updated before next_to_watch is written.
*/
wmb();
- tx_ring->tx_buffer_info[first].next_to_watch = tx_desc;
+ /* set next_to_watch value indicating a packet is present */
+ first->next_to_watch = tx_desc;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
+ tx_ring->next_to_use = i;
+
+ /* notify HW of packet */
+ writel(i, tx_ring->tail);
+
+ return;
+dma_error:
+ dev_err(tx_ring->dev, "TX DMA map failed\n");
+
+ /* clear dma mappings for failed tx_buffer_info map */
+ for (;;) {
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer);
+ if (tx_buffer == first)
+ break;
+ if (i == 0)
+ i = tx_ring->count;
+ i--;
+ }
+
tx_ring->next_to_use = i;
}
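As a worked example of the descriptor splitting in ixgbevf_tx_map() above, a hedged sketch (DEMO_MAX_DATA_PER_TXD mirrors the usual 1 << 14 value of IXGBE_MAX_DATA_PER_TXD; the helper name is illustrative):

#include <linux/kernel.h>

#define DEMO_MAX_DATA_PER_TXD   (1 << 14)       /* stand-in for IXGBE_MAX_DATA_PER_TXD */

/* descriptors consumed by one buffer of 'size' bytes */
static unsigned int demo_txd_use_count(unsigned int size)
{
        return DIV_ROUND_UP(size, DEMO_MAX_DATA_PER_TXD);
}

/* e.g. a 40000-byte linear head needs DIV_ROUND_UP(40000, 16384) = 3
 * descriptors, matching the splits the inner while loop above performs
 * before the remainder goes into the closing descriptor
 */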
static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
- struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
-
netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
/* Herbert's original patch had:
* smp_mb__after_netif_stop_queue();
@@ -3107,7 +3095,8 @@ static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
/* A reprieve! - use start_queue because it doesn't call schedule */
netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
- ++adapter->restart_queue;
+ ++tx_ring->tx_stats.restart_queue;
+
return 0;
}
@@ -3121,22 +3110,23 @@ static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbevf_tx_buffer *first;
struct ixgbevf_ring *tx_ring;
- unsigned int first;
- unsigned int tx_flags = 0;
- u8 hdr_len = 0;
- int r_idx = 0, tso;
+ int tso;
+ u32 tx_flags = 0;
u16 count = TXD_USE_COUNT(skb_headlen(skb));
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
unsigned short f;
#endif
+ u8 hdr_len = 0;
u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
+
if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
- tx_ring = &adapter->tx_ring[r_idx];
+ tx_ring = adapter->tx_ring[skb->queue_mapping];
/*
* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
@@ -3152,38 +3142,41 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
count += skb_shinfo(skb)->nr_frags;
#endif
if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
- adapter->tx_busy++;
+ tx_ring->tx_stats.tx_busy++;
return NETDEV_TX_BUSY;
}
+ /* record the location of the first descriptor for this packet */
+ first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ first->skb = skb;
+ first->bytecount = skb->len;
+ first->gso_segs = 1;
+
if (vlan_tx_tag_present(skb)) {
tx_flags |= vlan_tx_tag_get(skb);
tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
tx_flags |= IXGBE_TX_FLAGS_VLAN;
}
- first = tx_ring->next_to_use;
+ /* record initial flags and protocol */
+ first->tx_flags = tx_flags;
+ first->protocol = vlan_get_protocol(skb);
- if (skb->protocol == htons(ETH_P_IP))
- tx_flags |= IXGBE_TX_FLAGS_IPV4;
- tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len);
- if (tso < 0) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
+ tso = ixgbevf_tso(tx_ring, first, &hdr_len);
+ if (tso < 0)
+ goto out_drop;
+ else if (!tso)
+ ixgbevf_tx_csum(tx_ring, first);
- if (tso)
- tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM;
- else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags))
- tx_flags |= IXGBE_TX_FLAGS_CSUM;
+ ixgbevf_tx_map(tx_ring, first, hdr_len);
- ixgbevf_tx_queue(tx_ring, tx_flags,
- ixgbevf_tx_map(tx_ring, skb, tx_flags),
- first, skb->len, hdr_len);
+ ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
- writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail);
+ return NETDEV_TX_OK;
- ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
+out_drop:
+ dev_kfree_skb_any(first->skb);
+ first->skb = NULL;
return NETDEV_TX_OK;
}
@@ -3289,8 +3282,8 @@ static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
#ifdef CONFIG_PM
static int ixgbevf_resume(struct pci_dev *pdev)
{
- struct ixgbevf_adapter *adapter = pci_get_drvdata(pdev);
- struct net_device *netdev = adapter->netdev;
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
u32 err;
pci_set_power_state(pdev, PCI_D0);
@@ -3349,22 +3342,22 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
for (i = 0; i < adapter->num_rx_queues; i++) {
- ring = &adapter->rx_ring[i];
+ ring = adapter->rx_ring[i];
do {
start = u64_stats_fetch_begin_bh(&ring->syncp);
- bytes = ring->total_bytes;
- packets = ring->total_packets;
+ bytes = ring->stats.bytes;
+ packets = ring->stats.packets;
} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
stats->rx_bytes += bytes;
stats->rx_packets += packets;
}
for (i = 0; i < adapter->num_tx_queues; i++) {
- ring = &adapter->tx_ring[i];
+ ring = adapter->tx_ring[i];
do {
start = u64_stats_fetch_begin_bh(&ring->syncp);
- bytes = ring->total_bytes;
- packets = ring->total_packets;
+ bytes = ring->stats.bytes;
+ packets = ring->stats.packets;
} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
stats->tx_bytes += bytes;
stats->tx_packets += packets;
@@ -3595,9 +3588,6 @@ static void ixgbevf_remove(struct pci_dev *pdev)
hw_dbg(&adapter->hw, "Remove complete\n");
- kfree(adapter->tx_ring);
- kfree(adapter->rx_ring);
-
free_netdev(netdev);
pci_disable_device(pdev);
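The ixgbevf_get_stats() hunk above relies on the u64_stats seqcount retry loop. A minimal reader-side sketch, assuming a structure that embeds a u64_stats_sync beside the counters it protects (the *_bh fetch helpers match the kernel era of this diff; demo_* names are illustrative):

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct demo_stats {
        struct u64_stats_sync syncp;
        u64 packets;
        u64 bytes;
};

static void demo_read_stats(const struct demo_stats *s,
                            u64 *packets, u64 *bytes)
{
        unsigned int start;

        do {
                start = u64_stats_fetch_begin_bh(&s->syncp);
                *packets = s->packets;
                *bytes = s->bytes;
        } while (u64_stats_fetch_retry_bh(&s->syncp, start));
}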
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index 4a5e3b0f712e..d74f5f4e5782 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -39,7 +39,6 @@
#include <linux/ctype.h>
#include <linux/types.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index ec94a20d7099..8f9266c64c75 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -9,8 +9,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Copyright (C) 2011 John Crispin <blogic@openwrt.org>
*/
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index a49e81bdf8e8..6300fd27f2db 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -33,6 +33,7 @@ config MV643XX_ETH
config MVMDIO
tristate "Marvell MDIO interface support"
+ depends on HAS_IOMEM
select PHYLIB
---help---
This driver supports the MDIO interface found in the network
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 61088a6a9424..a2565ce22b7c 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -33,8 +33,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -2067,23 +2066,6 @@ static inline void oom_timer_wrapper(unsigned long data)
napi_schedule(&mp->napi);
}
-static void phy_reset(struct mv643xx_eth_private *mp)
-{
- int data;
-
- data = phy_read(mp->phy, MII_BMCR);
- if (data < 0)
- return;
-
- data |= BMCR_RESET;
- if (phy_write(mp->phy, MII_BMCR, data) < 0)
- return;
-
- do {
- data = phy_read(mp->phy, MII_BMCR);
- } while (data >= 0 && data & BMCR_RESET);
-}
-
static void port_start(struct mv643xx_eth_private *mp)
{
u32 pscr;
@@ -2096,8 +2078,9 @@ static void port_start(struct mv643xx_eth_private *mp)
struct ethtool_cmd cmd;
mv643xx_eth_get_settings(mp->dev, &cmd);
- phy_reset(mp);
+ phy_init_hw(mp->phy);
mv643xx_eth_set_settings(mp->dev, &cmd);
+ phy_start(mp->phy);
}
/*
@@ -2293,7 +2276,8 @@ static int mv643xx_eth_stop(struct net_device *dev)
del_timer_sync(&mp->rx_oom);
netif_carrier_off(dev);
-
+ if (mp->phy)
+ phy_stop(mp->phy);
free_irq(dev->irq, dev);
port_reset(mp);
@@ -2764,8 +2748,6 @@ static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
{
struct phy_device *phy = mp->phy;
- phy_reset(mp);
-
if (speed == 0) {
phy->autoneg = AUTONEG_ENABLE;
phy->speed = 0;
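A minimal sketch of the phylib lifecycle the mv643xx_eth change adopts: phy_init_hw() performs the soft reset that the removed phy_reset() open-coded via BMCR, and phy_start()/phy_stop() bracket the link state machine. Error handling is trimmed and the helper names are illustrative:

#include <linux/phy.h>

static void demo_port_up(struct phy_device *phydev)
{
        phy_init_hw(phydev);    /* reset and reconfigure the PHY */
        phy_start(phydev);      /* kick off the phylib state machine */
}

static void demo_port_down(struct phy_device *phydev)
{
        if (phydev)
                phy_stop(phydev);       /* quiesce before tearing down the port */
}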
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index c4eeb69a5bee..fd409d76b811 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -17,7 +17,6 @@
* warranty of any kind, whether express or implied.
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index d5f0d72e5e33..f418f4f20f94 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -101,16 +101,56 @@
#define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff
#define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00
#define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2))
+
+/* Exception Interrupt Port/Queue Cause register */
+
#define MVNETA_INTR_NEW_CAUSE 0x25a0
-#define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8)
#define MVNETA_INTR_NEW_MASK 0x25a4
+
+/* bits 0..7 = TXQ SENT, one bit per queue.
+ * bits 8..15 = RXQ OCCUP, one bit per queue.
+ * bits 16..23 = RXQ FREE, one bit per queue.
+ * bit 29 = OLD_REG_SUM, see old reg ?
+ * bit 30 = TX_ERR_SUM, one bit for 4 ports
+ * bit 31 = MISC_SUM, one bit for 4 ports
+ */
+#define MVNETA_TX_INTR_MASK(nr_txqs) (((1 << nr_txqs) - 1) << 0)
+#define MVNETA_TX_INTR_MASK_ALL (0xff << 0)
+#define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8)
+#define MVNETA_RX_INTR_MASK_ALL (0xff << 8)
+
#define MVNETA_INTR_OLD_CAUSE 0x25a8
#define MVNETA_INTR_OLD_MASK 0x25ac
+
+/* Data Path Port/Queue Cause Register */
#define MVNETA_INTR_MISC_CAUSE 0x25b0
#define MVNETA_INTR_MISC_MASK 0x25b4
+
+#define MVNETA_CAUSE_PHY_STATUS_CHANGE BIT(0)
+#define MVNETA_CAUSE_LINK_CHANGE BIT(1)
+#define MVNETA_CAUSE_PTP BIT(4)
+
+#define MVNETA_CAUSE_INTERNAL_ADDR_ERR BIT(7)
+#define MVNETA_CAUSE_RX_OVERRUN BIT(8)
+#define MVNETA_CAUSE_RX_CRC_ERROR BIT(9)
+#define MVNETA_CAUSE_RX_LARGE_PKT BIT(10)
+#define MVNETA_CAUSE_TX_UNDERUN BIT(11)
+#define MVNETA_CAUSE_PRBS_ERR BIT(12)
+#define MVNETA_CAUSE_PSC_SYNC_CHANGE BIT(13)
+#define MVNETA_CAUSE_SERDES_SYNC_ERR BIT(14)
+
+#define MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT 16
+#define MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
+#define MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))
+
+#define MVNETA_CAUSE_TXQ_ERROR_SHIFT 24
+#define MVNETA_CAUSE_TXQ_ERROR_ALL_MASK (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
+#define MVNETA_CAUSE_TXQ_ERROR_MASK(q) (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))
+
#define MVNETA_INTR_ENABLE 0x25b8
#define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00
-#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0xff000000
+#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0xff000000 // note: neta says it's 0x000000FF
+
#define MVNETA_RXQ_CMD 0x2680
#define MVNETA_RXQ_DISABLE_SHIFT 8
#define MVNETA_RXQ_ENABLE_MASK 0x000000ff
@@ -176,9 +216,6 @@
#define MVNETA_RX_COAL_PKTS 32
#define MVNETA_RX_COAL_USEC 100
-/* Timer */
-#define MVNETA_TX_DONE_TIMER_PERIOD 10
-
/* Napi polling weight */
#define MVNETA_RX_POLL_WEIGHT 64
@@ -221,27 +258,25 @@
#define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
-struct mvneta_stats {
+struct mvneta_pcpu_stats {
struct u64_stats_sync syncp;
- u64 packets;
- u64 bytes;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
};
struct mvneta_port {
int pkt_size;
+ unsigned int frag_size;
void __iomem *base;
struct mvneta_rx_queue *rxqs;
struct mvneta_tx_queue *txqs;
- struct timer_list tx_done_timer;
struct net_device *dev;
u32 cause_rx_tx;
struct napi_struct napi;
- /* Flags */
- unsigned long flags;
-#define MVNETA_F_TX_DONE_TIMER_BIT 0
-
/* Napi weight */
int weight;
@@ -250,8 +285,7 @@ struct mvneta_port {
u8 mcast_count[256];
u16 tx_ring_size;
u16 rx_ring_size;
- struct mvneta_stats tx_stats;
- struct mvneta_stats rx_stats;
+ struct mvneta_pcpu_stats *stats;
struct mii_bus *mii_bus;
struct phy_device *phy_dev;
@@ -410,6 +444,8 @@ static int txq_number = 8;
static int rxq_def;
+static int rx_copybreak __read_mostly = 256;
+
#define MVNETA_DRIVER_NAME "mvneta"
#define MVNETA_DRIVER_VERSION "1.0"
@@ -461,21 +497,29 @@ struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
{
struct mvneta_port *pp = netdev_priv(dev);
unsigned int start;
+ int cpu;
- memset(stats, 0, sizeof(struct rtnl_link_stats64));
-
- do {
- start = u64_stats_fetch_begin_bh(&pp->rx_stats.syncp);
- stats->rx_packets = pp->rx_stats.packets;
- stats->rx_bytes = pp->rx_stats.bytes;
- } while (u64_stats_fetch_retry_bh(&pp->rx_stats.syncp, start));
+ for_each_possible_cpu(cpu) {
+ struct mvneta_pcpu_stats *cpu_stats;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ cpu_stats = per_cpu_ptr(pp->stats, cpu);
+ do {
+ start = u64_stats_fetch_begin_bh(&cpu_stats->syncp);
+ rx_packets = cpu_stats->rx_packets;
+ rx_bytes = cpu_stats->rx_bytes;
+ tx_packets = cpu_stats->tx_packets;
+ tx_bytes = cpu_stats->tx_bytes;
+ } while (u64_stats_fetch_retry_bh(&cpu_stats->syncp, start));
- do {
- start = u64_stats_fetch_begin_bh(&pp->tx_stats.syncp);
- stats->tx_packets = pp->tx_stats.packets;
- stats->tx_bytes = pp->tx_stats.bytes;
- } while (u64_stats_fetch_retry_bh(&pp->tx_stats.syncp, start));
+ stats->rx_packets += rx_packets;
+ stats->rx_bytes += rx_bytes;
+ stats->tx_packets += tx_packets;
+ stats->tx_bytes += tx_bytes;
+ }
stats->rx_errors = dev->stats.rx_errors;
stats->rx_dropped = dev->stats.rx_dropped;
@@ -487,14 +531,14 @@ struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
/* Rx descriptors helper methods */
-/* Checks whether the given RX descriptor is both the first and the
- * last descriptor for the RX packet. Each RX packet is currently
+/* Checks whether the RX descriptor having this status is both the first
+ * and the last descriptor for the RX packet. Each RX packet is currently
* received through a single RX descriptor, so not having each RX
* descriptor with its first and last bits set is an error
*/
-static int mvneta_rxq_desc_is_first_last(struct mvneta_rx_desc *desc)
+static int mvneta_rxq_desc_is_first_last(u32 status)
{
- return (desc->status & MVNETA_RXD_FIRST_LAST_DESC) ==
+ return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
MVNETA_RXD_FIRST_LAST_DESC;
}
@@ -570,6 +614,7 @@ mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
int rx_desc = rxq->next_desc_to_proc;
rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
+ prefetch(rxq->descs + rxq->next_desc_to_proc);
return rxq->descs + rx_desc;
}
@@ -1100,17 +1145,6 @@ static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
txq->done_pkts_coal = value;
}
-/* Trigger tx done timer in MVNETA_TX_DONE_TIMER_PERIOD msecs */
-static void mvneta_add_tx_done_timer(struct mvneta_port *pp)
-{
- if (test_and_set_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags) == 0) {
- pp->tx_done_timer.expires = jiffies +
- msecs_to_jiffies(MVNETA_TX_DONE_TIMER_PERIOD);
- add_timer(&pp->tx_done_timer);
- }
-}
-
-
/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
u32 phys_addr, u32 cookie)
@@ -1204,10 +1238,10 @@ static void mvneta_rx_error(struct mvneta_port *pp,
{
u32 status = rx_desc->status;
- if (!mvneta_rxq_desc_is_first_last(rx_desc)) {
+ if (!mvneta_rxq_desc_is_first_last(status)) {
netdev_err(pp->dev,
"bad rx status %08x (buffer oversize), size=%d\n",
- rx_desc->status, rx_desc->data_size);
+ status, rx_desc->data_size);
return;
}
@@ -1231,13 +1265,12 @@ static void mvneta_rx_error(struct mvneta_port *pp,
}
}
-/* Handle RX checksum offload */
-static void mvneta_rx_csum(struct mvneta_port *pp,
- struct mvneta_rx_desc *rx_desc,
+/* Handle RX checksum offload based on the descriptor's status */
+static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
struct sk_buff *skb)
{
- if ((rx_desc->status & MVNETA_RXD_L3_IP4) &&
- (rx_desc->status & MVNETA_RXD_L4_CSUM_OK)) {
+ if ((status & MVNETA_RXD_L3_IP4) &&
+ (status & MVNETA_RXD_L4_CSUM_OK)) {
skb->csum = 0;
skb->ip_summed = CHECKSUM_UNNECESSARY;
return;
@@ -1246,13 +1279,16 @@ static void mvneta_rx_csum(struct mvneta_port *pp,
skb->ip_summed = CHECKSUM_NONE;
}
-/* Return tx queue pointer (find last set bit) according to causeTxDone reg */
+/* Return tx queue pointer (find last set bit) according to <cause> returned
+ * from the tx_done reg. <cause> must not be null. The return value is always a
+ * valid queue for matching the first one found in <cause>.
+ */
static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
u32 cause)
{
int queue = fls(cause) - 1;
- return (queue < 0 || queue >= txq_number) ? NULL : &pp->txqs[queue];
+ return &pp->txqs[queue];
}
/* Free tx queue skbuffs */
@@ -1278,15 +1314,16 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
}
/* Handle end of transmission */
-static int mvneta_txq_done(struct mvneta_port *pp,
+static void mvneta_txq_done(struct mvneta_port *pp,
struct mvneta_tx_queue *txq)
{
struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
int tx_done;
tx_done = mvneta_txq_sent_desc_proc(pp, txq);
- if (tx_done == 0)
- return tx_done;
+ if (!tx_done)
+ return;
+
mvneta_txq_bufs_free(pp, txq, tx_done);
txq->count -= tx_done;
@@ -1295,8 +1332,22 @@ static int mvneta_txq_done(struct mvneta_port *pp,
if (txq->size - txq->count >= MAX_SKB_FRAGS + 1)
netif_tx_wake_queue(nq);
}
+}
- return tx_done;
+static void *mvneta_frag_alloc(const struct mvneta_port *pp)
+{
+ if (likely(pp->frag_size <= PAGE_SIZE))
+ return netdev_alloc_frag(pp->frag_size);
+ else
+ return kmalloc(pp->frag_size, GFP_ATOMIC);
+}
+
+static void mvneta_frag_free(const struct mvneta_port *pp, void *data)
+{
+ if (likely(pp->frag_size <= PAGE_SIZE))
+ put_page(virt_to_head_page(data));
+ else
+ kfree(data);
}
/* Refill processing */
@@ -1305,22 +1356,21 @@ static int mvneta_rx_refill(struct mvneta_port *pp,
{
dma_addr_t phys_addr;
- struct sk_buff *skb;
+ void *data;
- skb = netdev_alloc_skb(pp->dev, pp->pkt_size);
- if (!skb)
+ data = mvneta_frag_alloc(pp);
+ if (!data)
return -ENOMEM;
- phys_addr = dma_map_single(pp->dev->dev.parent, skb->head,
+ phys_addr = dma_map_single(pp->dev->dev.parent, data,
MVNETA_RX_BUF_SIZE(pp->pkt_size),
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
- dev_kfree_skb(skb);
+ mvneta_frag_free(pp, data);
return -ENOMEM;
}
- mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)skb);
-
+ mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)data);
return 0;
}
@@ -1374,9 +1424,9 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
for (i = 0; i < rxq->size; i++) {
struct mvneta_rx_desc *rx_desc = rxq->descs + i;
- struct sk_buff *skb = (struct sk_buff *)rx_desc->buf_cookie;
+ void *data = (void *)rx_desc->buf_cookie;
- dev_kfree_skb_any(skb);
+ mvneta_frag_free(pp, data);
dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
}
@@ -1391,6 +1441,8 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
{
struct net_device *dev = pp->dev;
int rx_done, rx_filled;
+ u32 rcvd_pkts = 0;
+ u32 rcvd_bytes = 0;
/* Get number of received packets */
rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
@@ -1405,53 +1457,89 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
while (rx_done < rx_todo) {
struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
struct sk_buff *skb;
+ unsigned char *data;
u32 rx_status;
int rx_bytes, err;
- prefetch(rx_desc);
rx_done++;
rx_filled++;
rx_status = rx_desc->status;
- skb = (struct sk_buff *)rx_desc->buf_cookie;
+ rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
+ data = (unsigned char *)rx_desc->buf_cookie;
- if (!mvneta_rxq_desc_is_first_last(rx_desc) ||
+ if (!mvneta_rxq_desc_is_first_last(rx_status) ||
(rx_status & MVNETA_RXD_ERR_SUMMARY)) {
+ err_drop_frame:
dev->stats.rx_errors++;
mvneta_rx_error(pp, rx_desc);
- mvneta_rx_desc_fill(rx_desc, rx_desc->buf_phys_addr,
- (u32)skb);
+ /* leave the descriptor untouched */
continue;
}
- dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
+ if (rx_bytes <= rx_copybreak) {
+ /* better copy a small frame and not unmap the DMA region */
+ skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
+ if (unlikely(!skb))
+ goto err_drop_frame;
+
+ dma_sync_single_range_for_cpu(dev->dev.parent,
+ rx_desc->buf_phys_addr,
+ MVNETA_MH_SIZE + NET_SKB_PAD,
+ rx_bytes,
+ DMA_FROM_DEVICE);
+ memcpy(skb_put(skb, rx_bytes),
+ data + MVNETA_MH_SIZE + NET_SKB_PAD,
+ rx_bytes);
+
+ skb->protocol = eth_type_trans(skb, dev);
+ mvneta_rx_csum(pp, rx_status, skb);
+ napi_gro_receive(&pp->napi, skb);
+
+ rcvd_pkts++;
+ rcvd_bytes += rx_bytes;
+
+ /* leave the descriptor and buffer untouched */
+ continue;
+ }
+
+ skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size);
+ if (!skb)
+ goto err_drop_frame;
+
+ dma_unmap_single(dev->dev.parent, rx_desc->buf_phys_addr,
MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
- rx_bytes = rx_desc->data_size -
- (ETH_FCS_LEN + MVNETA_MH_SIZE);
- u64_stats_update_begin(&pp->rx_stats.syncp);
- pp->rx_stats.packets++;
- pp->rx_stats.bytes += rx_bytes;
- u64_stats_update_end(&pp->rx_stats.syncp);
+ rcvd_pkts++;
+ rcvd_bytes += rx_bytes;
/* Linux processing */
- skb_reserve(skb, MVNETA_MH_SIZE);
+ skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
skb_put(skb, rx_bytes);
skb->protocol = eth_type_trans(skb, dev);
- mvneta_rx_csum(pp, rx_desc, skb);
+ mvneta_rx_csum(pp, rx_status, skb);
napi_gro_receive(&pp->napi, skb);
/* Refill processing */
err = mvneta_rx_refill(pp, rx_desc);
if (err) {
- netdev_err(pp->dev, "Linux processing - Can't refill\n");
+ netdev_err(dev, "Linux processing - Can't refill\n");
rxq->missed++;
rx_filled--;
}
}
+ if (rcvd_pkts) {
+ struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets += rcvd_pkts;
+ stats->rx_bytes += rcvd_bytes;
+ u64_stats_update_end(&stats->syncp);
+ }
+
/* Update rxq management counters */
mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled);
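The rx_copybreak logic above trades a memcpy against a DMA unmap: frames at or below the threshold are copied into a fresh skb so the mapped buffer can be reused in place, while larger frames are wrapped zero-copy with build_skb() and the buffer is then refilled. A hedged, stand-alone sketch of that decision (offsets, cache syncing, and refill are omitted; demo_rx_frame is not a driver function):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static struct sk_buff *demo_rx_frame(struct net_device *dev, void *data,
                                     unsigned int len, unsigned int copybreak)
{
        struct sk_buff *skb;

        if (len <= copybreak) {
                /* small frame: copy out, keep the DMA buffer mapped */
                skb = netdev_alloc_skb_ip_align(dev, len);
                if (skb)
                        memcpy(skb_put(skb, len), data, len);
        } else {
                /* large frame: hand the buffer itself to the stack;
                 * data must carry tailroom for struct skb_shared_info,
                 * and frag_size 0 here means it came from kmalloc()
                 */
                skb = build_skb(data, 0);
                if (skb)
                        skb_put(skb, len);
        }
        return skb;
}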
@@ -1582,25 +1670,17 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
out:
if (frags > 0) {
- u64_stats_update_begin(&pp->tx_stats.syncp);
- pp->tx_stats.packets++;
- pp->tx_stats.bytes += skb->len;
- u64_stats_update_end(&pp->tx_stats.syncp);
+ struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->tx_packets++;
+ stats->tx_bytes += skb->len;
+ u64_stats_update_end(&stats->syncp);
} else {
dev->stats.tx_dropped++;
dev_kfree_skb_any(skb);
}
- if (txq->count >= MVNETA_TXDONE_COAL_PKTS)
- mvneta_txq_done(pp, txq);
-
- /* If after calling mvneta_txq_done, count equals
- * frags, we need to set the timer
- */
- if (txq->count == frags && frags > 0)
- mvneta_add_tx_done_timer(pp);
-
return NETDEV_TX_OK;
}
@@ -1620,33 +1700,26 @@ static void mvneta_txq_done_force(struct mvneta_port *pp,
txq->txq_get_index = 0;
}
-/* handle tx done - called from tx done timer callback */
-static u32 mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done,
- int *tx_todo)
+/* Handle tx done - called in softirq context. The <cause_tx_done> argument
+ * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL.
+ */
+static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
{
struct mvneta_tx_queue *txq;
- u32 tx_done = 0;
struct netdev_queue *nq;
- *tx_todo = 0;
- while (cause_tx_done != 0) {
+ while (cause_tx_done) {
txq = mvneta_tx_done_policy(pp, cause_tx_done);
- if (!txq)
- break;
nq = netdev_get_tx_queue(pp->dev, txq->id);
__netif_tx_lock(nq, smp_processor_id());
- if (txq->count) {
- tx_done += mvneta_txq_done(pp, txq);
- *tx_todo += txq->count;
- }
+ if (txq->count)
+ mvneta_txq_done(pp, txq);
__netif_tx_unlock(nq);
cause_tx_done &= ~((1 << txq->id));
}
-
- return tx_done;
}
/* Compute crc8 of the specified address, using a unique algorithm,
@@ -1876,14 +1949,20 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
/* Read cause register */
cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE) &
- MVNETA_RX_INTR_MASK(rxq_number);
+ (MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));
+
+ /* Release Tx descriptors */
+ if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
+ mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
+ cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
+ }
/* For the case where the last mvneta_poll did not process all
* RX packets
*/
cause_rx_tx |= pp->cause_rx_tx;
if (rxq_number > 1) {
- while ((cause_rx_tx != 0) && (budget > 0)) {
+ while ((cause_rx_tx & MVNETA_RX_INTR_MASK_ALL) && (budget > 0)) {
int count;
struct mvneta_rx_queue *rxq;
/* get rx queue number from cause_rx_tx */
@@ -1915,7 +1994,7 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
napi_complete(napi);
local_irq_save(flags);
mvreg_write(pp, MVNETA_INTR_NEW_MASK,
- MVNETA_RX_INTR_MASK(rxq_number));
+ MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));
local_irq_restore(flags);
}
@@ -1923,56 +2002,19 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
return rx_done;
}
-/* tx done timer callback */
-static void mvneta_tx_done_timer_callback(unsigned long data)
-{
- struct net_device *dev = (struct net_device *)data;
- struct mvneta_port *pp = netdev_priv(dev);
- int tx_done = 0, tx_todo = 0;
-
- if (!netif_running(dev))
- return ;
-
- clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
-
- tx_done = mvneta_tx_done_gbe(pp,
- (((1 << txq_number) - 1) &
- MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK),
- &tx_todo);
- if (tx_todo > 0)
- mvneta_add_tx_done_timer(pp);
-}
-
/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
int num)
{
- struct net_device *dev = pp->dev;
int i;
for (i = 0; i < num; i++) {
- struct sk_buff *skb;
- struct mvneta_rx_desc *rx_desc;
- unsigned long phys_addr;
-
- skb = dev_alloc_skb(pp->pkt_size);
- if (!skb) {
- netdev_err(dev, "%s:rxq %d, %d of %d buffs filled\n",
+ memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
+ if (mvneta_rx_refill(pp, rxq->descs + i) != 0) {
+ netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs filled\n",
__func__, rxq->id, i, num);
break;
}
-
- rx_desc = rxq->descs + i;
- memset(rx_desc, 0, sizeof(struct mvneta_rx_desc));
- phys_addr = dma_map_single(dev->dev.parent, skb->head,
- MVNETA_RX_BUF_SIZE(pp->pkt_size),
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev->dev.parent, phys_addr))) {
- dev_kfree_skb(skb);
- break;
- }
-
- mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)skb);
}
/* Add this number of RX descriptors as non occupied (ready to
@@ -2192,7 +2234,7 @@ static void mvneta_start_dev(struct mvneta_port *pp)
/* Unmask interrupts */
mvreg_write(pp, MVNETA_INTR_NEW_MASK,
- MVNETA_RX_INTR_MASK(rxq_number));
+ MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));
phy_start(pp->phy_dev);
netif_tx_start_all_queues(pp->dev);
@@ -2225,16 +2267,6 @@ static void mvneta_stop_dev(struct mvneta_port *pp)
mvneta_rx_reset(pp);
}
-/* tx timeout callback - display a message and stop/start the network device */
-static void mvneta_tx_timeout(struct net_device *dev)
-{
- struct mvneta_port *pp = netdev_priv(dev);
-
- netdev_info(dev, "tx timeout\n");
- mvneta_stop_dev(pp);
- mvneta_start_dev(pp);
-}
-
/* Return positive if MTU is valid */
static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
{
@@ -2282,6 +2314,8 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
mvneta_cleanup_rxqs(pp);
pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
+ pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
ret = mvneta_setup_rxqs(pp);
if (ret) {
@@ -2429,6 +2463,8 @@ static int mvneta_open(struct net_device *dev)
mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);
pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
+ pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
ret = mvneta_setup_rxqs(pp);
if (ret)
@@ -2478,8 +2514,6 @@ static int mvneta_stop(struct net_device *dev)
free_irq(dev->irq, pp);
mvneta_cleanup_rxqs(pp);
mvneta_cleanup_txqs(pp);
- del_timer(&pp->tx_done_timer);
- clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
return 0;
}
@@ -2615,7 +2649,6 @@ static const struct net_device_ops mvneta_netdev_ops = {
.ndo_set_rx_mode = mvneta_set_rx_mode,
.ndo_set_mac_address = mvneta_set_mac_addr,
.ndo_change_mtu = mvneta_change_mtu,
- .ndo_tx_timeout = mvneta_tx_timeout,
.ndo_get_stats64 = mvneta_get_stats64,
.ndo_do_ioctl = mvneta_ioctl,
};
@@ -2751,6 +2784,7 @@ static int mvneta_probe(struct platform_device *pdev)
const char *mac_from;
int phy_mode;
int err;
+ int cpu;
/* Our multiqueue support is not complete, so for now, only
* allow the usage of the first RX queue
@@ -2792,9 +2826,6 @@ static int mvneta_probe(struct platform_device *pdev)
pp = netdev_priv(dev);
- u64_stats_init(&pp->tx_stats.syncp);
- u64_stats_init(&pp->rx_stats.syncp);
-
pp->weight = MVNETA_RX_POLL_WEIGHT;
pp->phy_node = phy_node;
pp->phy_interface = phy_mode;
@@ -2813,6 +2844,19 @@ static int mvneta_probe(struct platform_device *pdev)
goto err_clk;
}
+ /* Alloc per-cpu stats */
+ pp->stats = alloc_percpu(struct mvneta_pcpu_stats);
+ if (!pp->stats) {
+ err = -ENOMEM;
+ goto err_unmap;
+ }
+
+ for_each_possible_cpu(cpu) {
+ struct mvneta_pcpu_stats *stats;
+ stats = per_cpu_ptr(pp->stats, cpu);
+ u64_stats_init(&stats->syncp);
+ }
+
dt_mac_addr = of_get_mac_address(dn);
if (dt_mac_addr) {
mac_from = "device tree";
@@ -2828,11 +2872,6 @@ static int mvneta_probe(struct platform_device *pdev)
}
}
- pp->tx_done_timer.data = (unsigned long)dev;
- pp->tx_done_timer.function = mvneta_tx_done_timer_callback;
- init_timer(&pp->tx_done_timer);
- clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
-
pp->tx_ring_size = MVNETA_MAX_TXD;
pp->rx_ring_size = MVNETA_MAX_RXD;
@@ -2842,7 +2881,7 @@ static int mvneta_probe(struct platform_device *pdev)
err = mvneta_init(pp, phy_addr);
if (err < 0) {
dev_err(&pdev->dev, "can't init eth hal\n");
- goto err_unmap;
+ goto err_free_stats;
}
mvneta_port_power_up(pp, phy_mode);
@@ -2872,6 +2911,8 @@ static int mvneta_probe(struct platform_device *pdev)
err_deinit:
mvneta_deinit(pp);
+err_free_stats:
+ free_percpu(pp->stats);
err_unmap:
iounmap(pp->base);
err_clk:
@@ -2892,6 +2933,7 @@ static int mvneta_remove(struct platform_device *pdev)
unregister_netdev(dev);
mvneta_deinit(pp);
clk_disable_unprepare(pp->clk);
+ free_percpu(pp->stats);
iounmap(pp->base);
irq_dispose_mapping(dev->irq);
free_netdev(dev);
@@ -2924,3 +2966,4 @@ module_param(rxq_number, int, S_IRUGO);
module_param(txq_number, int, S_IRUGO);
module_param(rxq_def, int, S_IRUGO);
+module_param(rx_copybreak, int, S_IRUGO | S_IWUSR);
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index fff62460185c..b358c2f6f4bd 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -19,11 +19,9 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/in.h>
#include <linux/ip.h>
@@ -321,23 +319,6 @@ static void ethernet_phy_set_addr(struct pxa168_eth_private *pep, int phy_addr)
wrl(pep, PHY_ADDRESS, reg_data);
}
-static void ethernet_phy_reset(struct pxa168_eth_private *pep)
-{
- int data;
-
- data = phy_read(pep->phy, MII_BMCR);
- if (data < 0)
- return;
-
- data |= BMCR_RESET;
- if (phy_write(pep->phy, MII_BMCR, data) < 0)
- return;
-
- do {
- data = phy_read(pep->phy, MII_BMCR);
- } while (data >= 0 && data & BMCR_RESET);
-}
-
static void rxq_refill(struct net_device *dev)
{
struct pxa168_eth_private *pep = netdev_priv(dev);
@@ -646,7 +627,7 @@ static void eth_port_start(struct net_device *dev)
struct ethtool_cmd cmd;
pxa168_get_settings(pep->dev, &cmd);
- ethernet_phy_reset(pep);
+ phy_init_hw(pep->phy);
pxa168_set_settings(pep->dev, &cmd);
}
@@ -1383,7 +1364,6 @@ static struct phy_device *phy_scan(struct pxa168_eth_private *pep, int phy_addr)
static void phy_init(struct pxa168_eth_private *pep, int speed, int duplex)
{
struct phy_device *phy = pep->phy;
- ethernet_phy_reset(pep);
phy_attach(pep->dev, dev_name(&phy->dev), PHY_INTERFACE_MODE_MII);
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 43aa7acd84a6..55a37ae11440 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -2495,7 +2495,7 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2,
skb_copy_from_linear_data(re->skb, skb->data, length);
skb->ip_summed = re->skb->ip_summed;
skb->csum = re->skb->csum;
- skb->rxhash = re->skb->rxhash;
+ skb_copy_hash(skb, re->skb);
skb->vlan_proto = re->skb->vlan_proto;
skb->vlan_tci = re->skb->vlan_tci;
@@ -2503,7 +2503,7 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2,
length, PCI_DMA_FROMDEVICE);
re->skb->vlan_proto = 0;
re->skb->vlan_tci = 0;
- re->skb->rxhash = 0;
+ skb_clear_hash(re->skb);
re->skb->ip_summed = CHECKSUM_NONE;
skb_put(skb, length);
}
@@ -2723,7 +2723,7 @@ static void sky2_rx_hash(struct sky2_port *sky2, u32 status)
struct sk_buff *skb;
skb = sky2->rx_ring[sky2->rx_next].skb;
- skb->rxhash = le32_to_cpu(status);
+ skb_set_hash(skb, le32_to_cpu(status), PKT_HASH_TYPE_L3);
}
/* Process status response ring */
@@ -5020,6 +5020,8 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
+ netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT);
+
err = register_netdev(dev);
if (err) {
dev_err(&pdev->dev, "cannot register net device\n");
@@ -5028,8 +5030,6 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_carrier_off(dev);
- netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT);
-
sky2_show_addr(dev);
if (hw->ports > 1) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
index eb520ab64014..563495d8975a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
@@ -6,6 +6,7 @@ config MLX4_EN
tristate "Mellanox Technologies 10Gbit Ethernet support"
depends on PCI
select MLX4_CORE
+ select PTP_1588_CLOCK
---help---
This driver supports Mellanox Technologies ConnectX Ethernet
devices.
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index 06fef5b44f77..c3ad464d0627 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -71,9 +71,9 @@ u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
return obj;
}
-void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj)
+void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj, int use_rr)
{
- mlx4_bitmap_free_range(bitmap, obj, 1);
+ mlx4_bitmap_free_range(bitmap, obj, 1, use_rr);
}
u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
@@ -118,11 +118,17 @@ u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap)
return bitmap->avail;
}
-void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
+void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt,
+ int use_rr)
{
obj &= bitmap->max + bitmap->reserved_top - 1;
spin_lock(&bitmap->lock);
+ if (!use_rr) {
+ bitmap->last = min(bitmap->last, obj);
+ bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
+ & bitmap->mask;
+ }
bitmap_clear(bitmap->table, obj, cnt);
bitmap->avail += cnt;
spin_unlock(&bitmap->lock);
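[Annotation: in effect this restores lowest-first reuse when use_rr == 0 — if objects 7 and then 2 are freed, bitmap->last is pulled back to 2, so the next mlx4_bitmap_alloc() rescans from there and hands out the lowest free object again; with use_rr == 1 the cursor keeps advancing and a freed object only comes back after a full rotation of the table.]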
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 1e9970d2f0f3..0d02fba94536 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1371,6 +1371,15 @@ static struct mlx4_cmd_info cmd_info[] = {
.verify = NULL,
.wrapper = mlx4_QP_FLOW_STEERING_DETACH_wrapper
},
+ {
+ .opcode = MLX4_FLOW_STEERING_IB_UC_QP_RANGE,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_FLOW_STEERING_IB_UC_QP_RANGE_wrapper
+ },
};
static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 22fcbe78311c..0487121e4a0f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -34,7 +34,6 @@
* SOFTWARE.
*/
-#include <linux/init.h>
#include <linux/hardirq.h>
#include <linux/export.h>
@@ -187,7 +186,7 @@ err_put:
mlx4_table_put(dev, &cq_table->table, *cqn);
err_out:
- mlx4_bitmap_free(&cq_table->bitmap, *cqn);
+ mlx4_bitmap_free(&cq_table->bitmap, *cqn, MLX4_NO_RR);
return err;
}
@@ -217,7 +216,7 @@ void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
mlx4_table_put(dev, &cq_table->cmpt_table, cqn);
mlx4_table_put(dev, &cq_table->table, cqn);
- mlx4_bitmap_free(&cq_table->bitmap, cqn);
+ mlx4_bitmap_free(&cq_table->bitmap, cqn, MLX4_NO_RR);
}
static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index fd6441071319..abaf6bb22416 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -42,6 +42,10 @@ int mlx4_en_timestamp_config(struct net_device *dev, int tx_type, int rx_filter)
int port_up = 0;
int err = 0;
+ if (priv->hwtstamp_config.tx_type == tx_type &&
+ priv->hwtstamp_config.rx_filter == rx_filter)
+ return 0;
+
mutex_lock(&mdev->state_lock);
if (priv->port_up) {
port_up = 1;
@@ -103,19 +107,191 @@ void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
struct skb_shared_hwtstamps *hwts,
u64 timestamp)
{
+ unsigned long flags;
u64 nsec;
+ read_lock_irqsave(&mdev->clock_lock, flags);
nsec = timecounter_cyc2time(&mdev->clock, timestamp);
+ read_unlock_irqrestore(&mdev->clock_lock, flags);
memset(hwts, 0, sizeof(struct skb_shared_hwtstamps));
hwts->hwtstamp = ns_to_ktime(nsec);
}
+/**
+ * mlx4_en_remove_timestamp - disable PTP device
+ * @mdev: board private structure
+ *
+ * Stop the PTP support.
+ **/
+void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev)
+{
+ if (mdev->ptp_clock) {
+ ptp_clock_unregister(mdev->ptp_clock);
+ mdev->ptp_clock = NULL;
+ mlx4_info(mdev, "removed PHC\n");
+ }
+}
+
+void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev)
+{
+ bool timeout = time_is_before_jiffies(mdev->last_overflow_check +
+ mdev->overflow_period);
+ unsigned long flags;
+
+ if (timeout) {
+ write_lock_irqsave(&mdev->clock_lock, flags);
+ timecounter_read(&mdev->clock);
+ write_unlock_irqrestore(&mdev->clock_lock, flags);
+ mdev->last_overflow_check = jiffies;
+ }
+}
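[Annotation, for a sense of scale: assuming, purely for illustration, a 400 MHz hca_core_clock, the shift of 14 chosen at init time gives mult = clocksource_khz2mult(400000, 14) = 40960, so the 48-bit cycle counter wraps after roughly (2^48 * 40960) >> 14 ≈ 7 × 10^14 ns — a bit over eight days — and the overflow_period derived from that wrap time keeps timecounter_read() running often enough for the cycle-delta arithmetic to stay unambiguous.]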
+
+/**
+ * mlx4_en_phc_adjfreq - adjust the frequency of the hardware clock
+ * @ptp: ptp clock structure
+ * @delta: Desired frequency change in parts per billion
+ *
+ * Adjust the frequency of the PHC cycle counter by the indicated delta from
+ * the base frequency.
+ **/
+static int mlx4_en_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
+{
+ u64 adj;
+ u32 diff, mult;
+ int neg_adj = 0;
+ unsigned long flags;
+ struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
+ ptp_clock_info);
+
+ if (delta < 0) {
+ neg_adj = 1;
+ delta = -delta;
+ }
+ mult = mdev->nominal_c_mult;
+ adj = mult;
+ adj *= delta;
+ diff = div_u64(adj, 1000000000ULL);
+
+ write_lock_irqsave(&mdev->clock_lock, flags);
+ timecounter_read(&mdev->clock);
+ mdev->cycles.mult = neg_adj ? mult - diff : mult + diff;
+ write_unlock_irqrestore(&mdev->clock_lock, flags);
+
+ return 0;
+}
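[Annotation, worked example with the illustrative nominal mult of 40960 from above: a requested offset of +100 ppm arrives as delta = 100000, so diff = 40960 * 100000 / 10^9 = 4 and the counter runs with mult = 40964 until the next adjustment; the timecounter_read() under the lock first banks the time accrued at the old rate, so the rate change does not retroactively rescale it.]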
+
+/**
+ * mlx4_en_phc_adjtime - Shift the time of the hardware clock
+ * @ptp: ptp clock structure
+ * @delta: Desired change in nanoseconds
+ *
+ * Adjust the timer by resetting the timecounter structure.
+ **/
+static int mlx4_en_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
+ ptp_clock_info);
+ unsigned long flags;
+ s64 now;
+
+ write_lock_irqsave(&mdev->clock_lock, flags);
+ now = timecounter_read(&mdev->clock);
+ now += delta;
+ timecounter_init(&mdev->clock, &mdev->cycles, now);
+ write_unlock_irqrestore(&mdev->clock_lock, flags);
+
+ return 0;
+}
+
+/**
+ * mlx4_en_phc_gettime - Reads the current time from the hardware clock
+ * @ptp: ptp clock structure
+ * @ts: timespec structure to hold the current time value
+ *
+ * Read the timecounter and return the correct value in ns after converting
+ * it into a struct timespec.
+ **/
+static int mlx4_en_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+ struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
+ ptp_clock_info);
+ unsigned long flags;
+ u32 remainder;
+ u64 ns;
+
+ write_lock_irqsave(&mdev->clock_lock, flags);
+ ns = timecounter_read(&mdev->clock);
+ write_unlock_irqrestore(&mdev->clock_lock, flags);
+
+ ts->tv_sec = div_u64_rem(ns, NSEC_PER_SEC, &remainder);
+ ts->tv_nsec = remainder;
+
+ return 0;
+}
+
+/**
+ * mlx4_en_phc_settime - Set the current time on the hardware clock
+ * @ptp: ptp clock structure
+ * @ts: timespec containing the new time for the cycle counter
+ *
+ * Reset the timecounter to use a new base value instead of the kernel
+ * wall timer value.
+ **/
+static int mlx4_en_phc_settime(struct ptp_clock_info *ptp,
+ const struct timespec *ts)
+{
+ struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
+ ptp_clock_info);
+ u64 ns = timespec_to_ns(ts);
+ unsigned long flags;
+
+ /* reset the timecounter */
+ write_lock_irqsave(&mdev->clock_lock, flags);
+ timecounter_init(&mdev->clock, &mdev->cycles, ns);
+ write_unlock_irqrestore(&mdev->clock_lock, flags);
+
+ return 0;
+}
+
+/**
+ * mlx4_en_phc_enable - enable or disable an ancillary feature
+ * @ptp: ptp clock structure
+ * @request: Desired resource to enable or disable
+ * @on: Caller passes one to enable or zero to disable
+ *
+ * Enable (or disable) ancillary features of the PHC subsystem.
+ * Currently, no ancillary features are supported.
+ **/
+static int mlx4_en_phc_enable(struct ptp_clock_info __always_unused *ptp,
+ struct ptp_clock_request __always_unused *request,
+ int __always_unused on)
+{
+ return -EOPNOTSUPP;
+}
+
+static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .max_adj = 100000000,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
+ .pps = 0,
+ .adjfreq = mlx4_en_phc_adjfreq,
+ .adjtime = mlx4_en_phc_adjtime,
+ .gettime = mlx4_en_phc_gettime,
+ .settime = mlx4_en_phc_settime,
+ .enable = mlx4_en_phc_enable,
+};
+
void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
{
struct mlx4_dev *dev = mdev->dev;
+ unsigned long flags;
u64 ns;
+ rwlock_init(&mdev->clock_lock);
+
memset(&mdev->cycles, 0, sizeof(mdev->cycles));
mdev->cycles.read = mlx4_en_read_clock;
mdev->cycles.mask = CLOCKSOURCE_MASK(48);
@@ -127,9 +303,12 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
mdev->cycles.shift = 14;
mdev->cycles.mult =
clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift);
+ mdev->nominal_c_mult = mdev->cycles.mult;
+ write_lock_irqsave(&mdev->clock_lock, flags);
timecounter_init(&mdev->clock, &mdev->cycles,
ktime_to_ns(ktime_get_real()));
+ write_unlock_irqrestore(&mdev->clock_lock, flags);
/* Calculate period in seconds to call the overflow watchdog - to make
* sure counter is checked at least once every wrap around.
@@ -137,15 +316,18 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask);
do_div(ns, NSEC_PER_SEC / 2 / HZ);
mdev->overflow_period = ns;
-}
-void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev)
-{
- bool timeout = time_is_before_jiffies(mdev->last_overflow_check +
- mdev->overflow_period);
+ /* Configure the PHC */
+ mdev->ptp_clock_info = mlx4_en_ptp_clock_info;
+	snprintf(mdev->ptp_clock_info.name, sizeof(mdev->ptp_clock_info.name),
+		 "mlx4 ptp");
- if (timeout) {
- timecounter_read(&mdev->clock);
- mdev->last_overflow_check = jiffies;
+ mdev->ptp_clock = ptp_clock_register(&mdev->ptp_clock_info,
+ &mdev->pdev->dev);
+ if (IS_ERR(mdev->ptp_clock)) {
+ mdev->ptp_clock = NULL;
+ mlx4_err(mdev, "ptp_clock_register failed\n");
+ } else {
+ mlx4_info(mdev, "registered PHC clock\n");
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 3a098cc4d349..70e95324a97d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -161,12 +161,16 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
cq->mcq.event = mlx4_en_cq_event;
- if (!cq->is_tx) {
+ if (cq->is_tx) {
+ netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
+ NAPI_POLL_WEIGHT);
+ } else {
netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
napi_hash_add(&cq->napi);
- napi_enable(&cq->napi);
}
+ napi_enable(&cq->napi);
+
return 0;
}
@@ -188,12 +192,12 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
+ napi_disable(&cq->napi);
if (!cq->is_tx) {
- napi_disable(&cq->napi);
napi_hash_del(&cq->napi);
synchronize_rcu();
- netif_napi_del(&cq->napi);
}
+ netif_napi_del(&cq->napi);
mlx4_cq_free(priv->mdev->dev, &cq->mcq);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 0596f9f85a0e..3e8d33605fe7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1193,6 +1193,9 @@ static int mlx4_en_get_ts_info(struct net_device *dev,
info->rx_filters =
(1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_ALL);
+
+ if (mdev->ptp_clock)
+ info->phc_index = ptp_clock_index(mdev->ptp_clock);
}
return ret;
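[Annotation: with the PHC registered and its index exported here, user space can confirm the clock with `ethtool -T <iface>` and hand it to the linuxptp tools (ptp4l/phc2sys), or access it directly through the corresponding /dev/ptp* character device.]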
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 0d087b03a7b0..d357bf5a4686 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -174,6 +174,9 @@ static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
mlx4_err(mdev, "Internal error detected, restarting device\n");
break;
+ case MLX4_DEV_EVENT_SLAVE_INIT:
+ case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
+ break;
default:
if (port < 1 || port > dev->caps.num_ports ||
!mdev->pndev[port])
@@ -196,6 +199,9 @@ static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
if (mdev->pndev[i])
mlx4_en_destroy_netdev(mdev->pndev[i]);
+ if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
+ mlx4_en_remove_timestamp(mdev);
+
flush_workqueue(mdev->workqueue);
destroy_workqueue(mdev->workqueue);
(void) mlx4_mr_free(dev, &mdev->mr);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index e72d8a112a6b..fad45316200a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -468,6 +468,53 @@ static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
memset(&dst_mac[ETH_ALEN], 0, 2);
}
+
+static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr,
+ int qpn, u64 *reg_id)
+{
+ int err;
+ struct mlx4_spec_list spec_eth_outer = { {NULL} };
+ struct mlx4_spec_list spec_vxlan = { {NULL} };
+ struct mlx4_spec_list spec_eth_inner = { {NULL} };
+
+ struct mlx4_net_trans_rule rule = {
+ .queue_mode = MLX4_NET_TRANS_Q_FIFO,
+ .exclusive = 0,
+ .allow_loopback = 1,
+ .promisc_mode = MLX4_FS_REGULAR,
+ .priority = MLX4_DOMAIN_NIC,
+ };
+
+ __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
+
+ if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
+ return 0; /* do nothing */
+
+ rule.port = priv->port;
+ rule.qpn = qpn;
+ INIT_LIST_HEAD(&rule.list);
+
+ spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH;
+ memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN);
+ memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
+
+ spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN; /* any vxlan header */
+ spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH; /* any inner eth header */
+
+ list_add_tail(&spec_eth_outer.list, &rule.list);
+ list_add_tail(&spec_vxlan.list, &rule.list);
+ list_add_tail(&spec_eth_inner.list, &rule.list);
+
+ err = mlx4_flow_attach(priv->mdev->dev, &rule, reg_id);
+ if (err) {
+ en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
+ return err;
+ }
+ en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, *reg_id);
+ return 0;
+}
+
static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
unsigned char *mac, int *qpn, u64 *reg_id)
{
@@ -585,6 +632,11 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
if (err)
goto steer_err;
+ err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn,
+ &priv->tunnel_reg_id);
+ if (err)
+ goto tunnel_err;
+
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry) {
err = -ENOMEM;
@@ -599,6 +651,9 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
return 0;
alloc_err:
+ if (priv->tunnel_reg_id)
+ mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
+tunnel_err:
mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
steer_err:
@@ -642,6 +697,11 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
}
}
+ if (priv->tunnel_reg_id) {
+ mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
+ priv->tunnel_reg_id = 0;
+ }
+
en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
priv->port, qpn);
mlx4_qp_release_range(dev, qpn, 1);
@@ -782,7 +842,7 @@ static void update_mclist_flags(struct mlx4_en_priv *priv,
list_for_each_entry(dst_tmp, dst, list) {
found = false;
list_for_each_entry(src_tmp, src, list) {
- if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
+ if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
found = true;
break;
}
@@ -797,7 +857,7 @@ static void update_mclist_flags(struct mlx4_en_priv *priv,
list_for_each_entry(src_tmp, src, list) {
found = false;
list_for_each_entry(dst_tmp, dst, list) {
- if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
+ if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
dst_tmp->action = MCLIST_NONE;
found = true;
break;
@@ -1044,6 +1104,12 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
if (err)
en_err(priv, "Fail to detach multicast address\n");
+ if (mclist->tunnel_reg_id) {
+ err = mlx4_flow_detach(priv->mdev->dev, mclist->tunnel_reg_id);
+ if (err)
+ en_err(priv, "Failed to detach multicast address\n");
+ }
+
/* remove from list */
list_del(&mclist->list);
kfree(mclist);
@@ -1061,6 +1127,10 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
if (err)
en_err(priv, "Fail to attach multicast address\n");
+ err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn,
+ &mclist->tunnel_reg_id);
+ if (err)
+ en_err(priv, "Failed to attach multicast address\n");
}
}
}
@@ -1598,6 +1668,15 @@ int mlx4_en_start_port(struct net_device *dev)
goto tx_err;
}
+ if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+ err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC);
+ if (err) {
+ en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
+ err);
+ goto tx_err;
+ }
+ }
+
/* Init port */
en_dbg(HW, priv, "Initializing port\n");
err = mlx4_INIT_PORT(mdev->dev, priv->port);
@@ -1910,8 +1989,10 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
prof->tx_ring_size, i, TX, node))
goto err;
- if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], priv->base_tx_qpn + i,
- prof->tx_ring_size, TXBB_SIZE, node))
+ if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
+ priv->base_tx_qpn + i,
+ prof->tx_ring_size, TXBB_SIZE,
+ node, i))
goto err;
}
@@ -2025,7 +2106,7 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
-static int mlx4_en_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
+static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
@@ -2084,11 +2165,21 @@ static int mlx4_en_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
sizeof(config)) ? -EFAULT : 0;
}
+static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ return copy_to_user(ifr->ifr_data, &priv->hwtstamp_config,
+ sizeof(priv->hwtstamp_config)) ? -EFAULT : 0;
+}
+
static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
case SIOCSHWTSTAMP:
- return mlx4_en_hwtstamp_ioctl(dev, ifr);
+ return mlx4_en_hwtstamp_set(dev, ifr);
+ case SIOCGHWTSTAMP:
+ return mlx4_en_hwtstamp_get(dev, ifr);
default:
return -EOPNOTSUPP;
}
@@ -2154,6 +2245,27 @@ static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_st
return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
}
+
+#define PORT_ID_BYTE_LEN 8
+static int mlx4_en_get_phys_port_id(struct net_device *dev,
+ struct netdev_phys_port_id *ppid)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_dev *mdev = priv->mdev->dev;
+ int i;
+ u64 phys_port_id = mdev->caps.phys_port_id[priv->port];
+
+ if (!phys_port_id)
+ return -EOPNOTSUPP;
+
+ ppid->id_len = sizeof(phys_port_id);
+ for (i = PORT_ID_BYTE_LEN - 1; i >= 0; --i) {
+ ppid->id[i] = phys_port_id & 0xff;
+ phys_port_id >>= 8;
+ }
+ return 0;
+}
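[Annotation: the loop emits the GUID most-significant byte first, so a hypothetical phys_port_id of 0x0002c9030045d830 becomes the byte string 00:02:c9:03:00:45:d8:30; every function sharing the physical port reports the same identifier, which iproute2 shows as `portid` under `ip -d link`.]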
+
static const struct net_device_ops mlx4_netdev_ops = {
.ndo_open = mlx4_en_open,
.ndo_stop = mlx4_en_close,
@@ -2179,6 +2291,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
#ifdef CONFIG_NET_RX_BUSY_POLL
.ndo_busy_poll = mlx4_en_low_latency_recv,
#endif
+ .ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
};
static const struct net_device_ops mlx4_netdev_ops_master = {
@@ -2207,6 +2320,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif
+ .ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
};
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
@@ -2365,6 +2479,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
dev->priv_flags |= IFF_UNICAST_FLT;
+ if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+ dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
+ dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+ dev->features |= NETIF_F_GSO_UDP_TUNNEL;
+ }
+
mdev->pndev[port] = dev;
netif_carrier_off(dev);
@@ -2394,6 +2515,15 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
goto out;
}
+ if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+ err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC);
+ if (err) {
+ en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
+ err);
+ goto out;
+ }
+ }
+
/* Init port */
en_warn(priv, "Initializing port\n");
err = mlx4_INIT_PORT(mdev->dev, priv->port);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index d3f508697a3d..f1a5500ff72d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -68,6 +68,12 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2);
if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX))
context->param3 |= cpu_to_be32(1 << 30);
+
+ if (!is_tx && !rss &&
+ (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)) {
+ en_dbg(HW, priv, "Setting RX qp %x tunnel mode to RX tunneled & non-tunneled\n", qpn);
+ context->srqn = cpu_to_be32(7 << 28); /* this fills bits 30:28 */
+ }
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 07a1d0fbae47..890922c1c8ee 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -631,6 +631,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
int ip_summed;
int factor = priv->cqe_factor;
u64 timestamp;
+ bool l2_tunnel;
if (!priv->port_up)
return 0;
@@ -709,6 +710,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
length -= ring->fcs_del;
ring->bytes += length;
ring->packets++;
+ l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
+ (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
if (likely(dev->features & NETIF_F_RXCSUM)) {
if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
@@ -721,7 +724,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
* - not an IP fragment
* - no LLS polling in progress
*/
- if (!mlx4_en_cq_ll_polling(cq) &&
+ if (!mlx4_en_cq_busy_polling(cq) &&
(dev->features & NETIF_F_GRO)) {
struct sk_buff *gro_skb = napi_get_frags(&cq->napi);
if (!gro_skb)
@@ -738,6 +741,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
gro_skb->data_len = length;
gro_skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if (l2_tunnel)
+ gro_skb->encapsulation = 1;
if ((cqe->vlan_my_qpn &
cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) &&
(dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
@@ -747,7 +752,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
}
if (dev->features & NETIF_F_RXHASH)
- gro_skb->rxhash = be32_to_cpu(cqe->immed_rss_invalid);
+ skb_set_hash(gro_skb,
+ be32_to_cpu(cqe->immed_rss_invalid),
+ PKT_HASH_TYPE_L3);
skb_record_rx_queue(gro_skb, cq->ring);
@@ -788,8 +795,13 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
skb->protocol = eth_type_trans(skb, dev);
skb_record_rx_queue(skb, cq->ring);
+ if (l2_tunnel)
+ skb->encapsulation = 1;
+
if (dev->features & NETIF_F_RXHASH)
- skb->rxhash = be32_to_cpu(cqe->immed_rss_invalid);
+ skb_set_hash(skb,
+ be32_to_cpu(cqe->immed_rss_invalid),
+ PKT_HASH_TYPE_L3);
if ((be32_to_cpu(cqe->vlan_my_qpn) &
MLX4_CQE_VLAN_PRESENT_MASK) &&
@@ -804,8 +816,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
skb_mark_napi_id(skb, &cq->napi);
- /* Push it up the stack */
- netif_receive_skb(skb);
+ if (!mlx4_en_cq_busy_polling(cq))
+ napi_gro_receive(&cq->napi, skb);
+ else
+ netif_receive_skb(skb);
next:
for (nr = 0; nr < priv->num_frags; nr++)
@@ -1053,6 +1067,12 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
rss_mask |= MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6;
rss_context->base_qpn_udp = rss_context->default_qpn;
}
+
+ if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+ en_info(priv, "Setting RSS context tunnel type to RSS on inner headers\n");
+ rss_mask |= MLX4_RSS_BY_INNER_HEADERS;
+ }
+
rss_context->flags = rss_mask;
rss_context->hash_fn = MLX4_RSS_HASH_TOP;
for (i = 0; i < 10; i++)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index a7fcd593b2db..8e8a7eb43a2c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -39,6 +39,7 @@
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/tcp.h>
+#include <linux/ip.h>
#include <linux/moduleparam.h>
#include "mlx4_en.h"
@@ -55,7 +56,7 @@ MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring **pring, int qpn, u32 size,
- u16 stride, int node)
+ u16 stride, int node, int queue_index)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_tx_ring *ring;
@@ -140,6 +141,10 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
ring->bf_enabled = true;
ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
+ ring->queue_index = queue_index;
+
+ if (queue_index < priv->num_tx_rings_p_up && cpu_online(queue_index))
+ cpumask_set_cpu(queue_index, &ring->affinity_mask);
*pring = ring;
return 0;
@@ -206,6 +211,9 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
&ring->qp, &ring->qp_state);
+ if (!user_prio && cpu_online(ring->queue_index))
+ netif_set_xps_queue(priv->dev, &ring->affinity_mask,
+ ring->queue_index);
return err;
}
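[Annotation: pinning ring n to CPU n this way means a flow transmitted from CPU n tends to be queued, completed and freed on the same CPU; the resulting XPS map stays visible — and overridable — through /sys/class/net/<dev>/queues/tx-<n>/xps_cpus.]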
@@ -317,7 +325,7 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
}
}
}
- dev_kfree_skb_any(skb);
+ dev_kfree_skb(skb);
return tx_info->nr_txbb;
}
@@ -354,7 +362,9 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
return cnt;
}
-static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
+static int mlx4_en_process_tx_cq(struct net_device *dev,
+ struct mlx4_en_cq *cq,
+ int budget)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_cq *mcq = &cq->mcq;
@@ -372,9 +382,10 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
u32 bytes = 0;
int factor = priv->cqe_factor;
u64 timestamp = 0;
+ int done = 0;
if (!priv->port_up)
- return;
+ return 0;
index = cons_index & size_mask;
cqe = &buf[(index << factor) + factor];
@@ -383,7 +394,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
/* Process all completed CQEs */
while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
- cons_index & size)) {
+ cons_index & size) && (done < budget)) {
/*
* make sure we read the CQE after we read the
* ownership bit
@@ -421,7 +432,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
txbbs_stamp = txbbs_skipped;
packets++;
bytes += ring->tx_info[ring_index].nr_bytes;
- } while (ring_index != new_index);
+ } while ((++done < budget) && (ring_index != new_index));
++cons_index;
index = cons_index & size_mask;
@@ -447,6 +458,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
netif_tx_wake_queue(ring->tx_queue);
priv->port_stats.wake_queue++;
}
+ return done;
}
void mlx4_en_tx_irq(struct mlx4_cq *mcq)
@@ -454,10 +466,31 @@ void mlx4_en_tx_irq(struct mlx4_cq *mcq)
struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
struct mlx4_en_priv *priv = netdev_priv(cq->dev);
- mlx4_en_process_tx_cq(cq->dev, cq);
- mlx4_en_arm_cq(priv, cq);
+ if (priv->port_up)
+ napi_schedule(&cq->napi);
+ else
+ mlx4_en_arm_cq(priv, cq);
}
+/* TX CQ polling - called by NAPI */
+int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget)
+{
+ struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
+ struct net_device *dev = cq->dev;
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ int done;
+
+ done = mlx4_en_process_tx_cq(dev, cq, budget);
+
+	if (done < budget) {
+		/* Done for now */
+ napi_complete(napi);
+ mlx4_en_arm_cq(priv, cq);
+ return done;
+ }
+	/* We used up all the quota - we're probably not done yet... */
+	return budget;
+}
static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
@@ -528,7 +561,10 @@ static int get_real_size(struct sk_buff *skb, struct net_device *dev,
int real_size;
if (skb_is_gso(skb)) {
- *lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ if (skb->encapsulation)
+ *lso_header_size = (skb_inner_transport_header(skb) - skb->data) + inner_tcp_hdrlen(skb);
+ else
+ *lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
real_size = CTRL_SIZE + skb_shinfo(skb)->nr_frags * DS_SIZE +
ALIGN(*lso_header_size + 4, DS_SIZE);
if (unlikely(*lso_header_size != skb_headlen(skb))) {
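[Annotation: for an encapsulated GSO skb this measures the full outer-plus-inner header chain; e.g. outer Ethernet (14) + outer IPv4 (20) + UDP (8) + VXLAN (8) + inner Ethernet (14) + inner IPv4 (20) puts the inner transport header at offset 84, and with a 20-byte inner TCP header lso_header_size = 104 bytes — the amount of header the hardware replicates for every emitted segment, which the skb_headlen() comparison just below weighs against the linear part of the skb.]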
@@ -828,6 +864,14 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
tx_info->inl = 1;
}
+ if (skb->encapsulation) {
+ struct iphdr *ipv4 = (struct iphdr *)skb_inner_network_header(skb);
+ if (ipv4->protocol == IPPROTO_TCP || ipv4->protocol == IPPROTO_UDP)
+ op_own |= cpu_to_be32(MLX4_WQE_CTRL_IIP | MLX4_WQE_CTRL_ILP);
+ else
+ op_own |= cpu_to_be32(MLX4_WQE_CTRL_IIP);
+ }
+
ring->prod += nr_txbb;
/* If we used a bounce buffer then copy descriptor back into place */
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index c9cdb2a2c596..8992b38578d5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -31,7 +31,6 @@
* SOFTWARE.
*/
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/export.h>
@@ -963,7 +962,7 @@ err_out_free_mtt:
mlx4_mtt_cleanup(dev, &eq->mtt);
err_out_free_eq:
- mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
+ mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR);
err_out_free_pages:
for (i = 0; i < npages; ++i)
@@ -1018,7 +1017,7 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
eq->page_list[i].map);
kfree(eq->page_list);
- mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
+ mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR);
mlx4_free_cmd_mailbox(dev, mailbox);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 194928214606..91b69ff4b4a2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -134,7 +134,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
[5] = "Time stamping support",
[6] = "VST (control vlan insertion/stripping) support",
[7] = "FSM (MAC anti-spoofing) support",
- [8] = "Dynamic QP updates support"
+ [8] = "Dynamic QP updates support",
+ [9] = "TCP/IP offloads/flow-steering for VXLAN support"
};
int i;
@@ -207,25 +208,25 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
/* when opcode modifier = 1 */
#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3
-#define QUERY_FUNC_CAP_RDMA_PROPS_OFFSET 0x8
-#define QUERY_FUNC_CAP_ETH_PROPS_OFFSET 0xc
+#define QUERY_FUNC_CAP_FLAGS0_OFFSET 0x8
+#define QUERY_FUNC_CAP_FLAGS1_OFFSET 0xc
#define QUERY_FUNC_CAP_QP0_TUNNEL 0x10
#define QUERY_FUNC_CAP_QP0_PROXY 0x14
#define QUERY_FUNC_CAP_QP1_TUNNEL 0x18
#define QUERY_FUNC_CAP_QP1_PROXY 0x1c
+#define QUERY_FUNC_CAP_PHYS_PORT_ID 0x28
-#define QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC 0x40
-#define QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN 0x80
+#define QUERY_FUNC_CAP_FLAGS1_FORCE_MAC 0x40
+#define QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN 0x80
+#define QUERY_FUNC_CAP_FLAGS1_NIC_INFO 0x10
-#define QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID 0x80
+#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80
if (vhcr->op_modifier == 1) {
- field = 0;
- /* ensure force vlan and force mac bits are not set */
- MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
- /* ensure that phy_wqe_gid bit is not set */
- MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_RDMA_PROPS_OFFSET);
+ /* Set nic_info bit to mark new fields support */
+ field = QUERY_FUNC_CAP_FLAGS1_NIC_INFO;
+ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET);
field = vhcr->in_modifier; /* phys-port = logical-port */
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
@@ -243,6 +244,9 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
size += 2;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_PROXY);
+ MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier],
+ QUERY_FUNC_CAP_PHYS_PORT_ID);
+
} else if (vhcr->op_modifier == 0) {
/* enable rdma and ethernet interfaces, and new quota locations */
field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
@@ -391,22 +395,22 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
goto out;
}
+ MLX4_GET(func_cap->flags1, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET);
if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) {
- MLX4_GET(field, outbox, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
- if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN) {
+		if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN) {
mlx4_err(dev, "VLAN is enforced on this port\n");
err = -EPROTONOSUPPORT;
goto out;
}
- if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC) {
+ if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_MAC) {
mlx4_err(dev, "Force mac is enabled on this port\n");
err = -EPROTONOSUPPORT;
goto out;
}
} else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
- MLX4_GET(field, outbox, QUERY_FUNC_CAP_RDMA_PROPS_OFFSET);
- if (field & QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID) {
+ MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
+ if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) {
mlx4_err(dev, "phy_wqe_gid is "
"enforced on this ib port\n");
err = -EPROTONOSUPPORT;
@@ -433,6 +437,10 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_PROXY);
func_cap->qp1_proxy_qpn = size & 0xFFFFFF;
+ if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_NIC_INFO)
+ MLX4_GET(func_cap->phys_port_id, outbox,
+ QUERY_FUNC_CAP_PHYS_PORT_ID);
+
/* All other resources are allocated by the master, but we still report
* 'num' and 'reserved' capabilities as follows:
* - num remains the maximum resource index
@@ -513,6 +521,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_MAX_XRC_OFFSET 0x67
#define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET 0x68
#define QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET 0x70
+#define QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET 0x74
#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76
#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77
#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80
@@ -529,6 +538,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98
#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0
#define QUERY_DEV_CAP_FW_REASSIGN_MAC 0x9d
+#define QUERY_DEV_CAP_VXLAN 0x9e
dev_cap->flags2 = 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -603,6 +613,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
if (field & 0x80)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN;
dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
+ if (field & 0x80)
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB;
MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET);
dev_cap->fs_max_num_qp_per_entry = field;
MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
@@ -694,6 +707,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
if (field & 1<<6)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN);
+ if (field & 1<<3)
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS;
MLX4_GET(dev_cap->max_icm_sz, outbox,
QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
@@ -842,6 +858,11 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
field &= 0x7f;
MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
+ /* For guests, disable vxlan tunneling */
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN);
+ field &= 0xf7;
+ MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN);
+
/* For guests, report Blueflame disabled */
MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
field &= 0x7f;
@@ -860,6 +881,12 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
MLX4_PUT(outbox->buf, field,
QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
}
+
+ /* turn off ipoib managed steering for guests */
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
+ field &= ~0x80;
+ MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
+
return 0;
}
@@ -1267,6 +1294,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
#define INIT_HCA_IN_SIZE 0x200
#define INIT_HCA_VERSION_OFFSET 0x000
#define INIT_HCA_VERSION 2
+#define INIT_HCA_VXLAN_OFFSET 0x0c
#define INIT_HCA_CACHELINE_SZ_OFFSET 0x0e
#define INIT_HCA_FLAGS_OFFSET 0x014
#define INIT_HCA_QPC_OFFSET 0x020
@@ -1425,6 +1453,12 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
MLX4_PUT(inbox, param->uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET);
MLX4_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET);
+ /* set parser VXLAN attributes */
+ if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) {
+ u8 parser_params = 0;
+ MLX4_PUT(inbox, parser_params, INIT_HCA_VXLAN_OFFSET);
+ }
+
err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000,
MLX4_CMD_NATIVE);
@@ -1713,6 +1747,43 @@ int mlx4_NOP(struct mlx4_dev *dev)
return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100, MLX4_CMD_NATIVE);
}
+int mlx4_get_phys_port_id(struct mlx4_dev *dev)
+{
+ u8 port;
+ u32 *outbox;
+ struct mlx4_cmd_mailbox *mailbox;
+ u32 in_mod;
+ u32 guid_hi, guid_lo;
+ int err, ret = 0;
+#define MOD_STAT_CFG_PORT_OFFSET 8
+#define MOD_STAT_CFG_GUID_H	0x14
+#define MOD_STAT_CFG_GUID_L	0x1c
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ outbox = mailbox->buf;
+
+ for (port = 1; port <= dev->caps.num_ports; port++) {
+ in_mod = port << MOD_STAT_CFG_PORT_OFFSET;
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, in_mod, 0x2,
+ MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err) {
+ mlx4_err(dev, "Fail to get port %d uplink guid\n",
+ port);
+ ret = err;
+ } else {
+ MLX4_GET(guid_hi, outbox, MOD_STAT_CFG_GUID_H);
+ MLX4_GET(guid_lo, outbox, MOD_STAT_CFG_GUID_L);
+ dev->caps.phys_port_id[port] = (u64)guid_lo |
+ (u64)guid_hi << 32;
+ }
+ }
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return ret;
+}
+
#define MLX4_WOL_SETUP_MODE (5 << 28)
int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
{
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index a0a368b7c939..6811ee00ba7c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -140,6 +140,8 @@ struct mlx4_func_cap {
u32 qp1_proxy_qpn;
u8 physical_port;
u8 port_flags;
+ u8 flags1;
+ u64 phys_port_id;
};
struct mlx4_adapter {
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 01fc6515384d..d711158b0d4b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -96,10 +96,10 @@ MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
" To activate device managed"
" flow steering when available, set to -1");
-static bool enable_64b_cqe_eqe;
+static bool enable_64b_cqe_eqe = true;
module_param(enable_64b_cqe_eqe, bool, 0444);
MODULE_PARM_DESC(enable_64b_cqe_eqe,
- "Enable 64 byte CQEs/EQEs when the FW supports this");
+ "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");
#define HCA_GLOBAL_CAP_MASK 0
@@ -388,6 +388,84 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
return 0;
}
+
+static int mlx4_get_pcie_dev_link_caps(struct mlx4_dev *dev,
+ enum pci_bus_speed *speed,
+ enum pcie_link_width *width)
+{
+ u32 lnkcap1, lnkcap2;
+ int err1, err2;
+
+#define PCIE_MLW_CAP_SHIFT 4 /* start of MLW mask in link capabilities */
+
+ *speed = PCI_SPEED_UNKNOWN;
+ *width = PCIE_LNK_WIDTH_UNKNOWN;
+
+ err1 = pcie_capability_read_dword(dev->pdev, PCI_EXP_LNKCAP, &lnkcap1);
+ err2 = pcie_capability_read_dword(dev->pdev, PCI_EXP_LNKCAP2, &lnkcap2);
+ if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
+ if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
+ *speed = PCIE_SPEED_8_0GT;
+ else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
+ *speed = PCIE_SPEED_5_0GT;
+ else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
+ *speed = PCIE_SPEED_2_5GT;
+ }
+ if (!err1) {
+ *width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
+ if (!lnkcap2) { /* pre-r3.0 */
+ if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
+ *speed = PCIE_SPEED_5_0GT;
+ else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
+ *speed = PCIE_SPEED_2_5GT;
+ }
+ }
+
+ if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) {
+ return err1 ? err1 :
+ err2 ? err2 : -EINVAL;
+ }
+ return 0;
+}
+
+static void mlx4_check_pcie_caps(struct mlx4_dev *dev)
+{
+ enum pcie_link_width width, width_cap;
+ enum pci_bus_speed speed, speed_cap;
+ int err;
+
+#define PCIE_SPEED_STR(speed) \
+ (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
+ speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
+ speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
+ "Unknown")
+
+ err = mlx4_get_pcie_dev_link_caps(dev, &speed_cap, &width_cap);
+ if (err) {
+ mlx4_warn(dev,
+ "Unable to determine PCIe device BW capabilities\n");
+ return;
+ }
+
+ err = pcie_get_minimum_link(dev->pdev, &speed, &width);
+ if (err || speed == PCI_SPEED_UNKNOWN ||
+ width == PCIE_LNK_WIDTH_UNKNOWN) {
+ mlx4_warn(dev,
+ "Unable to determine PCI device chain minimum BW\n");
+ return;
+ }
+
+ if (width != width_cap || speed != speed_cap)
+ mlx4_warn(dev,
+ "PCIe BW is different than device's capability\n");
+
+ mlx4_info(dev, "PCIe link speed is %s, device supports %s\n",
+ PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap));
+ mlx4_info(dev, "PCIe link width is x%d, device supports x%d\n",
+ width, width_cap);
+}
+
/*The function checks if there are live vf, return the num of them*/
static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
{
@@ -606,6 +684,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn;
dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn;
dev->caps.port_mask[i] = dev->caps.port_type[i];
+ dev->caps.phys_port_id[i] = func_cap.phys_port_id;
if (mlx4_get_slave_pkey_gid_tbl_len(dev, i,
&dev->caps.gid_table_len[i],
&dev->caps.pkey_table_len[i]))
@@ -1443,6 +1522,19 @@ static void choose_steering_mode(struct mlx4_dev *dev,
mlx4_log_num_mgm_entry_size);
}
+static void choose_tunnel_offload_mode(struct mlx4_dev *dev,
+ struct mlx4_dev_cap *dev_cap)
+{
+ if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED &&
+ dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)
+ dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN;
+ else
+ dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE;
+
+	mlx4_dbg(dev, "Tunneling offload mode is: %s\n",
+		 (dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ?
+		 "vxlan" : "none");
+}
+
static int mlx4_init_hca(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1483,6 +1575,11 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
}
choose_steering_mode(dev, &dev_cap);
+ choose_tunnel_offload_mode(dev, &dev_cap);
+
+ err = mlx4_get_phys_port_id(dev);
+ if (err)
+ mlx4_err(dev, "Fail to get physical port id\n");
if (mlx4_is_master(dev))
mlx4_parav_master_pf_caps(dev);
@@ -1654,7 +1751,7 @@ EXPORT_SYMBOL_GPL(mlx4_counter_alloc);
void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
- mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx);
+ mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR);
return;
}
@@ -2287,6 +2384,12 @@ slave_start:
goto err_mfunc;
}
+	/* Check if the device is functioning at its maximum possible speed.
+	 * There is no return code for this call; it just warns the user if
+	 * the bus provides less bandwidth than the PCI Express device is
+	 * capable of using.
+	 */
+ mlx4_check_pcie_caps(dev);
+
/* In master functions, the communication channel must be initialized
* after obtaining its address from fw */
if (mlx4_is_master(dev)) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index acf9d5f1f922..db7dc0b6667d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -125,9 +125,14 @@ static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer,
u32 qpn)
{
- struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[port - 1];
+ struct mlx4_steer *s_steer;
struct mlx4_promisc_qp *pqp;
+ if (port < 1 || port > dev->caps.num_ports)
+ return NULL;
+
+ s_steer = &mlx4_priv(dev)->steer[port - 1];
+
list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
if (pqp->qpn == qpn)
return pqp;
@@ -154,6 +159,9 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 port,
u32 prot;
int err;
+ if (port < 1 || port > dev->caps.num_ports)
+ return -EINVAL;
+
s_steer = &mlx4_priv(dev)->steer[port - 1];
new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL);
if (!new_entry)
@@ -238,6 +246,9 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 port,
struct mlx4_promisc_qp *pqp;
struct mlx4_promisc_qp *dqp;
+ if (port < 1 || port > dev->caps.num_ports)
+ return -EINVAL;
+
s_steer = &mlx4_priv(dev)->steer[port - 1];
pqp = get_promisc_qp(dev, port, steer, qpn);
@@ -283,6 +294,9 @@ static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port,
struct mlx4_steer_index *tmp_entry, *entry = NULL;
struct mlx4_promisc_qp *dqp, *tmp_dqp;
+ if (port < 1 || port > dev->caps.num_ports)
+		return false;
+
s_steer = &mlx4_priv(dev)->steer[port - 1];
/* if qp is not promisc, it cannot be duplicated */
@@ -324,6 +338,9 @@ static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
bool ret = false;
int i;
+ if (port < 1 || port > dev->caps.num_ports)
+		return false;
+
s_steer = &mlx4_priv(dev)->steer[port - 1];
mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -378,6 +395,9 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
int err;
struct mlx4_priv *priv = mlx4_priv(dev);
+ if (port < 1 || port > dev->caps.num_ports)
+ return -EINVAL;
+
s_steer = &mlx4_priv(dev)->steer[port - 1];
mutex_lock(&priv->mcg_table.mutex);
@@ -484,6 +504,9 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
int loc, i;
int err;
+ if (port < 1 || port > dev->caps.num_ports)
+ return -EINVAL;
+
s_steer = &mlx4_priv(dev)->steer[port - 1];
mutex_lock(&priv->mcg_table.mutex);
@@ -674,7 +697,8 @@ const u16 __sw_id_hw[] = {
[MLX4_NET_TRANS_RULE_ID_IPV6] = 0xE003,
[MLX4_NET_TRANS_RULE_ID_IPV4] = 0xE002,
[MLX4_NET_TRANS_RULE_ID_TCP] = 0xE004,
- [MLX4_NET_TRANS_RULE_ID_UDP] = 0xE006
+ [MLX4_NET_TRANS_RULE_ID_UDP] = 0xE006,
+ [MLX4_NET_TRANS_RULE_ID_VXLAN] = 0xE008
};
int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
@@ -699,7 +723,9 @@ static const int __rule_hw_sz[] = {
[MLX4_NET_TRANS_RULE_ID_TCP] =
sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
[MLX4_NET_TRANS_RULE_ID_UDP] =
- sizeof(struct mlx4_net_trans_rule_hw_tcp_udp)
+ sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
+ [MLX4_NET_TRANS_RULE_ID_VXLAN] =
+ sizeof(struct mlx4_net_trans_rule_hw_vxlan)
};
int mlx4_hw_rule_sz(struct mlx4_dev *dev,
@@ -764,6 +790,13 @@ static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec,
rule_hw->tcp_udp.src_port_msk = spec->tcp_udp.src_port_msk;
break;
+ case MLX4_NET_TRANS_RULE_ID_VXLAN:
+ rule_hw->vxlan.vni =
+ cpu_to_be32(be32_to_cpu(spec->vxlan.vni) << 8);
+ rule_hw->vxlan.vni_mask =
+ cpu_to_be32(be32_to_cpu(spec->vxlan.vni_mask) << 8);
+ break;
+
default:
return -EINVAL;
}
@@ -895,6 +928,23 @@ int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id)
}
EXPORT_SYMBOL_GPL(mlx4_flow_detach);
+int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn,
+ u32 max_range_qpn)
+{
+ int err;
+ u64 in_param;
+
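+ /* Pack the range into the 64-bit immediate: min QPN in the upper 32 bits, max QPN in the lower. */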
+ in_param = ((u64) min_range_qpn) << 32;
+ in_param |= ((u64) max_range_qpn) & 0xFFFFFFFF;
+
+ err = mlx4_cmd(dev, in_param, 0, 0,
+ MLX4_FLOW_STEERING_IB_UC_QP_RANGE,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_FLOW_STEERING_IB_UC_QP_RANGE);
+
int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
int block_mcast_loopback, enum mlx4_protocol prot,
enum mlx4_steer_type steer)
@@ -996,7 +1046,7 @@ out:
index, dev->caps.num_mgms);
else
mlx4_bitmap_free(&priv->mcg_table.bitmap,
- index - dev->caps.num_mgms);
+ index - dev->caps.num_mgms, MLX4_USE_RR);
}
mutex_unlock(&priv->mcg_table.mutex);
@@ -1087,7 +1137,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
index, amgm_index, dev->caps.num_mgms);
else
mlx4_bitmap_free(&priv->mcg_table.bitmap,
- amgm_index - dev->caps.num_mgms);
+ amgm_index - dev->caps.num_mgms, MLX4_USE_RR);
}
} else {
/* Remove entry from AMGM */
@@ -1107,7 +1157,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
prev, index, dev->caps.num_mgms);
else
mlx4_bitmap_free(&priv->mcg_table.bitmap,
- index - dev->caps.num_mgms);
+ index - dev->caps.num_mgms, MLX4_USE_RR);
}
out:
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index e582a41a802b..6b65f7795215 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -783,6 +783,11 @@ enum {
MLX4_PCI_DEV_FORCE_SENSE_PORT = 1 << 1,
};
+enum {
+ MLX4_NO_RR = 0,
+ MLX4_USE_RR = 1,
+};
+
struct mlx4_priv {
struct mlx4_dev dev;
@@ -844,9 +849,10 @@ static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
extern struct workqueue_struct *mlx4_wq;
u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap);
-void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj);
+void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj, int use_rr);
u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align);
-void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt);
+void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt,
+ int use_rr);
u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap);
int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
u32 reserved_bot, u32 reserved_top);
@@ -1236,6 +1242,11 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd);
+int mlx4_FLOW_STEERING_IB_UC_QP_RANGE_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
int mlx4_get_mgm_entry_size(struct mlx4_dev *dev);
int mlx4_get_qp_per_mgm(struct mlx4_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index d5758adceaa2..3af04c3f42ea 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -45,6 +45,7 @@
#include <linux/dcbnl.h>
#endif
#include <linux/cpu_rmap.h>
+#include <linux/ptp_clock_kernel.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/qp.h>
@@ -255,6 +256,8 @@ struct mlx4_en_tx_ring {
u16 poll_cnt;
struct mlx4_en_tx_info *tx_info;
u8 *bounce_buf;
+ u8 queue_index;
+ cpumask_t affinity_mask;
u32 last_nr_txbb;
struct mlx4_qp qp;
struct mlx4_qp_context context;
@@ -373,10 +376,14 @@ struct mlx4_en_dev {
u32 priv_pdn;
spinlock_t uar_lock;
u8 mac_removed[MLX4_MAX_PORTS + 1];
+ rwlock_t clock_lock;
+ u32 nominal_c_mult;
struct cyclecounter cycles;
struct timecounter clock;
unsigned long last_overflow_check;
unsigned long overflow_period;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
};
@@ -434,6 +441,7 @@ struct mlx4_en_mc_list {
enum mlx4_en_mclist_act action;
u8 addr[ETH_ALEN];
u64 reg_id;
+ u64 tunnel_reg_id;
};
struct mlx4_en_frag_info {
@@ -565,7 +573,7 @@ struct mlx4_en_priv {
struct list_head filters;
struct hlist_head filter_hash[1 << MLX4_EN_FILTER_HASH_SHIFT];
#endif
-
+ u64 tunnel_reg_id;
};
enum mlx4_en_wol {
@@ -653,7 +661,7 @@ static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq)
}
/* true if a socket is polling, even if it did not get the lock */
-static inline bool mlx4_en_cq_ll_polling(struct mlx4_en_cq *cq)
+static inline bool mlx4_en_cq_busy_polling(struct mlx4_en_cq *cq)
{
WARN_ON(!(cq->state & MLX4_CQ_LOCKED));
return cq->state & CQ_USER_PEND;
@@ -683,7 +691,7 @@ static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq)
return false;
}
-static inline bool mlx4_en_cq_ll_polling(struct mlx4_en_cq *cq)
+static inline bool mlx4_en_cq_busy_polling(struct mlx4_en_cq *cq)
{
return false;
}
@@ -720,7 +728,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring **pring,
- int qpn, u32 size, u16 stride, int node);
+ int qpn, u32 size, u16 stride,
+ int node, int queue_index);
void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring **pring);
int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
@@ -742,6 +751,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev,
struct mlx4_en_cq *cq,
int budget);
int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
+int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget);
void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
int is_tx, int rss, int qpn, int cqn, int user_prio,
struct mlx4_qp_context *context);
@@ -787,6 +797,7 @@ void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
struct skb_shared_hwtstamps *hwts,
u64 timestamp);
void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev);
+void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev);
int mlx4_en_timestamp_config(struct net_device *dev,
int tx_type,
int rx_filter);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index b3ee9bafff5e..24835853b753 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -32,7 +32,6 @@
* SOFTWARE.
*/
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/slab.h>
@@ -346,7 +345,7 @@ void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
+ mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index, MLX4_NO_RR);
}
static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index 84cfb40bf451..74216071201f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -31,7 +31,6 @@
* SOFTWARE.
*/
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/io-mapping.h>
@@ -59,7 +58,7 @@ EXPORT_SYMBOL_GPL(mlx4_pd_alloc);
void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn)
{
- mlx4_bitmap_free(&mlx4_priv(dev)->pd_bitmap, pdn);
+ mlx4_bitmap_free(&mlx4_priv(dev)->pd_bitmap, pdn, MLX4_USE_RR);
}
EXPORT_SYMBOL_GPL(mlx4_pd_free);
@@ -96,7 +95,7 @@ EXPORT_SYMBOL_GPL(mlx4_xrcd_alloc);
void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn)
{
- mlx4_bitmap_free(&mlx4_priv(dev)->xrcd_bitmap, xrcdn);
+ mlx4_bitmap_free(&mlx4_priv(dev)->xrcd_bitmap, xrcdn, MLX4_USE_RR);
}
void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn)
@@ -164,7 +163,7 @@ EXPORT_SYMBOL_GPL(mlx4_uar_alloc);
void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar)
{
- mlx4_bitmap_free(&mlx4_priv(dev)->uar_table.bitmap, uar->index);
+ mlx4_bitmap_free(&mlx4_priv(dev)->uar_table.bitmap, uar->index, MLX4_USE_RR);
}
EXPORT_SYMBOL_GPL(mlx4_uar_free);
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 97d342fa5032..a58bcbf1b806 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -123,6 +123,26 @@ static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
return err;
}
+int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx)
+{
+ struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+ struct mlx4_mac_table *table = &info->mac_table;
+ int i;
+
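+ /* Only consider table slots that are referenced; compare against the 48-bit MAC part of each entry. */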
+ for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
+ if (!table->refs[i])
+ continue;
+
+ if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
+ *idx = i;
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(mlx4_find_cached_mac);
+
int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
@@ -800,6 +820,47 @@ int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
}
EXPORT_SYMBOL(mlx4_SET_PORT_SCHEDULER);
+enum {
+ VXLAN_ENABLE_MODIFY = 1 << 7,
+ VXLAN_STEERING_MODIFY = 1 << 6,
+
+ VXLAN_ENABLE = 1 << 7,
+};
+
+struct mlx4_set_port_vxlan_context {
+ u32 reserved1;
+ u8 modify_flags;
+ u8 reserved2;
+ u8 enable_flags;
+ u8 steering;
+};
+
+int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering)
+{
+ int err;
+ u32 in_mod;
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_set_port_vxlan_context *context;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ context = mailbox->buf;
+ memset(context, 0, sizeof(*context));
+
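+ /* Modify both the enable and steering fields: turn VXLAN offload on and program the steering mode. */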
+ context->modify_flags = VXLAN_ENABLE_MODIFY | VXLAN_STEERING_MODIFY;
+ context->enable_flags = VXLAN_ENABLE;
+ context->steering = steering;
+
+ in_mod = MLX4_SET_PORT_VXLAN << 8 | port;
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+EXPORT_SYMBOL(mlx4_SET_PORT_VXLAN);
+
int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 2715e61dbb74..61d64ebffd56 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -35,7 +35,6 @@
#include <linux/gfp.h>
#include <linux/export.h>
-#include <linux/init.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
@@ -250,7 +249,7 @@ void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
if (mlx4_is_qp_reserved(dev, (u32) base_qpn))
return;
- mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
+ mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt, MLX4_USE_RR);
}
void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 2f3f2bc7f283..57428a0cb9dd 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -1340,43 +1340,29 @@ static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
spin_lock_irq(mlx4_tlock(dev));
r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
- if (!r)
+ if (!r) {
err = -ENOENT;
- else if (r->com.owner != slave)
+ } else if (r->com.owner != slave) {
err = -EPERM;
- else {
- switch (state) {
- case RES_CQ_BUSY:
- err = -EBUSY;
- break;
-
- case RES_CQ_ALLOCATED:
- if (r->com.state != RES_CQ_HW)
- err = -EINVAL;
- else if (atomic_read(&r->ref_count))
- err = -EBUSY;
- else
- err = 0;
- break;
-
- case RES_CQ_HW:
- if (r->com.state != RES_CQ_ALLOCATED)
- err = -EINVAL;
- else
- err = 0;
- break;
-
- default:
+ } else if (state == RES_CQ_ALLOCATED) {
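+ /* A move to ALLOCATED is only legal from HW, and only once all references are dropped. */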
+ if (r->com.state != RES_CQ_HW)
err = -EINVAL;
- }
+ else if (atomic_read(&r->ref_count))
+ err = -EBUSY;
+ else
+ err = 0;
+ } else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
+ err = -EINVAL;
+ } else {
+ err = 0;
+ }
- if (!err) {
- r->com.from_state = r->com.state;
- r->com.to_state = state;
- r->com.state = RES_CQ_BUSY;
- if (cq)
- *cq = r;
- }
+ if (!err) {
+ r->com.from_state = r->com.state;
+ r->com.to_state = state;
+ r->com.state = RES_CQ_BUSY;
+ if (cq)
+ *cq = r;
}
spin_unlock_irq(mlx4_tlock(dev));
@@ -1385,7 +1371,7 @@ static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
}
static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
- enum res_cq_states state, struct res_srq **srq)
+ enum res_srq_states state, struct res_srq **srq)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
@@ -1394,39 +1380,25 @@ static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
spin_lock_irq(mlx4_tlock(dev));
r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
- if (!r)
+ if (!r) {
err = -ENOENT;
- else if (r->com.owner != slave)
+ } else if (r->com.owner != slave) {
err = -EPERM;
- else {
- switch (state) {
- case RES_SRQ_BUSY:
+ } else if (state == RES_SRQ_ALLOCATED) {
+ if (r->com.state != RES_SRQ_HW)
err = -EINVAL;
- break;
-
- case RES_SRQ_ALLOCATED:
- if (r->com.state != RES_SRQ_HW)
- err = -EINVAL;
- else if (atomic_read(&r->ref_count))
- err = -EBUSY;
- break;
-
- case RES_SRQ_HW:
- if (r->com.state != RES_SRQ_ALLOCATED)
- err = -EINVAL;
- break;
-
- default:
- err = -EINVAL;
- }
+ else if (atomic_read(&r->ref_count))
+ err = -EBUSY;
+ } else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
+ err = -EINVAL;
+ }
- if (!err) {
- r->com.from_state = r->com.state;
- r->com.to_state = state;
- r->com.state = RES_SRQ_BUSY;
- if (srq)
- *srq = r;
- }
+ if (!err) {
+ r->com.from_state = r->com.state;
+ r->com.to_state = state;
+ r->com.state = RES_SRQ_BUSY;
+ if (srq)
+ *srq = r;
}
spin_unlock_irq(mlx4_tlock(dev));
@@ -3634,7 +3606,7 @@ static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
!is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
list_for_each_entry_safe(res, tmp, rlist, list) {
be_mac = cpu_to_be64(res->mac << 16);
- if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN))
+ if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
return 0;
}
pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
@@ -3844,6 +3816,16 @@ int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
return err;
}
+int mlx4_FLOW_STEERING_IB_UC_QP_RANGE_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ return -EPERM;
+}
+
static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
{
struct res_gid *rgid;
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index 8fdf23753779..98faf870b0b0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -31,7 +31,6 @@
* SOFTWARE.
*/
-#include <linux/init.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/srq.h>
@@ -117,7 +116,7 @@ err_put:
mlx4_table_put(dev, &srq_table->table, *srqn);
err_out:
- mlx4_bitmap_free(&srq_table->bitmap, *srqn);
+ mlx4_bitmap_free(&srq_table->bitmap, *srqn, MLX4_NO_RR);
return err;
}
@@ -145,7 +144,7 @@ void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
mlx4_table_put(dev, &srq_table->cmpt_table, srqn);
mlx4_table_put(dev, &srq_table->table, srqn);
- mlx4_bitmap_free(&srq_table->bitmap, srqn);
+ mlx4_bitmap_free(&srq_table->bitmap, srqn, MLX4_NO_RR);
}
static void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 8675d26a678b..405c4fbcd0ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -32,7 +32,6 @@
#include <asm-generic/kmap_types.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index c2d660be6f76..43c5f4809526 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -201,10 +201,23 @@ EXPORT_SYMBOL(mlx5_core_query_cq);
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
- int type, struct mlx5_cq_modify_params *params)
+ struct mlx5_modify_cq_mbox_in *in, int in_sz)
{
- return -ENOSYS;
+ struct mlx5_modify_cq_mbox_out out;
+ int err;
+
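+ /* Issue MODIFY_CQ through the command interface and translate any firmware status into an errno. */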
+ memset(&out, 0, sizeof(out));
+ in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MODIFY_CQ);
+ err = mlx5_cmd_exec(dev, in, in_sz, &out, sizeof(out));
+ if (err)
+ return err;
+
+ if (out.hdr.status)
+ return mlx5_cmd_status_to_err(&out.hdr);
+
+ return 0;
}
+EXPORT_SYMBOL(mlx5_core_modify_cq);
int mlx5_init_cq_table(struct mlx5_core_dev *dev)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index 80f6d127257a..10e1f1a18255 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -275,7 +275,7 @@ void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev)
}
static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
- int index)
+ int index, int *is_str)
{
struct mlx5_query_qp_mbox_out *out;
struct mlx5_qp_context *ctx;
@@ -293,19 +293,40 @@ static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
goto out;
}
+ *is_str = 0;
ctx = &out->ctx;
switch (index) {
case QP_PID:
param = qp->pid;
break;
case QP_STATE:
- param = be32_to_cpu(ctx->flags) >> 28;
+ param = (u64)mlx5_qp_state_str(be32_to_cpu(ctx->flags) >> 28);
+ *is_str = 1;
break;
case QP_XPORT:
- param = (be32_to_cpu(ctx->flags) >> 16) & 0xff;
+ param = (u64)mlx5_qp_type_str((be32_to_cpu(ctx->flags) >> 16) & 0xff);
+ *is_str = 1;
break;
case QP_MTU:
- param = ctx->mtu_msgmax >> 5;
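+ /* Translate the IB MTU enumeration held in the QP context into a byte count for display. */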
+ switch (ctx->mtu_msgmax >> 5) {
+ case IB_MTU_256:
+ param = 256;
+ break;
+ case IB_MTU_512:
+ param = 512;
+ break;
+ case IB_MTU_1024:
+ param = 1024;
+ break;
+ case IB_MTU_2048:
+ param = 2048;
+ break;
+ case IB_MTU_4096:
+ param = 4096;
+ break;
+ default:
+ param = 0;
+ }
break;
case QP_N_RECV:
param = 1 << ((ctx->rq_size_stride >> 3) & 0xf);
@@ -414,6 +435,7 @@ static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count,
struct mlx5_field_desc *desc;
struct mlx5_rsc_debug *d;
char tbuf[18];
+ int is_str = 0;
u64 field;
int ret;
@@ -424,7 +446,7 @@ static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count,
d = (void *)(desc - desc->i) - sizeof(*d);
switch (d->type) {
case MLX5_DBG_RSC_QP:
- field = qp_read_field(d->dev, d->object, desc->i);
+ field = qp_read_field(d->dev, d->object, desc->i, &is_str);
break;
case MLX5_DBG_RSC_EQ:
@@ -440,7 +462,12 @@ static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count,
return -EINVAL;
}
- ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field);
+
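+ /* String-valued fields carry a pointer in 'field'; everything else is printed as hex. */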
+ if (is_str)
+ ret = snprintf(tbuf, sizeof(tbuf), "%s\n", (const char *)field);
+ else
+ ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field);
+
if (ret > 0) {
if (copy_to_user(buf, tbuf, ret))
return -EFAULT;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 40a9f5ed814d..a064f06e0cb8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -460,7 +460,10 @@ disable_msix:
err_stop_poll:
mlx5_stop_health_poll(dev);
- mlx5_cmd_teardown_hca(dev);
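+ /* If teardown fails the device may still own these resources, so skip the remaining cleanup. */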
+ if (mlx5_cmd_teardown_hca(dev)) {
+ dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
+ return err;
+ }
err_pagealloc_stop:
mlx5_pagealloc_stop(dev);
@@ -503,7 +506,10 @@ void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
mlx5_eq_cleanup(dev);
mlx5_disable_msix(dev);
mlx5_stop_health_poll(dev);
- mlx5_cmd_teardown_hca(dev);
+ if (mlx5_cmd_teardown_hca(dev)) {
+ dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
+ return;
+ }
mlx5_pagealloc_stop(dev);
mlx5_reclaim_startup_pages(dev);
mlx5_core_disable_hca(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 37b6ad1f9a1b..d59790a82bc3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -99,7 +99,7 @@ enum {
enum {
MLX5_MAX_RECLAIM_TIME_MILI = 5000,
- MLX5_NUM_4K_IN_PAGE = PAGE_SIZE / 4096,
+ MLX5_NUM_4K_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
@@ -192,10 +192,8 @@ static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr)
struct fw_page *fp;
unsigned n;
- if (list_empty(&dev->priv.free_list)) {
+ if (list_empty(&dev->priv.free_list))
return -ENOMEM;
- mlx5_core_warn(dev, "\n");
- }
fp = list_entry(dev->priv.free_list.next, struct fw_page, list);
n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
@@ -208,7 +206,7 @@ static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr)
if (!fp->free_count)
list_del(&fp->list);
- *addr = fp->addr + n * 4096;
+ *addr = fp->addr + n * MLX5_ADAPTER_PAGE_SIZE;
return 0;
}
@@ -224,14 +222,15 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr)
return;
}
- n = (addr & ~PAGE_MASK) % 4096;
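+ /* The chunk index within the host page is the offset divided by the adapter page size, not a modulo. */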
+ n = (addr & ~PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
fwp->free_count++;
set_bit(n, &fwp->bitmask);
if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) {
rb_erase(&fwp->rb_node, &dev->priv.page_root);
if (fwp->free_count != 1)
list_del(&fwp->list);
- dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ dma_unmap_page(&dev->pdev->dev, addr & PAGE_MASK, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
__free_page(fwp->page);
kfree(fwp);
} else if (fwp->free_count == 1) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index f6afe7b5a675..8c9ac870ecb1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -57,7 +57,7 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
in->arg = cpu_to_be32(arg);
in->register_id = cpu_to_be16(reg_num);
err = mlx5_cmd_exec(dev, in, sizeof(*in) + size_in, out,
- sizeof(out) + size_out);
+ sizeof(*out) + size_out);
if (err)
goto ex2;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index 54faf8bfcaf4..510576213dd0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -74,7 +74,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
struct mlx5_destroy_qp_mbox_out dout;
int err;
- memset(&dout, 0, sizeof(dout));
+ memset(&out, 0, sizeof(out));
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP);
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
@@ -84,7 +84,8 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
}
if (out.hdr.status) {
- pr_warn("current num of QPs 0x%x\n", atomic_read(&dev->num_qps));
+ mlx5_core_warn(dev, "current num of QPs 0x%x\n",
+ atomic_read(&dev->num_qps));
return mlx5_cmd_status_to_err(&out.hdr);
}
diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c
index 106eb972f2ac..16435b3cfa9f 100644
--- a/drivers/net/ethernet/micrel/ks8695net.c
+++ b/drivers/net/ethernet/micrel/ks8695net.c
@@ -21,7 +21,6 @@
#include <linux/ioport.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index ddd252a3da9c..ce84dc289c8f 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -4128,10 +4128,10 @@ static int hw_add_addr(struct ksz_hw *hw, u8 *mac_addr)
int i;
int j = ADDITIONAL_ENTRIES;
- if (!memcmp(hw->override_addr, mac_addr, ETH_ALEN))
+ if (ether_addr_equal(hw->override_addr, mac_addr))
return 0;
for (i = 0; i < hw->addr_list_size; i++) {
- if (!memcmp(hw->address[i], mac_addr, ETH_ALEN))
+ if (ether_addr_equal(hw->address[i], mac_addr))
return 0;
if (ADDITIONAL_ENTRIES == j && empty_addr(hw->address[i]))
j = i;
@@ -4149,7 +4149,7 @@ static int hw_del_addr(struct ksz_hw *hw, u8 *mac_addr)
int i;
for (i = 0; i < hw->addr_list_size; i++) {
- if (!memcmp(hw->address[i], mac_addr, ETH_ALEN)) {
+ if (ether_addr_equal(hw->address[i], mac_addr)) {
memset(hw->address[i], 0, ETH_ALEN);
writel(0, hw->io + ADD_ADDR_INCR * i +
KS_ADD_ADDR_0_HI);
@@ -5853,15 +5853,12 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
struct dev_info *hw_priv = priv->adapter;
struct ksz_hw *hw = &hw_priv->hw;
struct ksz_port *port = &priv->port;
- int rc;
int result = 0;
struct mii_ioctl_data *data = if_mii(ifr);
if (down_interruptible(&priv->proc_sem))
return -ERESTARTSYS;
- /* assume success */
- rc = 0;
switch (cmd) {
/* Get address of MII PHY in use. */
case SIOCGMIIPHY:
@@ -7104,8 +7101,7 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id)
ETH_ALEN);
else {
memcpy(dev->dev_addr, sw->other_addr, ETH_ALEN);
- if (!memcmp(sw->other_addr, hw->override_addr,
- ETH_ALEN))
+ if (ether_addr_equal(sw->other_addr, hw->override_addr))
dev->dev_addr[5] += port->first_port;
}
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index cbd013379252..5020fd47825d 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -13,7 +13,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
index 79257f71c5d9..a5512a97cc4d 100644
--- a/drivers/net/ethernet/natsemi/jazzsonic.c
+++ b/drivers/net/ethernet/natsemi/jazzsonic.c
@@ -24,7 +24,6 @@
#include <linux/fcntl.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/string.h>
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index 04b3ec1352f1..9e4ddbba7036 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -37,7 +37,6 @@
#include <linux/fcntl.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/string.h>
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index d3b47003a575..dbccf1de49ec 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -22,8 +22,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*
* ChangeLog
@@ -2236,7 +2235,6 @@ out_disable:
pci_disable_device(pci_dev);
out_free:
free_netdev(ndev);
- pci_set_drvdata(pci_dev, NULL);
out:
return err;
}
@@ -2260,7 +2258,6 @@ static void ns83820_remove_one(struct pci_dev *pci_dev)
dev->rx_info.descs, dev->rx_info.phy_descs);
pci_disable_device(dev->pci_dev);
free_netdev(ndev);
- pci_set_drvdata(pci_dev, NULL);
}
static DEFINE_PCI_DEVICE_TABLE(ns83820_pci_tbl) = {
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index fbe5363cb89c..089b713b9f7b 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -2148,7 +2148,7 @@ __vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
* __vxge_hw_ring_replenish - Initial replenish of RxDs
* This function replenishes the RxDs from reserve array to work array
*/
-enum vxge_hw_status
+static enum vxge_hw_status
vxge_hw_ring_replenish(struct __vxge_hw_ring *ring)
{
void *rxd;
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index f9876ea8c8bf..1ded50ca1600 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -87,6 +87,7 @@ static unsigned int bw_percentage[VXGE_HW_MAX_VIRTUAL_PATHS] =
module_param_array(bw_percentage, uint, NULL, 0);
static struct vxge_drv_config *driver_config;
+static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
static inline int is_vxge_card_up(struct vxgedev *vdev)
{
@@ -507,7 +508,8 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
* if rss is disabled/enabled, so key off of that.
*/
if (ext_info.rth_value)
- skb->rxhash = ext_info.rth_value;
+ skb_set_hash(skb, ext_info.rth_value,
+ PKT_HASH_TYPE_L3);
vxge_rx_complete(ring, skb, ext_info.vlan,
pkt_length, &ext_info);
@@ -1429,7 +1431,7 @@ vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath, struct macInfo *mac)
return status;
}
- while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) {
+ while (!ether_addr_equal(mac->macaddr, macaddr)) {
status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
macaddr, macmask);
if (status != VXGE_HW_OK)
@@ -1970,7 +1972,7 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
}
/* reset vpaths */
-enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
+static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_vpath *vpath;
@@ -3189,7 +3191,7 @@ static enum vxge_hw_status vxge_timestamp_config(struct __vxge_hw_device *devh)
return status;
}
-static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data)
+static int vxge_hwtstamp_set(struct vxgedev *vdev, void __user *data)
{
struct hwtstamp_config config;
int i;
@@ -3250,6 +3252,21 @@ static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data)
return 0;
}
+static int vxge_hwtstamp_get(struct vxgedev *vdev, void __user *data)
+{
+ struct hwtstamp_config config;
+
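+ /* Report the current timestamping state; this driver never enables TX timestamping. */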
+ config.flags = 0;
+ config.tx_type = HWTSTAMP_TX_OFF;
+ config.rx_filter = (vdev->rx_hwts ?
+ HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
+
+ if (copy_to_user(data, &config, sizeof(config)))
+ return -EFAULT;
+
+ return 0;
+}
+
/**
* vxge_ioctl
* @dev: Device pointer.
@@ -3263,19 +3280,15 @@ static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data)
static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct vxgedev *vdev = netdev_priv(dev);
- int ret;
switch (cmd) {
case SIOCSHWTSTAMP:
- ret = vxge_hwtstamp_ioctl(vdev, rq->ifr_data);
- if (ret)
- return ret;
- break;
+ return vxge_hwtstamp_set(vdev, rq->ifr_data);
+ case SIOCGHWTSTAMP:
+ return vxge_hwtstamp_get(vdev, rq->ifr_data);
default:
return -EOPNOTSUPP;
}
-
- return 0;
}
/**
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.h b/drivers/net/ethernet/neterion/vxge/vxge-main.h
index 36ca40f8f249..3a79d93b8445 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.h
@@ -427,7 +427,6 @@ void vxge_os_timer(struct timer_list *timer, void (*func)(unsigned long data),
}
void vxge_initialize_ethtool_ops(struct net_device *ndev);
-enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override);
/* #define VXGE_DEBUG_INIT: debug for initialization functions
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
index 99749bd07d72..9e1aaa7f36bb 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
@@ -1956,8 +1956,7 @@ exit:
* @vid: vlan id to be added for this vpath into the list
*
* Adds the given vlan id into the list for this vpath.
- * see also: vxge_hw_vpath_vid_delete, vxge_hw_vpath_vid_get and
- * vxge_hw_vpath_vid_get_next
+ * see also: vxge_hw_vpath_vid_delete
*
*/
enum vxge_hw_status
@@ -1979,45 +1978,13 @@ exit:
}
/**
- * vxge_hw_vpath_vid_get - Get the first vid entry for this vpath
- * from vlan id table.
- * @vp: Vpath handle.
- * @vid: Buffer to return vlan id
- *
- * Returns the first vlan id in the list for this vpath.
- * see also: vxge_hw_vpath_vid_get_next
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_vid_get(struct __vxge_hw_vpath_handle *vp, u64 *vid)
-{
- u64 data;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- status = __vxge_hw_vpath_rts_table_get(vp,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
- 0, vid, &data);
-
- *vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
-exit:
- return status;
-}
-
-/**
* vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
* to vlan id table.
* @vp: Vpath handle.
* @vid: vlan id to be added for this vpath into the list
*
* Adds the given vlan id into the list for this vpath.
- * see also: vxge_hw_vpath_vid_add, vxge_hw_vpath_vid_get and
- * vxge_hw_vpath_vid_get_next
+ * see also: vxge_hw_vpath_vid_add
*
*/
enum vxge_hw_status
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
index 4a518a3b131c..ba6f833bb059 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
@@ -1918,9 +1918,6 @@ vxge_hw_ring_rxd_post_post(
struct __vxge_hw_ring *ring_handle,
void *rxdh);
-enum vxge_hw_status
-vxge_hw_ring_replenish(struct __vxge_hw_ring *ring_handle);
-
void
vxge_hw_ring_rxd_post_post_wmb(
struct __vxge_hw_ring *ring_handle,
@@ -2186,11 +2183,6 @@ vxge_hw_vpath_vid_add(
u64 vid);
enum vxge_hw_status
-vxge_hw_vpath_vid_get(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u64 *vid);
-
-enum vxge_hw_status
vxge_hw_vpath_vid_delete(
struct __vxge_hw_vpath_handle *vpath_handle,
u64 vid);
diff --git a/drivers/net/ethernet/netx-eth.c b/drivers/net/ethernet/netx-eth.c
index e6f0a4366f90..31eb911e4763 100644
--- a/drivers/net/ethernet/netx-eth.c
+++ b/drivers/net/ethernet/netx-eth.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/init.h>
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 1e8b9514718b..70cf97fe67f2 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -26,8 +26,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Known bugs:
* We suspect that on some hardware no TX done interrupts are generated.
@@ -59,7 +58,6 @@
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
-#include <linux/init.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
@@ -6020,7 +6018,6 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
out_error:
if (phystate_orig)
writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
- pci_set_drvdata(pci_dev, NULL);
out_freering:
free_rings(dev);
out_unmap:
@@ -6091,7 +6088,6 @@ static void nv_remove(struct pci_dev *pci_dev)
pci_release_regions(pci_dev);
pci_disable_device(pci_dev);
free_netdev(dev);
- pci_set_drvdata(pci_dev, NULL);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index ba3ca18611f7..422d9b51ac24 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -19,7 +19,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index 2a9003071d51..2a55d6d53ee6 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _PCH_GBE_H_
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
index ff3ad70935a6..51250363566b 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "pch_gbe.h"
#include "pch_gbe_phy.h"
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h
index 94aaac5b057b..91ce07c8306c 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _PCH_GBE_API_H_
#define _PCH_GBE_API_H_
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index f0ceb89af931..826f0ccdc23c 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "pch_gbe.h"
#include "pch_gbe_api.h"
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 27ffe0ebf0a6..464e91058c81 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "pch_gbe.h"
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
index cf7c9b3a255b..08d4be616064 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "pch_gbe.h"
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
index 8b7ff75fc8e0..a5cad5ea9436 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "pch_gbe.h"
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
index 0cbe69206e04..95ad0151ad02 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _PCH_GBE_PHY_H_
#define _PCH_GBE_PHY_H_
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index 07a890eb72ad..9a6cb482dcd0 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -1053,7 +1053,7 @@ static int yellowfin_rx(struct net_device *dev)
struct sk_buff *rx_skb = yp->rx_skbuff[entry];
s16 frame_status;
u16 desc_status;
- int data_size;
+ int data_size, yf_size;
u8 *buf_addr;
if(!desc->result_status)
@@ -1070,6 +1070,9 @@ static int yellowfin_rx(struct net_device *dev)
__func__, frame_status);
if (--boguscnt < 0)
break;
+
+ yf_size = sizeof(struct yellowfin_desc);
+
if ( ! (desc_status & RX_EOP)) {
if (data_size != 0)
netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %04x, data_size %d!\n",
@@ -1096,12 +1099,12 @@ static int yellowfin_rx(struct net_device *dev)
if (status2 & 0x80) dev->stats.rx_dropped++;
#ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
} else if ((yp->flags & HasMACAddrBug) &&
- memcmp(le32_to_cpu(yp->rx_ring_dma +
- entry*sizeof(struct yellowfin_desc)),
- dev->dev_addr, 6) != 0 &&
- memcmp(le32_to_cpu(yp->rx_ring_dma +
- entry*sizeof(struct yellowfin_desc)),
- "\377\377\377\377\377\377", 6) != 0) {
+ !ether_addr_equal(le32_to_cpu(yp->rx_ring_dma +
+ entry * yf_size),
+ dev->dev_addr) &&
+ !ether_addr_equal(le32_to_cpu(yp->rx_ring_dma +
+ entry * yf_size),
+ "\377\377\377\377\377\377")) {
if (bogus_rx++ == 0)
netdev_warn(dev, "Bad frame to %pM\n",
buf_addr);
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index dbaa49e58b0c..9abf70d74b31 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -13,11 +13,9 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.h b/drivers/net/ethernet/pasemi/pasemi_mac.h
index f2749d46c125..a5807703ab96 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.h
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.h
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef PASEMI_MAC_H
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c b/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c
index 4825959a0efe..25fae568261f 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
diff --git a/drivers/net/ethernet/qlogic/netxen/Makefile b/drivers/net/ethernet/qlogic/netxen/Makefile
index 861a0590b1f4..e14e60c88381 100644
--- a/drivers/net/ethernet/qlogic/netxen/Makefile
+++ b/drivers/net/ethernet/qlogic/netxen/Makefile
@@ -13,9 +13,7 @@
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston,
-# MA 02111-1307, USA.
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
# The full GNU General Public License is included in this distribution
# in the file called "COPYING".
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index 9adcdbb49476..6e426ae94692 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -14,9 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution
* in the file called "COPYING".
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
index 1bcaf45aa864..6f6be57f4690 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
@@ -14,9 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution
* in the file called "COPYING".
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index 4ca2c196c98a..87e073c6e291 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -14,9 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution
* in the file called "COPYING".
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
index 0c64c82b9acf..a310c2f6502a 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
@@ -14,9 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution
* in the file called "COPYING".
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index 67efe754367d..db4280ce9c09 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -14,9 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution
* in the file called "COPYING".
@@ -663,7 +661,7 @@ static int nx_p3_nic_add_mac(struct netxen_adapter *adapter,
list_for_each(head, del_list) {
cur = list_entry(head, nx_mac_list_t, list);
- if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0) {
+ if (ether_addr_equal(addr, cur->mac_addr)) {
list_move_tail(head, &adapter->mac_list);
return 0;
}
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h
index e2c5b6f2df03..7433c4d21601 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h
@@ -14,9 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution
* in the file called "COPYING".
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index cc68657f0536..32058614151a 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -14,9 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution
* in the file called "COPYING".
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 3bec8cfebf99..70849dea32b1 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -14,9 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution
* in the file called "COPYING".
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 0758b9435358..2eabd44f8914 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -8,7 +8,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index f2a7c7166e24..f19f81cde134 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -38,8 +38,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 3
-#define _QLCNIC_LINUX_SUBVERSION 52
-#define QLCNIC_LINUX_VERSIONID "5.3.52"
+#define _QLCNIC_LINUX_SUBVERSION 55
+#define QLCNIC_LINUX_VERSIONID "5.3.55"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -105,6 +105,8 @@
#define QLCNIC_DEF_TX_RINGS 4
#define QLCNIC_MAX_VNIC_TX_RINGS 4
#define QLCNIC_MAX_VNIC_SDS_RINGS 4
+#define QLCNIC_83XX_MINIMUM_VECTOR 3
+#define QLCNIC_82XX_MINIMUM_VECTOR 2
enum qlcnic_queue_type {
QLCNIC_TX_QUEUE = 1,
@@ -115,6 +117,10 @@ enum qlcnic_queue_type {
#define QLCNIC_VNIC_MODE 0xFF
#define QLCNIC_DEFAULT_MODE 0x0
+/* Virtual NIC function count */
+#define QLC_DEFAULT_VNIC_COUNT 8
+#define QLC_84XX_VNIC_COUNT 16
+
/*
* Following are the states of the Phantom. Phantom will set them and
* Host will read to check if the fields are correct.
@@ -365,6 +371,7 @@ struct qlcnic_rx_buffer {
*/
#define QLCNIC_INTR_COAL_TYPE_RX 1
#define QLCNIC_INTR_COAL_TYPE_TX 2
+#define QLCNIC_INTR_COAL_TYPE_RX_TX 3
#define QLCNIC_DEF_INTR_COALESCE_RX_TIME_US 3
#define QLCNIC_DEF_INTR_COALESCE_RX_PACKETS 256
@@ -374,7 +381,7 @@ struct qlcnic_rx_buffer {
#define QLCNIC_INTR_DEFAULT 0x04
#define QLCNIC_CONFIG_INTR_COALESCE 3
-#define QLCNIC_DEV_INFO_SIZE 1
+#define QLCNIC_DEV_INFO_SIZE 2
struct qlcnic_nic_intr_coalesce {
u8 type;
@@ -462,8 +469,10 @@ struct qlcnic_hardware_context {
u16 max_rx_ques;
u16 max_mtu;
u32 msg_enable;
- u16 act_pci_func;
+ u16 total_nic_func;
u16 max_pci_func;
+ u32 max_vnic_func;
+ u32 total_pci_func;
u32 capabilities;
u32 extra_capability[3];
@@ -791,9 +800,10 @@ struct qlcnic_cardrsp_tx_ctx {
#define QLCNIC_MAC_VLAN_ADD 3
#define QLCNIC_MAC_VLAN_DEL 4
-struct qlcnic_mac_list_s {
+struct qlcnic_mac_vlan_list {
struct list_head list;
uint8_t mac_addr[ETH_ALEN+2];
+ u16 vlan_id;
};
/* MAC Learn */
@@ -860,7 +870,7 @@ struct qlcnic_mac_list_s {
#define QLCNIC_FW_CAP2_HW_LRO_IPV6 BIT_3
#define QLCNIC_FW_CAPABILITY_SET_DRV_VER BIT_5
#define QLCNIC_FW_CAPABILITY_2_BEACON BIT_7
-#define QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG BIT_8
+#define QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG BIT_9
/* module types */
#define LINKEVENT_MODULE_NOT_PRESENT 1
@@ -954,6 +964,7 @@ struct qlcnic_ipaddr {
#define QLCNIC_TX_INTR_SHARED 0x10000
#define QLCNIC_APP_CHANGED_FLAGS 0x20000
#define QLCNIC_HAS_PHYS_PORT_ID 0x40000
+#define QLCNIC_TSS_RSS 0x80000
#define QLCNIC_IS_MSI_FAMILY(adapter) \
((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
@@ -963,6 +974,9 @@ struct qlcnic_ipaddr {
#define QLCNIC_BEACON_EANBLE 0xC
#define QLCNIC_BEACON_DISABLE 0xD
+#define QLCNIC_BEACON_ON 2
+#define QLCNIC_BEACON_OFF 0
+
#define QLCNIC_MSIX_TBL_SPACE 8192
#define QLCNIC_PCI_REG_MSIX_TBL 0x44
#define QLCNIC_MSIX_TBL_PGSIZE 4096
@@ -1047,6 +1061,9 @@ struct qlcnic_adapter {
u8 drv_tx_rings; /* max tx rings supported by driver */
u8 drv_sds_rings; /* max sds rings supported by driver */
+ u8 drv_tss_rings; /* tss ring input */
+ u8 drv_rss_rings; /* rss ring input */
+
u8 rx_csum;
u8 portnum;
@@ -1072,6 +1089,7 @@ struct qlcnic_adapter {
u64 dev_rst_time;
bool drv_mac_learn;
bool fdb_mac_learn;
+ bool rx_mac_learn;
unsigned long vlans[BITS_TO_LONGS(VLAN_N_VID)];
u8 flash_mfg_id;
struct qlcnic_npar_info *npars;
@@ -1260,7 +1278,7 @@ struct qlcnic_pci_func_cfg {
u16 port_num;
u8 pci_func;
u8 func_state;
- u8 def_mac_addr[6];
+ u8 def_mac_addr[ETH_ALEN];
};
struct qlcnic_npar_func_cfg {
@@ -1462,8 +1480,6 @@ int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter);
int qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config);
int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *, u64 off, u64 data);
int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *, u64 off, u64 *data);
-void qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *, u64, u64 *);
-void qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *, u64, u64);
#define ADDR_IN_RANGE(addr, low, high) \
(((addr) < (high)) && ((addr) >= (low)))
@@ -1499,16 +1515,11 @@ void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int);
#define MAX_CTL_CHECK 1000
-int qlcnic_wol_supported(struct qlcnic_adapter *adapter);
void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter);
void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter);
int qlcnic_dump_fw(struct qlcnic_adapter *);
int qlcnic_enable_fw_dump_state(struct qlcnic_adapter *);
bool qlcnic_check_fw_dump_state(struct qlcnic_adapter *);
-pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *,
- pci_channel_state_t);
-pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *);
-void qlcnic_82xx_io_resume(struct pci_dev *);
/* Functions from qlcnic_init.c */
void qlcnic_schedule_work(struct qlcnic_adapter *, work_func_t, int);
@@ -1543,9 +1554,7 @@ int qlcnic_check_fw_status(struct qlcnic_adapter *adapter);
void qlcnic_watchdog_task(struct work_struct *work);
void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
struct qlcnic_host_rds_ring *rds_ring, u8 ring_id);
-int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max);
void qlcnic_set_multi(struct net_device *netdev);
-void __qlcnic_set_multi(struct net_device *, u16);
int qlcnic_nic_add_mac(struct qlcnic_adapter *, const u8 *, u16);
int qlcnic_nic_del_mac(struct qlcnic_adapter *, const u8 *);
void qlcnic_82xx_free_mac_list(struct qlcnic_adapter *adapter);
@@ -1558,13 +1567,11 @@ netdev_features_t qlcnic_fix_features(struct net_device *netdev,
netdev_features_t features);
int qlcnic_set_features(struct net_device *netdev, netdev_features_t features);
int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable);
-int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
void qlcnic_update_cmd_producer(struct qlcnic_host_tx_ring *);
/* Functions from qlcnic_ethtool.c */
int qlcnic_check_loopback_buff(unsigned char *, u8 []);
int qlcnic_do_lb_test(struct qlcnic_adapter *, u8);
-int qlcnic_loopback_test(struct net_device *, u8);
/* Functions from qlcnic_main.c */
int qlcnic_reset_context(struct qlcnic_adapter *);
@@ -1573,10 +1580,9 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int);
netdev_tx_t qlcnic_xmit_frame(struct sk_buff *, struct net_device *);
void qlcnic_set_tx_ring_count(struct qlcnic_adapter *, u8);
void qlcnic_set_sds_ring_count(struct qlcnic_adapter *, u8);
-int qlcnic_setup_rings(struct qlcnic_adapter *, u8, u8);
+int qlcnic_setup_rings(struct qlcnic_adapter *);
int qlcnic_validate_rings(struct qlcnic_adapter *, __u32, int);
void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
-void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *);
int qlcnic_enable_msix(struct qlcnic_adapter *, u32);
void qlcnic_set_drv_version(struct qlcnic_adapter *);
@@ -1605,11 +1611,8 @@ void qlcnic_dump_mbx(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
-void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
-void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
void qlcnic_82xx_add_sysfs(struct qlcnic_adapter *adapter);
void qlcnic_82xx_remove_sysfs(struct qlcnic_adapter *adapter);
-int qlcnic_82xx_get_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
@@ -1617,7 +1620,7 @@ void qlcnic_set_vlan_config(struct qlcnic_adapter *,
struct qlcnic_esw_func_cfg *);
void qlcnic_set_eswitch_port_features(struct qlcnic_adapter *,
struct qlcnic_esw_func_cfg *);
-
+int qlcnic_setup_tss_rss_intr(struct qlcnic_adapter *);
void qlcnic_down(struct qlcnic_adapter *, struct net_device *);
int qlcnic_up(struct qlcnic_adapter *, struct net_device *);
void __qlcnic_down(struct qlcnic_adapter *, struct net_device *);
@@ -1632,15 +1635,15 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *);
int qlcnic_set_default_offload_settings(struct qlcnic_adapter *);
int qlcnic_reset_npar_config(struct qlcnic_adapter *);
int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *);
-void qlcnic_add_lb_filter(struct qlcnic_adapter *, struct sk_buff *, int, u16);
-int qlcnic_get_beacon_state(struct qlcnic_adapter *, u8 *);
int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter);
int qlcnic_read_mac_addr(struct qlcnic_adapter *);
int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int);
void qlcnic_set_netdev_features(struct qlcnic_adapter *,
struct qlcnic_esw_func_cfg *);
void qlcnic_sriov_vf_schedule_multi(struct net_device *);
-void qlcnic_vf_add_mc_list(struct net_device *, u16);
+int qlcnic_is_valid_nic_func(struct qlcnic_adapter *, u8);
+int qlcnic_get_pci_func_type(struct qlcnic_adapter *, u16, u16 *, u16 *,
+ u16 *);
/*
* QLOGIC Board information
@@ -1674,11 +1677,8 @@ static inline int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
err = netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
if (err)
- dev_err(&adapter->pdev->dev, "failed to set %d Tx queues\n",
- adapter->drv_tx_rings);
- else
- dev_info(&adapter->pdev->dev, "Set %d Tx queues\n",
- adapter->drv_tx_rings);
+ netdev_err(netdev, "failed to set %d Tx queues\n",
+ adapter->drv_tx_rings);
return err;
}
@@ -1744,7 +1744,8 @@ struct qlcnic_hardware_ops {
int (*change_macvlan) (struct qlcnic_adapter *, u8*, u16, u8);
void (*napi_enable) (struct qlcnic_adapter *);
void (*napi_disable) (struct qlcnic_adapter *);
- void (*config_intr_coal) (struct qlcnic_adapter *);
+ int (*config_intr_coal) (struct qlcnic_adapter *,
+ struct ethtool_coalesce *);
int (*config_rss) (struct qlcnic_adapter *, int);
int (*config_hw_lro) (struct qlcnic_adapter *, int);
int (*config_loopback) (struct qlcnic_adapter *, u8);
@@ -1759,6 +1760,15 @@ struct qlcnic_hardware_ops {
pci_channel_state_t);
pci_ers_result_t (*io_slot_reset) (struct pci_dev *);
void (*io_resume) (struct pci_dev *);
+ void (*get_beacon_state)(struct qlcnic_adapter *);
+ void (*enable_sds_intr) (struct qlcnic_adapter *,
+ struct qlcnic_host_sds_ring *);
+ void (*disable_sds_intr) (struct qlcnic_adapter *,
+ struct qlcnic_host_sds_ring *);
+ void (*enable_tx_intr) (struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *);
+ void (*disable_tx_intr) (struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *);
};
extern struct qlcnic_nic_template qlcnic_vf_ops;
@@ -1931,9 +1941,10 @@ static inline void qlcnic_napi_disable(struct qlcnic_adapter *adapter)
adapter->ahw->hw_ops->napi_disable(adapter);
}
-static inline void qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter)
+static inline int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter,
+ struct ethtool_coalesce *ethcoal)
{
- adapter->ahw->hw_ops->config_intr_coal(adapter);
+ return adapter->ahw->hw_ops->config_intr_coal(adapter, ethcoal);
}
static inline int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
@@ -1985,6 +1996,11 @@ static inline void qlcnic_set_mac_filter_count(struct qlcnic_adapter *adapter)
adapter->ahw->hw_ops->set_mac_filter_count(adapter);
}
+static inline void qlcnic_get_beacon_state(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->hw_ops->get_beacon_state(adapter);
+}
+
static inline void qlcnic_read_phys_port_id(struct qlcnic_adapter *adapter)
{
if (adapter->ahw->hw_ops->read_phys_port_id)
@@ -2027,6 +2043,54 @@ static inline bool qlcnic_check_multi_tx(struct qlcnic_adapter *adapter)
return test_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
}
+static inline void
+qlcnic_82xx_enable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test)
+ writel(0x0, tx_ring->crb_intr_mask);
+}
+
+static inline void
+qlcnic_82xx_disable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test)
+ writel(1, tx_ring->crb_intr_mask);
+}
+
+static inline void
+qlcnic_83xx_enable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ writel(0, tx_ring->crb_intr_mask);
+}
+
+static inline void
+qlcnic_83xx_disable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ writel(1, tx_ring->crb_intr_mask);
+}
+
+/* Enable MSI-x and INT-x interrupts */
+static inline void
+qlcnic_83xx_enable_sds_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring)
+{
+ writel(0, sds_ring->crb_intr_mask);
+}
+
+/* Disable MSI-x and INT-x interrupts */
+static inline void
+qlcnic_83xx_disable_sds_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring)
+{
+ writel(1, sds_ring->crb_intr_mask);
+}
+
static inline void qlcnic_disable_multi_tx(struct qlcnic_adapter *adapter)
{
test_and_clear_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
@@ -2036,10 +2100,10 @@ static inline void qlcnic_disable_multi_tx(struct qlcnic_adapter *adapter)
/* When operating in multi Tx mode, the driver needs to write 0x1
 * to the src register, instead of 0x0, to disable receiving interrupts.
 */
-static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
+static inline void
+qlcnic_82xx_disable_sds_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring)
{
- struct qlcnic_adapter *adapter = sds_ring->adapter;
-
if (qlcnic_check_multi_tx(adapter) &&
!adapter->ahw->diag_test &&
(adapter->flags & QLCNIC_MSIX_ENABLED))
@@ -2048,13 +2112,42 @@ static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
writel(0, sds_ring->crb_intr_mask);
}
+static inline void qlcnic_enable_sds_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring)
+{
+ if (adapter->ahw->hw_ops->enable_sds_intr)
+ adapter->ahw->hw_ops->enable_sds_intr(adapter, sds_ring);
+}
+
+static inline void
+qlcnic_disable_sds_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring)
+{
+ if (adapter->ahw->hw_ops->disable_sds_intr)
+ adapter->ahw->hw_ops->disable_sds_intr(adapter, sds_ring);
+}
+
+static inline void qlcnic_enable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ if (adapter->ahw->hw_ops->enable_tx_intr)
+ adapter->ahw->hw_ops->enable_tx_intr(adapter, tx_ring);
+}
+
+static inline void qlcnic_disable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ if (adapter->ahw->hw_ops->disable_tx_intr)
+ adapter->ahw->hw_ops->disable_tx_intr(adapter, tx_ring);
+}
+
/* When operating in multi Tx mode, the driver needs to write 0x0
 * to the src register, instead of 0x1, to enable receiving interrupts.
 */
-static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
+static inline void
+qlcnic_82xx_enable_sds_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring)
{
- struct qlcnic_adapter *adapter = sds_ring->adapter;
-
if (qlcnic_check_multi_tx(adapter) &&
!adapter->ahw->diag_test &&
(adapter->flags & QLCNIC_MSIX_ENABLED))
@@ -2140,4 +2233,26 @@ static inline bool qlcnic_sriov_vf_check(struct qlcnic_adapter *adapter)
return status;
}
+
+static inline bool qlcnic_83xx_pf_check(struct qlcnic_adapter *adapter)
+{
+ unsigned short device = adapter->pdev->device;
+
+ return (device == PCI_DEVICE_ID_QLOGIC_QLE834X) ? true : false;
+}
+
+static inline bool qlcnic_83xx_vf_check(struct qlcnic_adapter *adapter)
+{
+ unsigned short device = adapter->pdev->device;
+
+ return (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ? true : false;
+}
+
+static inline u32 qlcnic_get_vnic_func_count(struct qlcnic_adapter *adapter)
+{
+ if (qlcnic_84xx_check(adapter))
+ return QLC_84XX_VNIC_COUNT;
+ else
+ return QLC_DEFAULT_VNIC_COUNT;
+}
#endif /* __QLCNIC_H_ */
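The enable/disable wrappers added above illustrate the driver's ops-table dispatch: callers never touch hardware registers directly, and a missing hook degrades to a no-op, so the 82xx and 83xx backends only fill in the operations they support. A minimal sketch of the pattern, with illustrative names:

	#include <stdio.h>

	struct ring { int id; };

	struct hw_ops {
		void (*enable_sds_intr)(struct ring *);
	};

	static void hw83xx_enable_sds_intr(struct ring *r)
	{
		printf("unmask sds ring %d\n", r->id);
	}

	static void enable_sds_intr(const struct hw_ops *ops, struct ring *r)
	{
		if (ops->enable_sds_intr)	/* hook may be absent */
			ops->enable_sds_intr(r);
	}

	int main(void)
	{
		struct hw_ops ops = { .enable_sds_intr = hw83xx_enable_sds_intr };
		struct ring r = { 0 };

		enable_sds_intr(&ops, &r);
		return 0;
	}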
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index f776f99f7915..4146664d4d6a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -13,8 +13,26 @@
#include <linux/interrupt.h>
#include <linux/aer.h>
+static void __qlcnic_83xx_process_aen(struct qlcnic_adapter *);
+static int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *, u8);
+static void qlcnic_83xx_configure_mac(struct qlcnic_adapter *, u8 *, u8,
+ struct qlcnic_cmd_args *);
+static int qlcnic_83xx_get_port_config(struct qlcnic_adapter *);
+static irqreturn_t qlcnic_83xx_handle_aen(int, void *);
+static pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *,
+ pci_channel_state_t);
+static int qlcnic_83xx_set_port_config(struct qlcnic_adapter *);
+static pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *);
+static void qlcnic_83xx_io_resume(struct pci_dev *);
+static int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *, u8);
+static void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *);
+static int qlcnic_83xx_resume(struct qlcnic_adapter *);
+static int qlcnic_83xx_shutdown(struct pci_dev *);
+static void qlcnic_83xx_get_beacon_state(struct qlcnic_adapter *);
+
#define RSS_HASHTYPE_IP_TCP 0x3
#define QLC_83XX_FW_MBX_CMD 0
+#define QLC_SKIP_INACTIVE_PCI_REGS 7
static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
{QLCNIC_CMD_CONFIGURE_IP_ADDR, 6, 1},
@@ -34,7 +52,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
{QLCNIC_CMD_READ_MAX_MTU, 4, 2},
{QLCNIC_CMD_READ_MAX_LRO, 4, 2},
{QLCNIC_CMD_MAC_ADDRESS, 4, 3},
- {QLCNIC_CMD_GET_PCI_INFO, 1, 66},
+ {QLCNIC_CMD_GET_PCI_INFO, 1, 129},
{QLCNIC_CMD_GET_NIC_INFO, 2, 19},
{QLCNIC_CMD_SET_NIC_INFO, 32, 1},
{QLCNIC_CMD_GET_ESWITCH_CAPABILITY, 4, 3},
@@ -68,7 +86,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
{QLCNIC_CMD_CONFIG_VPORT, 4, 4},
{QLCNIC_CMD_BC_EVENT_SETUP, 2, 1},
{QLCNIC_CMD_DCB_QUERY_CAP, 1, 2},
- {QLCNIC_CMD_DCB_QUERY_PARAM, 2, 50},
+ {QLCNIC_CMD_DCB_QUERY_PARAM, 1, 50},
};
const u32 qlcnic_83xx_ext_reg_tbl[] = {
@@ -180,6 +198,11 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
.io_error_detected = qlcnic_83xx_io_error_detected,
.io_slot_reset = qlcnic_83xx_io_slot_reset,
.io_resume = qlcnic_83xx_io_resume,
+ .get_beacon_state = qlcnic_83xx_get_beacon_state,
+ .enable_sds_intr = qlcnic_83xx_enable_sds_intr,
+ .disable_sds_intr = qlcnic_83xx_disable_sds_intr,
+ .enable_tx_intr = qlcnic_83xx_enable_tx_intr,
+ .disable_tx_intr = qlcnic_83xx_disable_tx_intr,
};
@@ -267,11 +290,22 @@ int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *adapter, ulong addr,
}
}
-int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter)
+static void qlcnic_83xx_enable_legacy(struct qlcnic_adapter *adapter)
{
- int err, i, num_msix;
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ /* MSI-X enablement failed, use legacy interrupt */
+ adapter->tgt_status_reg = ahw->pci_base0 + QLC_83XX_INTX_PTR;
+ adapter->tgt_mask_reg = ahw->pci_base0 + QLC_83XX_INTX_MASK;
+ adapter->isr_int_vec = ahw->pci_base0 + QLC_83XX_INTX_TRGR;
+ adapter->msix_entries[0].vector = adapter->pdev->irq;
+ dev_info(&adapter->pdev->dev, "using legacy interrupt\n");
+}
+
+static int qlcnic_83xx_calculate_msix_vector(struct qlcnic_adapter *adapter)
+{
+ int num_msix;
+
num_msix = adapter->drv_sds_rings;
/* account for the AEN interrupt when using MSI-X based interrupts */
@@ -280,29 +314,44 @@ int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter)
if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
num_msix += adapter->drv_tx_rings;
- err = qlcnic_enable_msix(adapter, num_msix);
- if (err == -ENOMEM)
- return err;
- if (adapter->flags & QLCNIC_MSIX_ENABLED)
- num_msix = adapter->ahw->num_msix;
- else {
- if (qlcnic_sriov_vf_check(adapter))
- return -EINVAL;
- num_msix = 1;
+ return num_msix;
+}
+
+int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int err, i, num_msix;
+
+ if (adapter->flags & QLCNIC_TSS_RSS) {
+ err = qlcnic_setup_tss_rss_intr(adapter);
+ if (err < 0)
+ return err;
+ num_msix = ahw->num_msix;
+ } else {
+ num_msix = qlcnic_83xx_calculate_msix_vector(adapter);
+
+ err = qlcnic_enable_msix(adapter, num_msix);
+ if (err == -ENOMEM)
+ return err;
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ num_msix = ahw->num_msix;
+ } else {
+ if (qlcnic_sriov_vf_check(adapter))
+ return -EINVAL;
+ num_msix = 1;
+ adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
+ }
}
+
/* setup interrupt mapping table for fw */
ahw->intr_tbl = vzalloc(num_msix *
sizeof(struct qlcnic_intrpt_config));
if (!ahw->intr_tbl)
return -ENOMEM;
- if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
- /* MSI-X enablement failed, use legacy interrupt */
- adapter->tgt_status_reg = ahw->pci_base0 + QLC_83XX_INTX_PTR;
- adapter->tgt_mask_reg = ahw->pci_base0 + QLC_83XX_INTX_MASK;
- adapter->isr_int_vec = ahw->pci_base0 + QLC_83XX_INTX_TRGR;
- adapter->msix_entries[0].vector = adapter->pdev->irq;
- dev_info(&adapter->pdev->dev, "using legacy interrupt\n");
- }
+
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
+ qlcnic_83xx_enable_legacy(adapter);
for (i = 0; i < num_msix; i++) {
if (adapter->flags & QLCNIC_MSIX_ENABLED)
@@ -312,35 +361,22 @@ int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter)
ahw->intr_tbl[i].id = i;
ahw->intr_tbl[i].src = 0;
}
+
return 0;
}
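To make the vector arithmetic concrete, here is a standalone sketch of qlcnic_83xx_calculate_msix_vector() (assuming, as the comment above suggests, that one extra vector is reserved for the AEN interrupt): with 4 SDS rings, 4 Tx rings, and Tx interrupts not shared, 9 vectors are requested.

	#include <stdio.h>
	#include <stdbool.h>

	static int calculate_msix_vectors(int sds_rings, int tx_rings,
					  bool tx_intr_shared)
	{
		int num_msix = sds_rings;

		num_msix += 1;			/* AEN interrupt */
		if (!tx_intr_shared)
			num_msix += tx_rings;	/* one vector per Tx ring */

		return num_msix;
	}

	int main(void)
	{
		/* 4 SDS + 1 AEN + 4 Tx = 9 vectors */
		printf("%d\n", calculate_msix_vectors(4, 4, false));
		return 0;
	}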
-inline void qlcnic_83xx_clear_legacy_intr_mask(struct qlcnic_adapter *adapter)
+static inline void
+qlcnic_83xx_clear_legacy_intr_mask(struct qlcnic_adapter *adapter)
{
writel(0, adapter->tgt_mask_reg);
}
-inline void qlcnic_83xx_set_legacy_intr_mask(struct qlcnic_adapter *adapter)
+static inline void
+qlcnic_83xx_set_legacy_intr_mask(struct qlcnic_adapter *adapter)
{
if (adapter->tgt_mask_reg)
writel(1, adapter->tgt_mask_reg);
}
-/* Enable MSI-x and INT-x interrupts */
-void qlcnic_83xx_enable_intr(struct qlcnic_adapter *adapter,
- struct qlcnic_host_sds_ring *sds_ring)
-{
- writel(0, sds_ring->crb_intr_mask);
-}
-
-/* Disable MSI-x and INT-x interrupts */
-void qlcnic_83xx_disable_intr(struct qlcnic_adapter *adapter,
- struct qlcnic_host_sds_ring *sds_ring)
-{
- writel(1, sds_ring->crb_intr_mask);
-}
-
-inline void qlcnic_83xx_enable_legacy_msix_mbx_intr(struct qlcnic_adapter
+static inline void qlcnic_83xx_enable_legacy_msix_mbx_intr(struct qlcnic_adapter
*adapter)
{
u32 mask;
@@ -477,7 +513,7 @@ irqreturn_t qlcnic_83xx_tmp_intr(int irq, void *data)
done:
adapter->ahw->diag_cnt++;
- qlcnic_83xx_enable_intr(adapter, sds_ring);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
return IRQ_HANDLED;
}
@@ -634,10 +670,10 @@ int qlcnic_83xx_get_port_info(struct qlcnic_adapter *adapter)
return status;
}
-void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
+static void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
- u16 act_pci_fn = ahw->act_pci_func;
+ u16 act_pci_fn = ahw->total_nic_func;
u16 count;
ahw->max_mc_count = QLC_83XX_MAX_MC_COUNT;
@@ -869,7 +905,7 @@ static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter,
return;
}
-void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
+static void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
u32 event[QLC_83XX_MBX_AEN_CNT];
@@ -1276,8 +1312,8 @@ int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
/* send the mailbox command*/
err = qlcnic_issue_cmd(adapter, &cmd);
if (err) {
- dev_err(&adapter->pdev->dev,
- "Failed to create Tx ctx in firmware 0x%x\n", err);
+ netdev_err(adapter->netdev,
+ "Failed to create Tx ctx in firmware 0x%x\n", err);
goto out;
}
mbx_out = (struct qlcnic_tx_mbx_out *)&cmd.rsp.arg[2];
@@ -1288,8 +1324,9 @@ int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
intr_mask = ahw->intr_tbl[adapter->drv_sds_rings + ring].src;
tx->crb_intr_mask = ahw->pci_base0 + intr_mask;
}
- dev_info(&adapter->pdev->dev, "Tx Context[0x%x] Created, state:0x%x\n",
- tx->ctx_id, mbx_out->state);
+ netdev_info(adapter->netdev,
+ "Tx Context[0x%x] Created, state:0x%x\n",
+ tx->ctx_id, mbx_out->state);
out:
qlcnic_free_mbx_args(&cmd);
return err;
@@ -1341,7 +1378,7 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx->sds_rings[ring];
- qlcnic_83xx_enable_intr(adapter, sds_ring);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
}
}
@@ -1366,7 +1403,7 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx->sds_rings[ring];
if (adapter->flags & QLCNIC_MSIX_ENABLED)
- qlcnic_83xx_disable_intr(adapter, sds_ring);
+ qlcnic_disable_sds_intr(adapter, sds_ring);
}
}
@@ -1386,6 +1423,33 @@ out:
netif_device_attach(netdev);
}
+static void qlcnic_83xx_get_beacon_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_cmd_args cmd;
+ u8 beacon_state;
+ int err = 0;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LED_CONFIG);
+ if (!err) {
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (!err) {
+ beacon_state = cmd.rsp.arg[4];
+ if (beacon_state == QLCNIC_BEACON_DISABLE)
+ ahw->beacon_state = QLC_83XX_BEACON_OFF;
+ else if (beacon_state == QLC_83XX_ENABLE_BEACON)
+ ahw->beacon_state = QLC_83XX_BEACON_ON;
+ }
+ } else {
+ netdev_err(adapter->netdev, "Get beacon state failed, err=%d\n",
+ err);
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+}
+
int qlcnic_83xx_config_led(struct qlcnic_adapter *adapter, u32 state,
u32 beacon)
{
@@ -1498,8 +1562,7 @@ int qlcnic_83xx_set_led(struct net_device *netdev,
return err;
}
-void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *adapter,
- int enable)
+void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *adapter, int enable)
{
struct qlcnic_cmd_args cmd;
int status;
@@ -1507,21 +1570,21 @@ void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *adapter,
if (qlcnic_sriov_vf_check(adapter))
return;
- if (enable) {
+ if (enable)
status = qlcnic_alloc_mbx_args(&cmd, adapter,
QLCNIC_CMD_INIT_NIC_FUNC);
- if (status)
- return;
-
- cmd.req.arg[1] = BIT_0 | BIT_31;
- } else {
+ else
status = qlcnic_alloc_mbx_args(&cmd, adapter,
QLCNIC_CMD_STOP_NIC_FUNC);
- if (status)
- return;
- cmd.req.arg[1] = BIT_0 | BIT_31;
- }
+ if (status)
+ return;
+
+ cmd.req.arg[1] = QLC_REGISTER_LB_IDC | QLC_INIT_FW_RESOURCES;
+
+ if (adapter->dcb)
+ cmd.req.arg[1] |= QLC_REGISTER_DCB_AEN;
+
status = qlcnic_issue_cmd(adapter, &cmd);
if (status)
dev_err(&adapter->pdev->dev,
@@ -1531,7 +1594,7 @@ void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *adapter,
qlcnic_free_mbx_args(&cmd);
}
-int qlcnic_83xx_set_port_config(struct qlcnic_adapter *adapter)
+static int qlcnic_83xx_set_port_config(struct qlcnic_adapter *adapter)
{
struct qlcnic_cmd_args cmd;
int err;
@@ -1548,7 +1611,7 @@ int qlcnic_83xx_set_port_config(struct qlcnic_adapter *adapter)
return err;
}
-int qlcnic_83xx_get_port_config(struct qlcnic_adapter *adapter)
+static int qlcnic_83xx_get_port_config(struct qlcnic_adapter *adapter)
{
struct qlcnic_cmd_args cmd;
int err;
@@ -1590,7 +1653,9 @@ static void qlcnic_83xx_set_interface_id_promisc(struct qlcnic_adapter *adapter,
u32 *interface_id)
{
if (qlcnic_sriov_pf_check(adapter)) {
+ qlcnic_alloc_lb_filters_mem(adapter);
qlcnic_pf_set_interface_id_promisc(adapter, interface_id);
+ adapter->rx_mac_learn = true;
} else {
if (!qlcnic_sriov_vf_check(adapter))
*interface_id = adapter->recv_ctx->context_id << 16;
@@ -1617,7 +1682,11 @@ int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
qlcnic_83xx_set_interface_id_promisc(adapter, &temp);
- cmd->req.arg[1] = (mode ? 1 : 0) | temp;
+
+ if (qlcnic_84xx_check(adapter) && qlcnic_sriov_pf_check(adapter))
+ mode = VPORT_MISS_MODE_ACCEPT_ALL;
+
+ cmd->req.arg[1] = mode | temp;
err = qlcnic_issue_cmd(adapter, cmd);
if (!err)
return err;
@@ -1711,7 +1780,7 @@ static void qlcnic_extend_lb_idc_cmpltn_wait(struct qlcnic_adapter *adapter,
ahw->extend_lb_time = 0;
}
-int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+static int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
struct net_device *netdev = adapter->netdev;
@@ -1780,7 +1849,7 @@ int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
return status;
}
-int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+static int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
u32 config = ahw->port_config, max_wait_count;
@@ -2015,8 +2084,8 @@ void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
qlcnic_83xx_sre_macaddr_change(adapter, mac, vlan_id, QLCNIC_MAC_ADD);
}
-void qlcnic_83xx_configure_mac(struct qlcnic_adapter *adapter, u8 *mac,
- u8 type, struct qlcnic_cmd_args *cmd)
+static void qlcnic_83xx_configure_mac(struct qlcnic_adapter *adapter, u8 *mac,
+ u8 type, struct qlcnic_cmd_args *cmd)
{
switch (type) {
case QLCNIC_SET_STATION_MAC:
@@ -2060,37 +2129,130 @@ int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac,
return err;
}
-void qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *adapter)
+static int qlcnic_83xx_set_rx_intr_coal(struct qlcnic_adapter *adapter)
{
- int err;
- u16 temp;
- struct qlcnic_cmd_args cmd;
struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
+ struct qlcnic_cmd_args cmd;
+ u16 temp;
+ int err;
- if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
- return;
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTR_COAL);
+ if (err)
+ return err;
+
+ temp = adapter->recv_ctx->context_id;
+ cmd.req.arg[1] = QLCNIC_INTR_COAL_TYPE_RX | temp << 16;
+ temp = coal->rx_time_us;
+ cmd.req.arg[2] = coal->rx_packets | temp << 16;
+ cmd.req.arg[3] = coal->flag;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err != QLCNIC_RCODE_SUCCESS)
+ netdev_err(adapter->netdev,
+ "failed to set interrupt coalescing parameters\n");
+
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+static int qlcnic_83xx_set_tx_intr_coal(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
+ struct qlcnic_cmd_args cmd;
+ u16 temp;
+ int err;
err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTR_COAL);
if (err)
- return;
+ return err;
- if (coal->type == QLCNIC_INTR_COAL_TYPE_RX) {
- temp = adapter->recv_ctx->context_id;
- cmd.req.arg[1] = QLCNIC_INTR_COAL_TYPE_RX | temp << 16;
- temp = coal->rx_time_us;
- cmd.req.arg[2] = coal->rx_packets | temp << 16;
- } else if (coal->type == QLCNIC_INTR_COAL_TYPE_TX) {
- temp = adapter->tx_ring->ctx_id;
- cmd.req.arg[1] = QLCNIC_INTR_COAL_TYPE_TX | temp << 16;
- temp = coal->tx_time_us;
- cmd.req.arg[2] = coal->tx_packets | temp << 16;
- }
+ temp = adapter->tx_ring->ctx_id;
+ cmd.req.arg[1] = QLCNIC_INTR_COAL_TYPE_TX | temp << 16;
+ temp = coal->tx_time_us;
+ cmd.req.arg[2] = coal->tx_packets | temp << 16;
cmd.req.arg[3] = coal->flag;
+
err = qlcnic_issue_cmd(adapter, &cmd);
if (err != QLCNIC_RCODE_SUCCESS)
- dev_info(&adapter->pdev->dev,
- "Failed to send interrupt coalescence parameters\n");
+ netdev_err(adapter->netdev,
+ "failed to set interrupt coalescing parameters\n");
+
qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+int qlcnic_83xx_set_rx_tx_intr_coal(struct qlcnic_adapter *adapter)
+{
+ int err = 0;
+
+ err = qlcnic_83xx_set_rx_intr_coal(adapter);
+ if (err)
+ netdev_err(adapter->netdev,
+ "failed to set Rx coalescing parameters\n");
+
+ err = qlcnic_83xx_set_tx_intr_coal(adapter);
+ if (err)
+ netdev_err(adapter->netdev,
+ "failed to set Tx coalescing parameters\n");
+
+ return err;
+}
+
+int qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *adapter,
+ struct ethtool_coalesce *ethcoal)
+{
+ struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
+ u32 rx_coalesce_usecs, rx_max_frames;
+ u32 tx_coalesce_usecs, tx_max_frames;
+ int err;
+
+ if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
+ return -EIO;
+
+ tx_coalesce_usecs = ethcoal->tx_coalesce_usecs;
+ tx_max_frames = ethcoal->tx_max_coalesced_frames;
+ rx_coalesce_usecs = ethcoal->rx_coalesce_usecs;
+ rx_max_frames = ethcoal->rx_max_coalesced_frames;
+ coal->flag = QLCNIC_INTR_DEFAULT;
+
+ if ((coal->rx_time_us == rx_coalesce_usecs) &&
+ (coal->rx_packets == rx_max_frames)) {
+ coal->type = QLCNIC_INTR_COAL_TYPE_TX;
+ coal->tx_time_us = tx_coalesce_usecs;
+ coal->tx_packets = tx_max_frames;
+ } else if ((coal->tx_time_us == tx_coalesce_usecs) &&
+ (coal->tx_packets == tx_max_frames)) {
+ coal->type = QLCNIC_INTR_COAL_TYPE_RX;
+ coal->rx_time_us = rx_coalesce_usecs;
+ coal->rx_packets = rx_max_frames;
+ } else {
+ coal->type = QLCNIC_INTR_COAL_TYPE_RX_TX;
+ coal->rx_time_us = rx_coalesce_usecs;
+ coal->rx_packets = rx_max_frames;
+ coal->tx_time_us = tx_coalesce_usecs;
+ coal->tx_packets = tx_max_frames;
+ }
+
+ switch (coal->type) {
+ case QLCNIC_INTR_COAL_TYPE_RX:
+ err = qlcnic_83xx_set_rx_intr_coal(adapter);
+ break;
+ case QLCNIC_INTR_COAL_TYPE_TX:
+ err = qlcnic_83xx_set_tx_intr_coal(adapter);
+ break;
+ case QLCNIC_INTR_COAL_TYPE_RX_TX:
+ err = qlcnic_83xx_set_rx_tx_intr_coal(adapter);
+ break;
+ default:
+ err = -EINVAL;
+ netdev_err(adapter->netdev,
+ "Invalid Interrupt coalescing type\n");
+ break;
+ }
+
+ return err;
}
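The type selection in qlcnic_83xx_config_intr_coal() deserves a note: ethtool always hands the driver all four values, so the code infers which side the user actually changed by comparing against the cached settings. A standalone sketch of that inference, with made-up values:

	#include <stdio.h>

	enum coal_type { COAL_RX = 1, COAL_TX = 2, COAL_RX_TX = 3 };

	struct coal { unsigned rx_us, rx_frames, tx_us, tx_frames; };

	static enum coal_type pick_type(const struct coal *cur,
					const struct coal *req)
	{
		if (cur->rx_us == req->rx_us && cur->rx_frames == req->rx_frames)
			return COAL_TX;		/* only Tx side changed */
		if (cur->tx_us == req->tx_us && cur->tx_frames == req->tx_frames)
			return COAL_RX;		/* only Rx side changed */
		return COAL_RX_TX;		/* both sides changed */
	}

	int main(void)
	{
		struct coal cur = { 3, 256, 64, 64 };
		struct coal req = { 3, 256, 128, 32 };	/* only Tx changed */

		printf("%d\n", pick_type(&cur, &req));	/* 2 (COAL_TX) */
		return 0;
	}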
static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
@@ -2119,7 +2281,7 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
qlcnic_advert_link_change(adapter, link_status);
}
-irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
+static irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
{
struct qlcnic_adapter *adapter = data;
struct qlcnic_mailbox *mbx;
@@ -2145,36 +2307,6 @@ out:
return IRQ_HANDLED;
}
-int qlcnic_enable_eswitch(struct qlcnic_adapter *adapter, u8 port, u8 enable)
-{
- int err = -EIO;
- struct qlcnic_cmd_args cmd;
-
- if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) {
- dev_err(&adapter->pdev->dev,
- "%s: Error, invoked by non management func\n",
- __func__);
- return err;
- }
-
- err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TOGGLE_ESWITCH);
- if (err)
- return err;
-
- cmd.req.arg[1] = (port & 0xf) | BIT_4;
- err = qlcnic_issue_cmd(adapter, &cmd);
-
- if (err != QLCNIC_RCODE_SUCCESS) {
- dev_err(&adapter->pdev->dev, "Failed to enable eswitch%d\n",
- err);
- err = -EIO;
- }
- qlcnic_free_mbx_args(&cmd);
-
- return err;
-
-}
-
int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *adapter,
struct qlcnic_info *nic)
{
@@ -2268,11 +2400,37 @@ out:
return err;
}
+int qlcnic_get_pci_func_type(struct qlcnic_adapter *adapter, u16 type,
+ u16 *nic, u16 *fcoe, u16 *iscsi)
+{
+ struct device *dev = &adapter->pdev->dev;
+ int err = 0;
+
+ switch (type) {
+ case QLCNIC_TYPE_NIC:
+ (*nic)++;
+ break;
+ case QLCNIC_TYPE_FCOE:
+ (*fcoe)++;
+ break;
+ case QLCNIC_TYPE_ISCSI:
+ (*iscsi)++;
+ break;
+ default:
+ dev_err(dev, "%s: Unknown PCI type[%x]\n",
+ __func__, type);
+ err = -EIO;
+ }
+
+ return err;
+}
+
int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter,
struct qlcnic_pci_info *pci_info)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
struct device *dev = &adapter->pdev->dev;
+ u16 nic = 0, fcoe = 0, iscsi = 0;
struct qlcnic_cmd_args cmd;
int i, err = 0, j = 0;
u32 temp;
@@ -2283,16 +2441,20 @@ int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter,
err = qlcnic_issue_cmd(adapter, &cmd);
- ahw->act_pci_func = 0;
+ ahw->total_nic_func = 0;
if (err == QLCNIC_RCODE_SUCCESS) {
ahw->max_pci_func = cmd.rsp.arg[1] & 0xFF;
- for (i = 2, j = 0; j < QLCNIC_MAX_PCI_FUNC; j++, pci_info++) {
+ for (i = 2, j = 0; j < ahw->max_vnic_func; j++, pci_info++) {
pci_info->id = cmd.rsp.arg[i] & 0xFFFF;
pci_info->active = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16;
i++;
+ if (!pci_info->active) {
+ i += QLC_SKIP_INACTIVE_PCI_REGS;
+ continue;
+ }
pci_info->type = cmd.rsp.arg[i] & 0xFFFF;
- if (pci_info->type == QLCNIC_TYPE_NIC)
- ahw->act_pci_func++;
+ err = qlcnic_get_pci_func_type(adapter, pci_info->type,
+ &nic, &fcoe, &iscsi);
temp = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16;
pci_info->default_port = temp;
i++;
@@ -2310,6 +2472,13 @@ int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter,
err = -EIO;
}
+ ahw->total_nic_func = nic;
+ ahw->total_pci_func = nic + fcoe + iscsi;
+ if (ahw->total_nic_func == 0 || ahw->total_pci_func == 0) {
+ dev_err(dev, "%s: Invalid function count: total nic func[%x], total pci func[%x]\n",
+ __func__, ahw->total_nic_func, ahw->total_pci_func);
+ err = -EIO;
+ }
qlcnic_free_mbx_args(&cmd);
return err;
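On the GET_PCI_INFO change above: the response now carries more per-function words (the mailbox table grew from 66 to 129 response words), and inactive functions are skipped by advancing QLC_SKIP_INACTIVE_PCI_REGS words instead of decoding them. A rough sketch of that response walk, with illustrative layout constants:

	#include <stdio.h>

	#define SKIP_INACTIVE_PCI_REGS	7	/* words to jump per idle func */

	struct func_info { unsigned int id, active, type; };

	static void parse_pci_info(const unsigned int *rsp, int nfuncs,
				   struct func_info *out)
	{
		int i = 0, j;

		for (j = 0; j < nfuncs; j++) {
			out[j].id = rsp[i] & 0xFFFF;
			out[j].active = (rsp[i] & 0xFFFF0000) >> 16;
			i++;
			if (!out[j].active) {
				i += SKIP_INACTIVE_PCI_REGS;
				continue;
			}
			out[j].type = rsp[i] & 0xFFFF;
			/* ... decode the remaining fields here ... */
			i += SKIP_INACTIVE_PCI_REGS;
		}
	}

	int main(void)
	{
		unsigned int rsp[16] = { 0 };
		struct func_info info[2];

		rsp[0] = 0x00010000;	/* func 0: id 0, active */
		rsp[1] = 0x0001;	/* func 0: type 1 (NIC) */
		/* func 1 entry (rsp[8]) left zero: inactive, skipped */

		parse_pci_info(rsp, 2, info);
		printf("func0 active=%u type=%u\n", info[0].active, info[0].type);
		return 0;
	}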
@@ -3459,7 +3628,7 @@ int qlcnic_83xx_flash_test(struct qlcnic_adapter *adapter)
return 0;
}
-int qlcnic_83xx_shutdown(struct pci_dev *pdev)
+static int qlcnic_83xx_shutdown(struct pci_dev *pdev)
{
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
@@ -3481,7 +3650,7 @@ int qlcnic_83xx_shutdown(struct pci_dev *pdev)
return 0;
}
-int qlcnic_83xx_resume(struct qlcnic_adapter *adapter)
+static int qlcnic_83xx_resume(struct qlcnic_adapter *adapter)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlc_83xx_idc *idc = &ahw->idc;
@@ -3834,8 +4003,8 @@ int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *adapter)
return 0;
}
-pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
+static pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
{
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
@@ -3856,7 +4025,7 @@ pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *pdev,
return PCI_ERS_RESULT_NEED_RESET;
}
-pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *pdev)
+static pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *pdev)
{
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
int err = 0;
@@ -3879,7 +4048,7 @@ disconnect:
return PCI_ERS_RESULT_DISCONNECT;
}
-void qlcnic_83xx_io_resume(struct pci_dev *pdev)
+static void qlcnic_83xx_io_resume(struct pci_dev *pdev)
{
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index a6a33508e401..f92485ca21d1 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -324,6 +324,11 @@ struct qlc_83xx_idc {
char **name;
};
+enum qlcnic_vlan_operations {
+ QLC_VLAN_ADD = 0,
+ QLC_VLAN_DELETE
+};
+
/* Device States */
enum qlcnic_83xx_states {
QLC_83XX_IDC_DEV_UNKNOWN,
@@ -376,6 +381,8 @@ enum qlcnic_83xx_states {
/* LED configuration settings */
#define QLC_83XX_ENABLE_BEACON 0xe
+#define QLC_83XX_BEACON_ON 1
+#define QLC_83XX_BEACON_OFF 0
#define QLC_83XX_LED_RATE 0xff
#define QLC_83XX_LED_ACT (1 << 10)
#define QLC_83XX_LED_MOD (0 << 13)
@@ -518,6 +525,11 @@ enum qlc_83xx_ext_regs {
QLC_83XX_ASIC_TEMP,
};
+/* Initialize/Stop NIC command bit definitions */
+#define QLC_REGISTER_DCB_AEN BIT_1
+#define QLC_REGISTER_LB_IDC BIT_0
+#define QLC_INIT_FW_RESOURCES BIT_31
+
/* 83xx functions */
int qlcnic_83xx_get_fw_version(struct qlcnic_adapter *);
int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
@@ -532,17 +544,13 @@ void qlcnic_83xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
void qlcnic_83xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *, ulong, int *);
int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *, ulong, u32);
-void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *, int, u64 []);
int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32);
-int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *, u8);
-int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *, u8);
int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *, int);
int qlcnic_83xx_config_rss(struct qlcnic_adapter *, int);
-int qlcnic_83xx_config_intr_coalesce(struct qlcnic_adapter *);
void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *, u64 *, u16);
int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info *);
int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
-void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *, int);
+void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *, int);
int qlcnic_83xx_napi_add(struct qlcnic_adapter *, struct net_device *);
void qlcnic_83xx_napi_del(struct qlcnic_adapter *);
@@ -563,32 +571,22 @@ void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *);
int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *, bool);
int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, u16, u8);
int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *, u8 *, u8);
-void qlcnic_83xx_configure_mac(struct qlcnic_adapter *, u8 *, u8,
- struct qlcnic_cmd_args *);
int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *,
struct qlcnic_adapter *, u32);
void qlcnic_free_mbx_args(struct qlcnic_cmd_args *);
void qlcnic_set_npar_data(struct qlcnic_adapter *, const struct qlcnic_info *,
struct qlcnic_info *);
-void qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *);
-irqreturn_t qlcnic_83xx_handle_aen(int, void *);
+int qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *,
+ struct ethtool_coalesce *);
+int qlcnic_83xx_set_rx_tx_intr_coal(struct qlcnic_adapter *);
int qlcnic_83xx_get_port_info(struct qlcnic_adapter *);
void qlcnic_83xx_enable_mbx_interrupt(struct qlcnic_adapter *);
void qlcnic_83xx_disable_mbx_intr(struct qlcnic_adapter *);
irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *);
irqreturn_t qlcnic_83xx_intr(int, void *);
irqreturn_t qlcnic_83xx_tmp_intr(int, void *);
-void qlcnic_83xx_enable_intr(struct qlcnic_adapter *,
- struct qlcnic_host_sds_ring *);
-void qlcnic_83xx_disable_intr(struct qlcnic_adapter *,
- struct qlcnic_host_sds_ring *);
void qlcnic_83xx_check_vf(struct qlcnic_adapter *,
const struct pci_device_id *);
-void __qlcnic_83xx_process_aen(struct qlcnic_adapter *);
-int qlcnic_83xx_get_port_config(struct qlcnic_adapter *);
-int qlcnic_83xx_set_port_config(struct qlcnic_adapter *);
-int qlcnic_enable_eswitch(struct qlcnic_adapter *, u8, u8);
-int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *);
int qlcnic_83xx_config_default_opmode(struct qlcnic_adapter *);
int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *);
void qlcnic_83xx_free_mbx_intr(struct qlcnic_adapter *);
@@ -610,9 +608,7 @@ int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *,
u32, u8 *, int);
int qlcnic_83xx_init(struct qlcnic_adapter *, int);
int qlcnic_83xx_idc_ready_state_entry(struct qlcnic_adapter *);
-int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev);
void qlcnic_83xx_idc_poll_dev_state(struct work_struct *);
-int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *);
void qlcnic_83xx_idc_exit(struct qlcnic_adapter *);
void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *, u32);
int qlcnic_83xx_lock_driver(struct qlcnic_adapter *);
@@ -620,7 +616,6 @@ void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *);
int qlcnic_83xx_set_default_offload_settings(struct qlcnic_adapter *);
int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *, u64, u32 *, u32);
int qlcnic_83xx_idc_vnic_pf_entry(struct qlcnic_adapter *);
-int qlcnic_83xx_enable_vnic_mode(struct qlcnic_adapter *, int);
int qlcnic_83xx_disable_vnic_mode(struct qlcnic_adapter *, int);
int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *);
int qlcnic_83xx_get_vnic_vport_info(struct qlcnic_adapter *,
@@ -648,9 +643,6 @@ int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *);
int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *);
void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *);
void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *);
-void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *);
-int qlcnic_83xx_shutdown(struct pci_dev *);
-int qlcnic_83xx_resume(struct qlcnic_adapter *);
int qlcnic_83xx_idc_init(struct qlcnic_adapter *);
int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *);
int qlcnic_83xx_set_vnic_opmode(struct qlcnic_adapter *);
@@ -658,9 +650,4 @@ int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *);
void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *);
int qlcnic_83xx_aer_reset(struct qlcnic_adapter *);
void qlcnic_83xx_aer_start_poll_work(struct qlcnic_adapter *);
-pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *,
- pci_channel_state_t);
-pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *);
-void qlcnic_83xx_io_resume(struct pci_dev *);
-void qlcnic_83xx_stop_hw(struct qlcnic_adapter *);
#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 918e18ddf038..90a2dda351ec 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -39,6 +39,9 @@
static int qlcnic_83xx_init_default_driver(struct qlcnic_adapter *adapter);
static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev);
static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter);
+static int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev);
+static int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *);
+static void qlcnic_83xx_stop_hw(struct qlcnic_adapter *);
/* Template header */
struct qlc_83xx_reset_hdr {
@@ -380,7 +383,7 @@ static int qlcnic_83xx_idc_tx_soft_reset(struct qlcnic_adapter *adapter)
qlcnic_up(adapter, netdev);
netif_device_attach(netdev);
clear_bit(__QLCNIC_RESETTING, &adapter->state);
- dev_err(&adapter->pdev->dev, "%s:\n", __func__);
+ netdev_info(adapter->netdev, "%s: soft reset complete\n", __func__);
return 0;
}
@@ -614,8 +617,7 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox);
qlcnic_83xx_enable_mbx_interrupt(adapter);
- /* register for NIC IDC AEN Events */
- qlcnic_83xx_register_nic_idc_func(adapter, 1);
+ qlcnic_83xx_initialize_nic(adapter, 1);
err = qlcnic_sriov_pf_reinit(adapter);
if (err)
@@ -1529,7 +1531,7 @@ static int qlcnic_83xx_check_cmd_peg_status(struct qlcnic_adapter *p_dev)
return -EIO;
}
-int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev)
+static int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev)
{
int err;
@@ -1602,7 +1604,7 @@ static int qlcnic_83xx_reset_template_checksum(struct qlcnic_adapter *p_dev)
}
}
-int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *p_dev)
+static int
+qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *p_dev)
{
struct qlcnic_hardware_context *ahw = p_dev->ahw;
u32 addr, count, prev_ver, curr_ver;
@@ -1946,7 +1948,7 @@ static void qlcnic_83xx_exec_template_cmd(struct qlcnic_adapter *p_dev,
p_dev->ahw->reset.seq_index = index;
}
-void qlcnic_83xx_stop_hw(struct qlcnic_adapter *p_dev)
+static void qlcnic_83xx_stop_hw(struct qlcnic_adapter *p_dev)
{
p_dev->ahw->reset.seq_index = 0;
@@ -2029,7 +2031,7 @@ static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter)
return 0;
}
-int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
+static int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
{
int err;
struct qlcnic_info nic_info;
@@ -2213,9 +2215,9 @@ static void qlcnic_83xx_init_rings(struct qlcnic_adapter *adapter)
int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
- struct qlcnic_dcb *dcb;
int err = 0;
+ adapter->rx_mac_learn = false;
ahw->msix_supported = !!qlcnic_use_msi_x;
qlcnic_83xx_init_rings(adapter);
@@ -2265,8 +2267,7 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
- /* register for NIC IDC AEN Events */
- qlcnic_83xx_register_nic_idc_func(adapter, 1);
+ qlcnic_83xx_initialize_nic(adapter, 1);
/* Configure default, SR-IOV or Virtual NIC mode of operation */
err = qlcnic_83xx_configure_opmode(adapter);
@@ -2279,11 +2280,6 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
if (err)
goto disable_mbx_intr;
- dcb = adapter->dcb;
-
- if (dcb && qlcnic_dcb_attach(dcb))
- qlcnic_clear_dcb_ops(dcb);
-
/* Periodically monitor device status */
qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work);
return 0;
@@ -2314,7 +2310,7 @@ void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *adapter)
qlcnic_83xx_disable_vnic_mode(adapter, 1);
qlcnic_83xx_idc_detach_driver(adapter);
- qlcnic_83xx_register_nic_idc_func(adapter, 0);
+ qlcnic_83xx_initialize_nic(adapter, 0);
cancel_delayed_work_sync(&adapter->idc_aen_work);
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
index 734d28602ac3..be7d7a62cc0d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
@@ -8,7 +8,7 @@
#include "qlcnic.h"
#include "qlcnic_hw.h"
-int qlcnic_83xx_enable_vnic_mode(struct qlcnic_adapter *adapter, int lock)
+static int qlcnic_83xx_enable_vnic_mode(struct qlcnic_adapter *adapter, int lock)
{
if (lock) {
if (qlcnic_83xx_lock_driver(adapter))
@@ -107,7 +107,7 @@ static int qlcnic_83xx_init_mgmt_vnic(struct qlcnic_adapter *adapter)
npar = adapter->npars;
- for (i = 0; i < ahw->act_pci_func; i++, npar++) {
+ for (i = 0; i < ahw->total_nic_func; i++, npar++) {
dev_info(dev, "id:%d active:%d type:%d port:%d min_bw:%d max_bw:%d mac_addr:%pM\n",
npar->pci_func, npar->active, npar->type,
npar->phy_port, npar->min_bw, npar->max_bw,
@@ -115,7 +115,7 @@ static int qlcnic_83xx_init_mgmt_vnic(struct qlcnic_adapter *adapter)
}
dev_info(dev, "Max functions = %d, active functions = %d\n",
- ahw->max_pci_func, ahw->act_pci_func);
+ ahw->max_pci_func, ahw->total_nic_func);
if (qlcnic_83xx_set_vnic_opmode(adapter))
return err;
@@ -224,10 +224,14 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
return -EIO;
}
- if (ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY)
+ if (ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY) {
adapter->flags |= QLCNIC_ESWITCH_ENABLED;
- else
+ if (adapter->drv_mac_learn)
+ adapter->rx_mac_learn = true;
+ } else {
adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
+ adapter->rx_mac_learn = false;
+ }
ahw->idc.vnic_state = QLCNIC_DEV_NPAR_NON_OPER;
ahw->idc.vnic_wait_limit = QLCNIC_DEV_NPAR_OPER_TIMEO;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 859cb161fc63..64dcbf33d8f0 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -91,18 +91,6 @@ void qlcnic_free_mbx_args(struct qlcnic_cmd_args *cmd)
cmd->rsp.arg = NULL;
}
-static int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
-{
- int i;
-
- for (i = 0; i < adapter->ahw->act_pci_func; i++) {
- if (adapter->npars[i].pci_func == pci_func)
- return i;
- }
-
- return -1;
-}
-
static u32
qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
{
@@ -966,13 +954,15 @@ out_free_dma:
int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
struct qlcnic_pci_info *pci_info)
{
- int err = 0, i;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ size_t npar_size = sizeof(struct qlcnic_pci_info_le);
+ size_t pci_size = npar_size * ahw->max_vnic_func;
+ u16 nic = 0, fcoe = 0, iscsi = 0;
+ struct qlcnic_pci_info_le *npar;
struct qlcnic_cmd_args cmd;
dma_addr_t pci_info_dma_t;
- struct qlcnic_pci_info_le *npar;
void *pci_info_addr;
- size_t npar_size = sizeof(struct qlcnic_pci_info_le);
- size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC;
+ int err = 0, i;
pci_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, pci_size,
&pci_info_dma_t, GFP_KERNEL);
@@ -989,14 +979,16 @@ int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
cmd.req.arg[3] = pci_size;
err = qlcnic_issue_cmd(adapter, &cmd);
- adapter->ahw->act_pci_func = 0;
+ ahw->total_nic_func = 0;
if (err == QLCNIC_RCODE_SUCCESS) {
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++, npar++, pci_info++) {
+ for (i = 0; i < ahw->max_vnic_func; i++, npar++, pci_info++) {
pci_info->id = le16_to_cpu(npar->id);
pci_info->active = le16_to_cpu(npar->active);
+ if (!pci_info->active)
+ continue;
pci_info->type = le16_to_cpu(npar->type);
- if (pci_info->type == QLCNIC_TYPE_NIC)
- adapter->ahw->act_pci_func++;
+ err = qlcnic_get_pci_func_type(adapter, pci_info->type,
+ &nic, &fcoe, &iscsi);
pci_info->default_port =
le16_to_cpu(npar->default_port);
pci_info->tx_min_bw =
@@ -1011,6 +1003,14 @@ int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
err = -EIO;
}
+ ahw->total_nic_func = nic;
+ ahw->total_pci_func = nic + fcoe + iscsi;
+ if (ahw->total_nic_func == 0 || ahw->total_pci_func == 0) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Invalid function count: total nic func[%x], total pci func[%x]\n",
+ __func__, ahw->total_nic_func, ahw->total_pci_func);
+ err = -EIO;
+ }
qlcnic_free_mbx_args(&cmd);
out_free_dma:
dma_free_coherent(&adapter->pdev->dev, pci_size, pci_info_addr,
@@ -1203,7 +1203,7 @@ int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
esw_stats->numbytes = QLCNIC_STATS_NOT_AVAIL;
esw_stats->context_id = eswitch;
- for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+ for (i = 0; i < adapter->ahw->total_nic_func; i++) {
if (adapter->npars[i].phy_port != eswitch)
continue;
@@ -1236,15 +1236,16 @@ int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw,
const u8 port, const u8 rx_tx)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_cmd_args cmd;
int err;
u32 arg1;
- struct qlcnic_cmd_args cmd;
- if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
+ if (ahw->op_mode != QLCNIC_MGMT_FUNC)
return -EIO;
if (func_esw == QLCNIC_STATS_PORT) {
- if (port >= QLCNIC_MAX_PCI_FUNC)
+ if (port >= ahw->max_vnic_func)
goto err_ret;
} else if (func_esw == QLCNIC_STATS_ESWITCH) {
if (port >= QLCNIC_NIU_MAX_XG_PORTS)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
index 86bca7c14f99..77f1bce432d2 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
@@ -15,7 +15,6 @@
#define QLC_DCB_GET_MAP(V) (1 << V)
-#define QLC_DCB_AEN_BIT 0x2
#define QLC_DCB_FW_VER 0x2
#define QLC_DCB_MAX_TC 0x8
#define QLC_DCB_MAX_APP 0x8
@@ -71,7 +70,6 @@ static void qlcnic_82xx_dcb_aen_handler(struct qlcnic_dcb *, void *);
static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_dcb *);
static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_dcb *, char *, u8);
static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_dcb *);
-static int qlcnic_83xx_dcb_register_aen(struct qlcnic_dcb *, bool);
static void qlcnic_83xx_dcb_aen_handler(struct qlcnic_dcb *, void *);
struct qlcnic_dcb_capability {
@@ -179,7 +177,6 @@ static struct qlcnic_dcb_ops qlcnic_83xx_dcb_ops = {
.get_hw_capability = qlcnic_83xx_dcb_get_hw_capability,
.query_cee_param = qlcnic_83xx_dcb_query_cee_param,
.get_cee_cfg = qlcnic_83xx_dcb_get_cee_cfg,
- .register_aen = qlcnic_83xx_dcb_register_aen,
.aen_handler = qlcnic_83xx_dcb_aen_handler,
};
@@ -260,6 +257,9 @@ int qlcnic_register_dcb(struct qlcnic_adapter *adapter)
{
struct qlcnic_dcb *dcb;
+ if (qlcnic_sriov_vf_check(adapter))
+ return 0;
+
dcb = kzalloc(sizeof(struct qlcnic_dcb), GFP_ATOMIC);
if (!dcb)
return -ENOMEM;
@@ -280,7 +280,6 @@ static void __qlcnic_dcb_free(struct qlcnic_dcb *dcb)
return;
adapter = dcb->adapter;
- qlcnic_dcb_register_aen(dcb, 0);
while (test_bit(QLCNIC_DCB_AEN_MODE, &dcb->state))
usleep_range(10000, 11000);
@@ -304,7 +303,6 @@ static void __qlcnic_dcb_get_info(struct qlcnic_dcb *dcb)
{
qlcnic_dcb_get_hw_capability(dcb);
qlcnic_dcb_get_cee_cfg(dcb);
- qlcnic_dcb_register_aen(dcb, 1);
}
static int __qlcnic_dcb_attach(struct qlcnic_dcb *dcb)
@@ -642,29 +640,6 @@ static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
return err;
}
-static int qlcnic_83xx_dcb_register_aen(struct qlcnic_dcb *dcb, bool flag)
-{
- u8 val = (flag ? QLCNIC_CMD_INIT_NIC_FUNC : QLCNIC_CMD_STOP_NIC_FUNC);
- struct qlcnic_adapter *adapter = dcb->adapter;
- struct qlcnic_cmd_args cmd;
- int err;
-
- err = qlcnic_alloc_mbx_args(&cmd, adapter, val);
- if (err)
- return err;
-
- cmd.req.arg[1] = QLC_DCB_AEN_BIT;
-
- err = qlcnic_issue_cmd(adapter, &cmd);
- if (err)
- dev_err(&adapter->pdev->dev, "Failed to %s DCBX AEN, err %d\n",
- (flag ? "register" : "unregister"), err);
-
- qlcnic_free_mbx_args(&cmd);
-
- return err;
-}
-
static void qlcnic_83xx_dcb_aen_handler(struct qlcnic_dcb *dcb, void *data)
{
u32 *val = data;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
index c04ae0cdc108..3cf4a10fbe1e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
@@ -25,7 +25,6 @@ struct qlcnic_dcb_ops {
int (*get_hw_capability) (struct qlcnic_dcb *);
int (*query_cee_param) (struct qlcnic_dcb *, char *, u8);
void (*init_dcbnl_ops) (struct qlcnic_dcb *);
- int (*register_aen) (struct qlcnic_dcb *, bool);
void (*aen_handler) (struct qlcnic_dcb *, void *);
int (*get_cee_cfg) (struct qlcnic_dcb *);
void (*get_info) (struct qlcnic_dcb *);
@@ -103,13 +102,6 @@ static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
return 0;
}
-static inline void
-qlcnic_dcb_register_aen(struct qlcnic_dcb *dcb, u8 flag)
-{
- if (dcb && dcb->ops->register_aen)
- dcb->ops->register_aen(dcb, flag);
-}
-
static inline void qlcnic_dcb_aen_handler(struct qlcnic_dcb *dcb, void *msg)
{
if (dcb && dcb->ops->aen_handler)
@@ -121,4 +113,10 @@ static inline void qlcnic_dcb_init_dcbnl_ops(struct qlcnic_dcb *dcb)
if (dcb && dcb->ops->init_dcbnl_ops)
dcb->ops->init_dcbnl_ops(dcb);
}
+
+static inline void qlcnic_dcb_enable(struct qlcnic_dcb *dcb)
+{
+ if (dcb && qlcnic_dcb_attach(dcb))
+ qlcnic_clear_dcb_ops(dcb);
+}
#endif
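qlcnic_dcb_enable() above uses an attach-or-disable idiom: if qlcnic_dcb_attach() fails, the ops are cleared so every later DCB call quietly becomes a no-op. A small sketch of the idiom (names illustrative):

	#include <stdio.h>
	#include <stddef.h>

	struct dcb;
	struct dcb_ops { int (*attach)(struct dcb *); };
	struct dcb { const struct dcb_ops *ops; };

	static int failing_attach(struct dcb *dcb) { (void)dcb; return -1; }

	static void dcb_enable(struct dcb *dcb)
	{
		if (dcb && dcb->ops && dcb->ops->attach(dcb))
			dcb->ops = NULL;	/* disable all further DCB calls */
	}

	int main(void)
	{
		static const struct dcb_ops ops = { .attach = failing_attach };
		struct dcb dcb = { .ops = &ops };

		dcb_enable(&dcb);
		printf("dcb %s\n", dcb.ops ? "active" : "disabled");
		return 0;
	}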
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 6b08194aa0d4..acee1a5d80c6 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -229,7 +229,7 @@ static const u32 ext_diag_registers[] = {
-1
};
-#define QLCNIC_MGMT_API_VERSION 2
+#define QLCNIC_MGMT_API_VERSION 3
#define QLCNIC_ETHTOOL_REGS_VER 4
static inline int qlcnic_get_ring_regs_len(struct qlcnic_adapter *adapter)
@@ -278,21 +278,8 @@ qlcnic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
sizeof(drvinfo->version));
}
-static int
-qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
-{
- struct qlcnic_adapter *adapter = netdev_priv(dev);
-
- if (qlcnic_82xx_check(adapter))
- return qlcnic_82xx_get_settings(adapter, ecmd);
- else if (qlcnic_83xx_check(adapter))
- return qlcnic_83xx_get_settings(adapter, ecmd);
-
- return -EIO;
-}
-
-int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter,
- struct ethtool_cmd *ecmd)
+static int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter,
+ struct ethtool_cmd *ecmd)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
u32 speed, reg;
@@ -433,6 +420,20 @@ skip:
return 0;
}
+static int qlcnic_get_settings(struct net_device *dev,
+ struct ethtool_cmd *ecmd)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+ if (qlcnic_82xx_check(adapter))
+ return qlcnic_82xx_get_settings(adapter, ecmd);
+ else if (qlcnic_83xx_check(adapter))
+ return qlcnic_83xx_get_settings(adapter, ecmd);
+
+ return -EIO;
+}
+
static int qlcnic_set_port_config(struct qlcnic_adapter *adapter,
struct ethtool_cmd *ecmd)
{
@@ -527,6 +528,9 @@ qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
regs_buff[0] = (0xcafe0000 | (QLCNIC_DEV_INFO_SIZE & 0xffff));
regs_buff[1] = QLCNIC_MGMT_API_VERSION;
+ if (adapter->ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY)
+ regs_buff[2] = adapter->ahw->max_vnic_func;
+
if (qlcnic_82xx_check(adapter))
i = qlcnic_82xx_get_registers(adapter, regs_buff);
else
@@ -732,6 +736,7 @@ static int qlcnic_set_channels(struct net_device *dev,
channel->rx_count);
return err;
}
+ adapter->drv_rss_rings = channel->rx_count;
}
if (channel->tx_count) {
@@ -742,10 +747,12 @@ static int qlcnic_set_channels(struct net_device *dev,
channel->tx_count);
return err;
}
+ adapter->drv_tss_rings = channel->tx_count;
}
- err = qlcnic_setup_rings(adapter, channel->rx_count,
- channel->tx_count);
+ adapter->flags |= QLCNIC_TSS_RSS;
+
+ err = qlcnic_setup_rings(adapter);
netdev_info(dev, "Allocated %d SDS rings and %d Tx rings\n",
adapter->drv_sds_rings, adapter->drv_tx_rings);
@@ -1052,7 +1059,7 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
return 0;
}
-int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
+static int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
int drv_tx_rings = adapter->drv_tx_rings;
@@ -1491,9 +1498,7 @@ static int qlcnic_set_intr_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ethcoal)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- struct qlcnic_nic_intr_coalesce *coal;
- u32 rx_coalesce_usecs, rx_max_frames;
- u32 tx_coalesce_usecs, tx_max_frames;
+ int err;
if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
return -EINVAL;
@@ -1503,82 +1508,31 @@ static int qlcnic_set_intr_coalesce(struct net_device *netdev,
* unsupported parameters are set.
*/
if (ethcoal->rx_coalesce_usecs > 0xffff ||
- ethcoal->rx_max_coalesced_frames > 0xffff ||
- ethcoal->tx_coalesce_usecs > 0xffff ||
- ethcoal->tx_max_coalesced_frames > 0xffff ||
- ethcoal->rx_coalesce_usecs_irq ||
- ethcoal->rx_max_coalesced_frames_irq ||
- ethcoal->tx_coalesce_usecs_irq ||
- ethcoal->tx_max_coalesced_frames_irq ||
- ethcoal->stats_block_coalesce_usecs ||
- ethcoal->use_adaptive_rx_coalesce ||
- ethcoal->use_adaptive_tx_coalesce ||
- ethcoal->pkt_rate_low ||
- ethcoal->rx_coalesce_usecs_low ||
- ethcoal->rx_max_coalesced_frames_low ||
- ethcoal->tx_coalesce_usecs_low ||
- ethcoal->tx_max_coalesced_frames_low ||
- ethcoal->pkt_rate_high ||
- ethcoal->rx_coalesce_usecs_high ||
- ethcoal->rx_max_coalesced_frames_high ||
- ethcoal->tx_coalesce_usecs_high ||
- ethcoal->tx_max_coalesced_frames_high)
+ ethcoal->rx_max_coalesced_frames > 0xffff ||
+ ethcoal->tx_coalesce_usecs > 0xffff ||
+ ethcoal->tx_max_coalesced_frames > 0xffff ||
+ ethcoal->rx_coalesce_usecs_irq ||
+ ethcoal->rx_max_coalesced_frames_irq ||
+ ethcoal->tx_coalesce_usecs_irq ||
+ ethcoal->tx_max_coalesced_frames_irq ||
+ ethcoal->stats_block_coalesce_usecs ||
+ ethcoal->use_adaptive_rx_coalesce ||
+ ethcoal->use_adaptive_tx_coalesce ||
+ ethcoal->pkt_rate_low ||
+ ethcoal->rx_coalesce_usecs_low ||
+ ethcoal->rx_max_coalesced_frames_low ||
+ ethcoal->tx_coalesce_usecs_low ||
+ ethcoal->tx_max_coalesced_frames_low ||
+ ethcoal->pkt_rate_high ||
+ ethcoal->rx_coalesce_usecs_high ||
+ ethcoal->rx_max_coalesced_frames_high ||
+ ethcoal->tx_coalesce_usecs_high ||
+ ethcoal->tx_max_coalesced_frames_high)
return -EINVAL;
- coal = &adapter->ahw->coal;
+ err = qlcnic_config_intr_coalesce(adapter, ethcoal);
- if (qlcnic_83xx_check(adapter)) {
- if (!ethcoal->tx_coalesce_usecs ||
- !ethcoal->tx_max_coalesced_frames ||
- !ethcoal->rx_coalesce_usecs ||
- !ethcoal->rx_max_coalesced_frames) {
- coal->flag = QLCNIC_INTR_DEFAULT;
- coal->type = QLCNIC_INTR_COAL_TYPE_RX;
- coal->rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
- coal->rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
- coal->tx_time_us = QLCNIC_DEF_INTR_COALESCE_TX_TIME_US;
- coal->tx_packets = QLCNIC_DEF_INTR_COALESCE_TX_PACKETS;
- } else {
- tx_coalesce_usecs = ethcoal->tx_coalesce_usecs;
- tx_max_frames = ethcoal->tx_max_coalesced_frames;
- rx_coalesce_usecs = ethcoal->rx_coalesce_usecs;
- rx_max_frames = ethcoal->rx_max_coalesced_frames;
- coal->flag = 0;
-
- if ((coal->rx_time_us == rx_coalesce_usecs) &&
- (coal->rx_packets == rx_max_frames)) {
- coal->type = QLCNIC_INTR_COAL_TYPE_TX;
- coal->tx_time_us = tx_coalesce_usecs;
- coal->tx_packets = tx_max_frames;
- } else if ((coal->tx_time_us == tx_coalesce_usecs) &&
- (coal->tx_packets == tx_max_frames)) {
- coal->type = QLCNIC_INTR_COAL_TYPE_RX;
- coal->rx_time_us = rx_coalesce_usecs;
- coal->rx_packets = rx_max_frames;
- } else {
- coal->type = QLCNIC_INTR_COAL_TYPE_RX;
- coal->rx_time_us = rx_coalesce_usecs;
- coal->rx_packets = rx_max_frames;
- coal->tx_time_us = tx_coalesce_usecs;
- coal->tx_packets = tx_max_frames;
- }
- }
- } else {
- if (!ethcoal->rx_coalesce_usecs ||
- !ethcoal->rx_max_coalesced_frames) {
- coal->flag = QLCNIC_INTR_DEFAULT;
- coal->rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
- coal->rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
- } else {
- coal->flag = 0;
- coal->rx_time_us = ethcoal->rx_coalesce_usecs;
- coal->rx_packets = ethcoal->rx_max_coalesced_frames;
- }
- }
-
- qlcnic_config_intr_coalesce(adapter);
-
- return 0;
+ return err;
}
static int qlcnic_get_intr_coalesce(struct net_device *netdev,
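
After this hunk, qlcnic_set_intr_coalesce() only validates the user's ethtool_coalesce values and delegates the per-chip programming to qlcnic_config_intr_coalesce(). The sketch below reproduces just the 16-bit range check kept by the hunk, using a trimmed stand-in struct; the real driver additionally rejects every adaptive/irq/low/high field listed above.

#include <stdio.h>

/* trimmed stand-in for struct ethtool_coalesce: only the fields the
 * driver range-checks against the firmware's 16-bit limit */
struct coal_params {
	unsigned int rx_coalesce_usecs;
	unsigned int rx_max_coalesced_frames;
	unsigned int tx_coalesce_usecs;
	unsigned int tx_max_coalesced_frames;
};

static int check_coal(const struct coal_params *c)
{
	if (c->rx_coalesce_usecs > 0xffff ||
	    c->rx_max_coalesced_frames > 0xffff ||
	    c->tx_coalesce_usecs > 0xffff ||
	    c->tx_max_coalesced_frames > 0xffff)
		return -22;	/* -EINVAL */
	return 0;
}

int main(void)
{
	struct coal_params ok = { 3, 256, 64, 64 };
	struct coal_params bad = { 0x10000, 256, 64, 64 };

	printf("ok:  %d\n", check_coal(&ok));	/* prints 0 */
	printf("bad: %d\n", check_coal(&bad));	/* prints -22 */
	return 0;
}
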
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
index d262211b03b3..34e467b239a1 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
@@ -698,7 +698,6 @@ struct qlcnic_legacy_intr_set {
};
#define QLCNIC_MSIX_BASE 0x132110
-#define QLCNIC_MAX_PCI_FUNC 8
#define QLCNIC_MAX_VLAN_FILTERS 64
#define FLASH_ROM_WINDOW 0x42110030
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 6f7f60c09f07..03d18a0be6ce 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -455,14 +455,14 @@ int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
int qlcnic_nic_del_mac(struct qlcnic_adapter *adapter, const u8 *addr)
{
+ struct qlcnic_mac_vlan_list *cur;
struct list_head *head;
- struct qlcnic_mac_list_s *cur;
int err = -EINVAL;
/* Delete MAC from the existing list */
list_for_each(head, &adapter->mac_list) {
- cur = list_entry(head, struct qlcnic_mac_list_s, list);
- if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0) {
+ cur = list_entry(head, struct qlcnic_mac_vlan_list, list);
+ if (ether_addr_equal(addr, cur->mac_addr)) {
err = qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
0, QLCNIC_MAC_DEL);
if (err)
@@ -477,17 +477,18 @@ int qlcnic_nic_del_mac(struct qlcnic_adapter *adapter, const u8 *addr)
int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr, u16 vlan)
{
+ struct qlcnic_mac_vlan_list *cur;
struct list_head *head;
- struct qlcnic_mac_list_s *cur;
/* look up if already exists */
list_for_each(head, &adapter->mac_list) {
- cur = list_entry(head, struct qlcnic_mac_list_s, list);
- if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0)
+ cur = list_entry(head, struct qlcnic_mac_vlan_list, list);
+ if (ether_addr_equal(addr, cur->mac_addr) &&
+ cur->vlan_id == vlan)
return 0;
}
- cur = kzalloc(sizeof(struct qlcnic_mac_list_s), GFP_ATOMIC);
+ cur = kzalloc(sizeof(*cur), GFP_ATOMIC);
if (cur == NULL)
return -ENOMEM;
@@ -499,11 +500,12 @@ int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr, u16 vlan)
return -EIO;
}
+ cur->vlan_id = vlan;
list_add_tail(&cur->list, &adapter->mac_list);
return 0;
}
-void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
+static void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_hardware_context *ahw = adapter->ahw;
@@ -516,8 +518,7 @@ void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
return;
- if (!qlcnic_sriov_vf_check(adapter))
- qlcnic_nic_add_mac(adapter, adapter->mac_addr, vlan);
+ qlcnic_nic_add_mac(adapter, adapter->mac_addr, vlan);
qlcnic_nic_add_mac(adapter, bcast_addr, vlan);
if (netdev->flags & IFF_PROMISC) {
@@ -526,15 +527,11 @@ void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
} else if ((netdev->flags & IFF_ALLMULTI) ||
(netdev_mc_count(netdev) > ahw->max_mc_count)) {
mode = VPORT_MISS_MODE_ACCEPT_MULTI;
- } else if (!netdev_mc_empty(netdev) &&
- !qlcnic_sriov_vf_check(adapter)) {
+ } else if (!netdev_mc_empty(netdev)) {
netdev_for_each_mc_addr(ha, netdev)
qlcnic_nic_add_mac(adapter, ha->addr, vlan);
}
- if (qlcnic_sriov_vf_check(adapter))
- qlcnic_vf_add_mc_list(netdev, vlan);
-
/* configure unicast MAC address, if there is not sufficient space
* to store all the unicast addresses then enable promiscuous mode
*/
@@ -545,14 +542,15 @@ void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
qlcnic_nic_add_mac(adapter, ha->addr, vlan);
}
- if (!qlcnic_sriov_vf_check(adapter)) {
- if (mode == VPORT_MISS_MODE_ACCEPT_ALL &&
- !adapter->fdb_mac_learn) {
- qlcnic_alloc_lb_filters_mem(adapter);
- adapter->drv_mac_learn = true;
- } else {
- adapter->drv_mac_learn = false;
- }
+ if (mode == VPORT_MISS_MODE_ACCEPT_ALL &&
+ !adapter->fdb_mac_learn) {
+ qlcnic_alloc_lb_filters_mem(adapter);
+ adapter->drv_mac_learn = 1;
+ if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
+ adapter->rx_mac_learn = true;
+ } else {
+ adapter->drv_mac_learn = 0;
+ adapter->rx_mac_learn = false;
}
qlcnic_nic_set_promisc(adapter, mode);
@@ -561,16 +559,17 @@ void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
void qlcnic_set_multi(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_mac_vlan_list *cur;
struct netdev_hw_addr *ha;
- struct qlcnic_mac_list_s *cur;
+ size_t temp;
if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
return;
if (qlcnic_sriov_vf_check(adapter)) {
if (!netdev_mc_empty(netdev)) {
netdev_for_each_mc_addr(ha, netdev) {
- cur = kzalloc(sizeof(struct qlcnic_mac_list_s),
- GFP_ATOMIC);
+ temp = sizeof(struct qlcnic_mac_vlan_list);
+ cur = kzalloc(temp, GFP_ATOMIC);
if (cur == NULL)
break;
memcpy(cur->mac_addr,
@@ -605,11 +604,11 @@ int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
void qlcnic_82xx_free_mac_list(struct qlcnic_adapter *adapter)
{
- struct qlcnic_mac_list_s *cur;
struct list_head *head = &adapter->mac_list;
+ struct qlcnic_mac_vlan_list *cur;
while (!list_empty(head)) {
- cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
+ cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
qlcnic_sre_macaddr_change(adapter,
cur->mac_addr, 0, QLCNIC_MAC_DEL);
list_del(&cur->list);
@@ -756,10 +755,7 @@ int qlcnic_82xx_read_phys_port_id(struct qlcnic_adapter *adapter)
return 0;
}
-/*
- * Send the interrupt coalescing parameter set by ethtool to the card.
- */
-void qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *adapter)
+int qlcnic_82xx_set_rx_coalesce(struct qlcnic_adapter *adapter)
{
struct qlcnic_nic_req req;
int rv;
@@ -781,10 +777,32 @@ void qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *adapter)
if (rv != 0)
dev_err(&adapter->netdev->dev,
"Could not send interrupt coalescing parameters\n");
+
+ return rv;
+}
+
+/* Send the interrupt coalescing parameter set by ethtool to the card. */
+int qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *adapter,
+ struct ethtool_coalesce *ethcoal)
+{
+ struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
+ int rv;
+
+ coal->flag = QLCNIC_INTR_DEFAULT;
+ coal->rx_time_us = ethcoal->rx_coalesce_usecs;
+ coal->rx_packets = ethcoal->rx_max_coalesced_frames;
+
+ rv = qlcnic_82xx_set_rx_coalesce(adapter);
+
+ if (rv)
+ netdev_err(adapter->netdev,
+ "Failed to set Rx coalescing parametrs\n");
+
+ return rv;
}
-#define QLCNIC_ENABLE_IPV4_LRO 1
-#define QLCNIC_ENABLE_IPV6_LRO 2
+#define QLCNIC_ENABLE_IPV4_LRO BIT_0
+#define QLCNIC_ENABLE_IPV6_LRO (BIT_1 | BIT_9)
int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
{
@@ -948,7 +966,7 @@ int qlcnic_82xx_linkevent_request(struct qlcnic_adapter *adapter, int enable)
return rv;
}
-int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter)
+static int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter)
{
struct qlcnic_nic_req req;
u64 word;
@@ -1247,7 +1265,7 @@ static int qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter,
return 0;
}
-void
+static void
qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
{
void __iomem *addr = adapter->ahw->pci_base0 +
@@ -1258,7 +1276,7 @@ qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
mutex_unlock(&adapter->ahw->mem_lock);
}
-void
+static void
qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data)
{
void __iomem *addr = adapter->ahw->pci_base0 +
@@ -1494,7 +1512,7 @@ int qlcnic_82xx_get_board_info(struct qlcnic_adapter *adapter)
return 0;
}
-int
+static int
qlcnic_wol_supported(struct qlcnic_adapter *adapter)
{
u32 wol_cfg;
@@ -1534,19 +1552,34 @@ int qlcnic_82xx_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
return rv;
}
-int qlcnic_get_beacon_state(struct qlcnic_adapter *adapter, u8 *h_state)
+void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlcnic_cmd_args cmd;
- int err;
+ u8 beacon_state;
+ int err = 0;
- err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LED_STATUS);
- if (!err) {
- err = qlcnic_issue_cmd(adapter, &cmd);
- if (!err)
- *h_state = cmd.rsp.arg[1];
+ if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_BEACON) {
+ err = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_GET_LED_STATUS);
+ if (!err) {
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ netdev_err(adapter->netdev,
+ "Failed to get current beacon state, err=%d\n",
+ err);
+ } else {
+ beacon_state = cmd.rsp.arg[1];
+ if (beacon_state == QLCNIC_BEACON_DISABLE)
+ ahw->beacon_state = QLCNIC_BEACON_OFF;
+ else if (beacon_state == QLCNIC_BEACON_EANBLE)
+ ahw->beacon_state = QLCNIC_BEACON_ON;
+ }
+ }
+ qlcnic_free_mbx_args(&cmd);
}
- qlcnic_free_mbx_args(&cmd);
- return err;
}
void qlcnic_82xx_get_func_no(struct qlcnic_adapter *adapter)
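
A recurring theme in the qlcnic_hw.c hunks is the switch from struct qlcnic_mac_list_s plus memcmp() to struct qlcnic_mac_vlan_list plus ether_addr_equal(), with the duplicate check in qlcnic_nic_add_mac() now keyed on the (MAC, VLAN) pair. A small userspace sketch of that pair-keyed list follows, with simplified types and a memcmp()-based stand-in for ether_addr_equal():

#include <stdio.h>
#include <string.h>
#include <stdlib.h>

struct mac_vlan_entry {
	unsigned char mac[6];
	unsigned short vlan_id;
	struct mac_vlan_entry *next;
};

/* userspace stand-in for ether_addr_equal() */
static int mac_equal(const unsigned char *a, const unsigned char *b)
{
	return memcmp(a, b, 6) == 0;
}

static int add_mac(struct mac_vlan_entry **head,
		   const unsigned char *mac, unsigned short vlan)
{
	struct mac_vlan_entry *cur;

	for (cur = *head; cur; cur = cur->next)
		if (mac_equal(cur->mac, mac) && cur->vlan_id == vlan)
			return 0;	/* (MAC, VLAN) already programmed */

	cur = calloc(1, sizeof(*cur));
	if (!cur)
		return -12;		/* -ENOMEM */
	memcpy(cur->mac, mac, 6);
	cur->vlan_id = vlan;
	cur->next = *head;
	*head = cur;
	return 1;			/* newly added */
}

int main(void)
{
	struct mac_vlan_entry *list = NULL;
	unsigned char mac[6] = { 0x00, 0x0e, 0x1e, 0xaa, 0xbb, 0xcc };

	printf("%d\n", add_mac(&list, mac, 10));	/* 1: new entry */
	printf("%d\n", add_mac(&list, mac, 10));	/* 0: duplicate */
	printf("%d\n", add_mac(&list, mac, 20));	/* 1: same MAC, new VLAN */
	return 0;
}
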
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index 13303e7d1ed7..63d75617d445 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -162,16 +162,18 @@ struct qlcnic_host_tx_ring;
struct qlcnic_hardware_context;
struct qlcnic_adapter;
-int qlcnic_82xx_start_firmware(struct qlcnic_adapter *);
int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong, int *);
int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *, ulong, u32);
int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int);
int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32);
int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
struct net_device *netdev);
+void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *);
void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter,
u64 *uaddr, u16 vlan_id);
-void qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *adapter);
+int qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *,
+ struct ethtool_coalesce *);
+int qlcnic_82xx_set_rx_coalesce(struct qlcnic_adapter *);
int qlcnic_82xx_config_rss(struct qlcnic_adapter *adapter, int);
void qlcnic_82xx_config_ipaddr(struct qlcnic_adapter *adapter,
__be32, int);
@@ -181,9 +183,6 @@ int qlcnic_82xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8);
int qlcnic_82xx_set_lb_mode(struct qlcnic_adapter *, u8);
void qlcnic_82xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
void qlcnic_82xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
-void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *, u32);
-int qlcnic_82xx_setup_intr(struct qlcnic_adapter *);
-irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *);
int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
struct qlcnic_cmd_args *);
int qlcnic_82xx_mq_intrpt(struct qlcnic_adapter *, int);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index ad1531ae3aa8..54ebf300332a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -124,41 +124,16 @@
#define qlcnic_83xx_is_ip_align(sts) (((sts) >> 46) & 1)
#define qlcnic_83xx_has_vlan_tag(sts) (((sts) >> 47) & 1)
-struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *,
- struct qlcnic_host_rds_ring *, u16, u16);
+static int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring,
+ int max);
-inline void qlcnic_enable_tx_intr(struct qlcnic_adapter *adapter,
- struct qlcnic_host_tx_ring *tx_ring)
-{
- if (qlcnic_check_multi_tx(adapter) &&
- !adapter->ahw->diag_test)
- writel(0x0, tx_ring->crb_intr_mask);
-}
+static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *,
+ struct qlcnic_host_rds_ring *,
+ u16, u16);
-
-static inline void qlcnic_disable_tx_int(struct qlcnic_adapter *adapter,
- struct qlcnic_host_tx_ring *tx_ring)
+static inline u8 qlcnic_mac_hash(u64 mac, u16 vlan)
{
- if (qlcnic_check_multi_tx(adapter) &&
- !adapter->ahw->diag_test)
- writel(1, tx_ring->crb_intr_mask);
-}
-
-inline void qlcnic_83xx_enable_tx_intr(struct qlcnic_adapter *adapter,
- struct qlcnic_host_tx_ring *tx_ring)
-{
- writel(0, tx_ring->crb_intr_mask);
-}
-
-inline void qlcnic_83xx_disable_tx_intr(struct qlcnic_adapter *adapter,
- struct qlcnic_host_tx_ring *tx_ring)
-{
- writel(1, tx_ring->crb_intr_mask);
-}
-
-static inline u8 qlcnic_mac_hash(u64 mac)
-{
- return (u8)((mac & 0xff) ^ ((mac >> 40) & 0xff));
+ return (u8)((mac & 0xff) ^ ((mac >> 40) & 0xff) ^ (vlan & 0xff));
}
static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter,
@@ -202,7 +177,7 @@ static struct qlcnic_filter *qlcnic_find_mac_filter(struct hlist_head *head,
struct hlist_node *n;
hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
- if (!memcmp(tmp_fil->faddr, addr, ETH_ALEN) &&
+ if (ether_addr_equal(tmp_fil->faddr, addr) &&
tmp_fil->vlan_id == vlan_id)
return tmp_fil;
}
@@ -210,8 +185,8 @@ static struct qlcnic_filter *qlcnic_find_mac_filter(struct hlist_head *head,
return NULL;
}
-void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
- int loopback_pkt, u16 vlan_id)
+static void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter,
+ struct sk_buff *skb, int loopback_pkt, u16 vlan_id)
{
struct ethhdr *phdr = (struct ethhdr *)(skb->data);
struct qlcnic_filter *fil, *tmp_fil;
@@ -221,8 +196,11 @@ void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
u8 hindex, op;
int ret;
+ if (!qlcnic_sriov_pf_check(adapter) || (vlan_id == 0xffff))
+ vlan_id = 0;
+
memcpy(&src_addr, phdr->h_source, ETH_ALEN);
- hindex = qlcnic_mac_hash(src_addr) &
+ hindex = qlcnic_mac_hash(src_addr, vlan_id) &
(adapter->fhash.fbucket_size - 1);
if (loopback_pkt) {
@@ -322,31 +300,47 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
struct cmd_desc_type0 *first_desc,
struct sk_buff *skb)
{
+ struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
+ struct ethhdr *phdr = (struct ethhdr *)(skb->data);
+ struct net_device *netdev = adapter->netdev;
+ u16 protocol = ntohs(skb->protocol);
struct qlcnic_filter *fil, *tmp_fil;
- struct hlist_node *n;
struct hlist_head *head;
- struct net_device *netdev = adapter->netdev;
- struct ethhdr *phdr = (struct ethhdr *)(skb->data);
+ struct hlist_node *n;
u64 src_addr = 0;
u16 vlan_id = 0;
- u8 hindex;
+ u8 hindex, hval;
- if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
- return;
+ if (!qlcnic_sriov_pf_check(adapter)) {
+ if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
+ return;
+ } else {
+ if (protocol == ETH_P_8021Q) {
+ vh = (struct vlan_ethhdr *)skb->data;
+ vlan_id = ntohs(vh->h_vlan_TCI);
+ } else if (vlan_tx_tag_present(skb)) {
+ vlan_id = vlan_tx_tag_get(skb);
+ }
+
+ if (ether_addr_equal(phdr->h_source, adapter->mac_addr) &&
+ !vlan_id)
+ return;
+ }
if (adapter->fhash.fnum >= adapter->fhash.fmax) {
adapter->stats.mac_filter_limit_overrun++;
- netdev_info(netdev, "Can not add more than %d mac addresses\n",
- adapter->fhash.fmax);
+ netdev_info(netdev, "Can not add more than %d mac-vlan filters, configured %d\n",
+ adapter->fhash.fmax, adapter->fhash.fnum);
return;
}
memcpy(&src_addr, phdr->h_source, ETH_ALEN);
- hindex = qlcnic_mac_hash(src_addr) & (adapter->fhash.fbucket_size - 1);
+ hval = qlcnic_mac_hash(src_addr, vlan_id);
+ hindex = hval & (adapter->fhash.fbucket_size - 1);
head = &(adapter->fhash.fhead[hindex]);
hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
- if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
+ if (ether_addr_equal(tmp_fil->faddr, (u8 *)&src_addr) &&
tmp_fil->vlan_id == vlan_id) {
if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
qlcnic_change_filter(adapter, &src_addr,
@@ -689,12 +683,17 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
adapter->ahw->linkup = 0;
netif_carrier_off(netdev);
} else if (!adapter->ahw->linkup && linkup) {
- /* Do not advertise Link up if the port is in loopback mode */
- if (qlcnic_83xx_check(adapter) && adapter->ahw->lb_mode)
+ adapter->ahw->linkup = 1;
+
+ /* Do not advertise Link up to the stack if device
+ * is in loopback mode
+ */
+ if (qlcnic_83xx_check(adapter) && adapter->ahw->lb_mode) {
+ netdev_info(netdev, "NIC Link is up for loopback test\n");
return;
+ }
netdev_info(netdev, "NIC Link is up\n");
- adapter->ahw->linkup = 1;
netif_carrier_on(netdev);
}
}
@@ -862,7 +861,7 @@ static int qlcnic_poll(struct napi_struct *napi, int budget)
if ((work_done < budget) && tx_complete) {
napi_complete(&sds_ring->napi);
if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
- qlcnic_enable_int(sds_ring);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
qlcnic_enable_tx_intr(adapter, tx_ring);
}
}
@@ -903,7 +902,7 @@ static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
if (work_done < budget) {
napi_complete(&sds_ring->napi);
if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
- qlcnic_enable_int(sds_ring);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
}
return work_done;
@@ -1015,9 +1014,9 @@ static void qlcnic_handle_fw_message(int desc_cnt, int index,
}
}
-struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
- struct qlcnic_host_rds_ring *ring,
- u16 index, u16 cksum)
+static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_rds_ring *ring,
+ u16 index, u16 cksum)
{
struct qlcnic_rx_buffer *buffer;
struct sk_buff *skb;
@@ -1156,13 +1155,13 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
u16 lro_length, length, data_offset, t_vid, vid = 0xffff;
u32 seq_number;
- if (unlikely(ring > adapter->max_rds_rings))
+ if (unlikely(ring >= adapter->max_rds_rings))
return NULL;
rds_ring = &recv_ctx->rds_rings[ring];
index = qlcnic_get_lro_sts_refhandle(sts_data0);
- if (unlikely(index > rds_ring->num_desc))
+ if (unlikely(index >= rds_ring->num_desc))
return NULL;
buffer = &rds_ring->rx_buf_arr[index];
@@ -1236,7 +1235,7 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
return buffer;
}
-int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
+static int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring,
+				   int max)
{
struct qlcnic_host_rds_ring *rds_ring;
struct qlcnic_adapter *adapter = sds_ring->adapter;
@@ -1466,8 +1465,7 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
if (qlcnic_check_multi_tx(adapter) &&
- !adapter->ahw->diag_test &&
- (adapter->drv_tx_rings > QLCNIC_SINGLE_RING)) {
+ !adapter->ahw->diag_test) {
netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
NAPI_POLL_WEIGHT);
} else {
@@ -1535,13 +1533,12 @@ void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
napi_enable(&sds_ring->napi);
- qlcnic_enable_int(sds_ring);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
}
if (qlcnic_check_multi_tx(adapter) &&
(adapter->flags & QLCNIC_MSIX_ENABLED) &&
- !adapter->ahw->diag_test &&
- (adapter->drv_tx_rings > QLCNIC_SINGLE_RING)) {
+ !adapter->ahw->diag_test) {
for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
napi_enable(&tx_ring->napi);
@@ -1562,7 +1559,7 @@ void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
- qlcnic_disable_int(sds_ring);
+ qlcnic_disable_sds_intr(adapter, sds_ring);
napi_synchronize(&sds_ring->napi);
napi_disable(&sds_ring->napi);
}
@@ -1572,7 +1569,7 @@ void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
qlcnic_check_multi_tx(adapter)) {
for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
- qlcnic_disable_tx_int(adapter, tx_ring);
+ qlcnic_disable_tx_intr(adapter, tx_ring);
napi_synchronize(&tx_ring->napi);
napi_disable(&tx_ring->napi);
}
@@ -1601,7 +1598,8 @@ qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
struct sk_buff *skb;
struct qlcnic_host_rds_ring *rds_ring;
int index, length, cksum, is_lb_pkt;
- u16 vid = 0xffff, t_vid;
+ u16 vid = 0xffff;
+ int err;
if (unlikely(ring >= adapter->max_rds_rings))
return NULL;
@@ -1619,19 +1617,19 @@ qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
if (!skb)
return buffer;
- if (adapter->drv_mac_learn &&
- (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
- t_vid = 0;
- is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 0);
- qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
- }
-
if (length > rds_ring->skb_size)
skb_put(skb, rds_ring->skb_size);
else
skb_put(skb, length);
- if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
+ err = qlcnic_check_rx_tagging(adapter, skb, &vid);
+
+ if (adapter->rx_mac_learn) {
+ is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 0);
+ qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, vid);
+ }
+
+ if (unlikely(err)) {
adapter->stats.rxdropped++;
dev_kfree_skb(skb);
return buffer;
@@ -1666,15 +1664,16 @@ qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
int l2_hdr_offset, l4_hdr_offset;
int index, is_lb_pkt;
u16 lro_length, length, data_offset, gso_size;
- u16 vid = 0xffff, t_vid;
+ u16 vid = 0xffff;
+ int err;
- if (unlikely(ring > adapter->max_rds_rings))
+ if (unlikely(ring >= adapter->max_rds_rings))
return NULL;
rds_ring = &recv_ctx->rds_rings[ring];
index = qlcnic_83xx_hndl(sts_data[0]);
- if (unlikely(index > rds_ring->num_desc))
+ if (unlikely(index >= rds_ring->num_desc))
return NULL;
buffer = &rds_ring->rx_buf_arr[index];
@@ -1688,12 +1687,6 @@ qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
if (!skb)
return buffer;
- if (adapter->drv_mac_learn &&
- (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
- t_vid = 0;
- is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 1);
- qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
- }
if (qlcnic_83xx_is_tstamp(sts_data[1]))
data_offset = l4_hdr_offset + QLCNIC_TCP_TS_HDR_SIZE;
else
@@ -1702,7 +1695,14 @@ qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
skb_put(skb, lro_length + data_offset);
skb_pull(skb, l2_hdr_offset);
- if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
+ err = qlcnic_check_rx_tagging(adapter, skb, &vid);
+
+ if (adapter->rx_mac_learn) {
+ is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 1);
+ qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, vid);
+ }
+
+ if (unlikely(err)) {
adapter->stats.rxdropped++;
dev_kfree_skb(skb);
return buffer;
@@ -1832,7 +1832,7 @@ static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget)
work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
if ((work_done < budget) && tx_complete) {
napi_complete(&sds_ring->napi);
- qlcnic_83xx_enable_intr(adapter, sds_ring);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
}
return work_done;
@@ -1855,7 +1855,7 @@ static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
if ((work_done < budget) && tx_complete) {
napi_complete(&sds_ring->napi);
- qlcnic_83xx_enable_intr(adapter, sds_ring);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
}
return work_done;
@@ -1874,7 +1874,7 @@ static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget)
if (work_done) {
napi_complete(&tx_ring->napi);
if (test_bit(__QLCNIC_DEV_UP , &adapter->state))
- qlcnic_83xx_enable_tx_intr(adapter, tx_ring);
+ qlcnic_enable_tx_intr(adapter, tx_ring);
}
return work_done;
@@ -1892,7 +1892,7 @@ static int qlcnic_83xx_rx_poll(struct napi_struct *napi, int budget)
if (work_done < budget) {
napi_complete(&sds_ring->napi);
if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
- qlcnic_83xx_enable_intr(adapter, sds_ring);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
}
return work_done;
@@ -1912,7 +1912,7 @@ void qlcnic_83xx_napi_enable(struct qlcnic_adapter *adapter)
sds_ring = &recv_ctx->sds_rings[ring];
napi_enable(&sds_ring->napi);
if (adapter->flags & QLCNIC_MSIX_ENABLED)
- qlcnic_83xx_enable_intr(adapter, sds_ring);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
}
if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
@@ -1920,7 +1920,7 @@ void qlcnic_83xx_napi_enable(struct qlcnic_adapter *adapter)
for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
napi_enable(&tx_ring->napi);
- qlcnic_83xx_enable_tx_intr(adapter, tx_ring);
+ qlcnic_enable_tx_intr(adapter, tx_ring);
}
}
}
@@ -1938,7 +1938,7 @@ void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
if (adapter->flags & QLCNIC_MSIX_ENABLED)
- qlcnic_83xx_disable_intr(adapter, sds_ring);
+ qlcnic_disable_sds_intr(adapter, sds_ring);
napi_synchronize(&sds_ring->napi);
napi_disable(&sds_ring->napi);
}
@@ -1947,7 +1947,7 @@ void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
!(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
- qlcnic_83xx_disable_tx_intr(adapter, tx_ring);
+ qlcnic_disable_tx_intr(adapter, tx_ring);
napi_synchronize(&tx_ring->napi);
napi_disable(&tx_ring->napi);
}
@@ -2027,8 +2027,8 @@ void qlcnic_83xx_napi_del(struct qlcnic_adapter *adapter)
qlcnic_free_tx_rings(adapter);
}
-void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *adapter,
- int ring, u64 sts_data[])
+static void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *adapter,
+ int ring, u64 sts_data[])
{
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
struct sk_buff *skb;
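
The qlcnic_io.c hunk above reworks qlcnic_mac_hash() to fold the VLAN id into the bucket index, so one source MAC seen on two VLANs yields two distinct filter entries. A runnable sketch of exactly that arithmetic, using arbitrary sample values rather than driver data:

#include <stdio.h>
#include <stdint.h>

/* same arithmetic as the reworked qlcnic_mac_hash(): XOR the low and
 * high MAC bytes together with the low VLAN byte */
static uint8_t mac_hash(uint64_t mac, uint16_t vlan)
{
	return (uint8_t)((mac & 0xff) ^ ((mac >> 40) & 0xff) ^ (vlan & 0xff));
}

int main(void)
{
	uint64_t mac = 0x001e0eaabbccULL;	/* arbitrary sample address */
	unsigned int bucket_size = 256;		/* must be a power of two */

	/* same MAC, different VLANs now land in different buckets */
	printf("vlan 0:   bucket %u\n", mac_hash(mac, 0) & (bucket_size - 1));
	printf("vlan 100: bucket %u\n", mac_hash(mac, 100) & (bucket_size - 1));
	return 0;
}
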
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 550791b8fbae..ba78c7481fa3 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -81,6 +81,16 @@ static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
static int qlcnic_vlan_rx_add(struct net_device *, __be16, u16);
static int qlcnic_vlan_rx_del(struct net_device *, __be16, u16);
+static int qlcnic_82xx_setup_intr(struct qlcnic_adapter *);
+static void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *, u32);
+static irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *);
+static pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *);
+static int qlcnic_82xx_start_firmware(struct qlcnic_adapter *);
+static void qlcnic_82xx_io_resume(struct pci_dev *);
+static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *);
+static pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *,
+ pci_channel_state_t);
+
static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
@@ -308,12 +318,12 @@ int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
static void qlcnic_delete_adapter_mac(struct qlcnic_adapter *adapter)
{
- struct qlcnic_mac_list_s *cur;
+ struct qlcnic_mac_vlan_list *cur;
struct list_head *head;
list_for_each(head, &adapter->mac_list) {
- cur = list_entry(head, struct qlcnic_mac_list_s, list);
- if (!memcmp(adapter->mac_addr, cur->mac_addr, ETH_ALEN)) {
+ cur = list_entry(head, struct qlcnic_mac_vlan_list, list);
+ if (ether_addr_equal_unaligned(adapter->mac_addr, cur->mac_addr)) {
qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
0, QLCNIC_MAC_DEL);
list_del(&cur->list);
@@ -337,7 +347,7 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EINVAL;
- if (!memcmp(adapter->mac_addr, addr->sa_data, ETH_ALEN))
+ if (ether_addr_equal_unaligned(adapter->mac_addr, addr->sa_data))
return 0;
if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
@@ -546,6 +556,11 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = {
.io_error_detected = qlcnic_82xx_io_error_detected,
.io_slot_reset = qlcnic_82xx_io_slot_reset,
.io_resume = qlcnic_82xx_io_resume,
+ .get_beacon_state = qlcnic_82xx_get_beacon_state,
+ .enable_sds_intr = qlcnic_82xx_enable_sds_intr,
+ .disable_sds_intr = qlcnic_82xx_disable_sds_intr,
+ .enable_tx_intr = qlcnic_82xx_enable_tx_intr,
+ .disable_tx_intr = qlcnic_82xx_disable_tx_intr,
};
static int qlcnic_check_multi_tx_capability(struct qlcnic_adapter *adapter)
@@ -588,9 +603,6 @@ void qlcnic_set_tx_ring_count(struct qlcnic_adapter *adapter, u8 tx_cnt)
QLCNIC_TX_QUEUE);
else
adapter->drv_tx_rings = tx_cnt;
-
- dev_info(&adapter->pdev->dev, "Set %d Tx rings\n",
- adapter->drv_tx_rings);
}
void qlcnic_set_sds_ring_count(struct qlcnic_adapter *adapter, u8 rx_cnt)
@@ -601,25 +613,79 @@ void qlcnic_set_sds_ring_count(struct qlcnic_adapter *adapter, u8 rx_cnt)
QLCNIC_RX_QUEUE);
else
adapter->drv_sds_rings = rx_cnt;
-
- dev_info(&adapter->pdev->dev, "Set %d SDS rings\n",
- adapter->drv_sds_rings);
}
-int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
+int qlcnic_setup_tss_rss_intr(struct qlcnic_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
- int drv_tx_rings, drv_sds_rings, tx_vector;
- int err = -1, i;
+ int num_msix = 0, err = 0, vector;
+
+ adapter->flags &= ~QLCNIC_TSS_RSS;
+
+ if (adapter->drv_tss_rings > 0)
+ num_msix += adapter->drv_tss_rings;
+ else
+ num_msix += adapter->drv_tx_rings;
+
+ if (adapter->drv_rss_rings > 0)
+ num_msix += adapter->drv_rss_rings;
+ else
+ num_msix += adapter->drv_sds_rings;
+
+ if (qlcnic_83xx_check(adapter))
+ num_msix += 1;
+
+ if (!adapter->msix_entries) {
+ adapter->msix_entries = kcalloc(num_msix,
+ sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!adapter->msix_entries)
+ return -ENOMEM;
+ }
+
+restore:
+ for (vector = 0; vector < num_msix; vector++)
+ adapter->msix_entries[vector].entry = vector;
- if (adapter->flags & QLCNIC_TX_INTR_SHARED) {
- drv_tx_rings = 0;
- tx_vector = 0;
+ err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
+ if (err == 0) {
+ adapter->ahw->num_msix = num_msix;
+ if (adapter->drv_tss_rings > 0)
+ adapter->drv_tx_rings = adapter->drv_tss_rings;
+
+ if (adapter->drv_rss_rings > 0)
+ adapter->drv_sds_rings = adapter->drv_rss_rings;
} else {
- drv_tx_rings = adapter->drv_tx_rings;
- tx_vector = 1;
+ netdev_info(adapter->netdev,
+ "Unable to allocate %d MSI-X vectors, Available vectors %d\n",
+ num_msix, err);
+
+ num_msix = adapter->drv_tx_rings + adapter->drv_sds_rings;
+
+ /* Set rings to 0 so we can restore original TSS/RSS count */
+ adapter->drv_tss_rings = 0;
+ adapter->drv_rss_rings = 0;
+
+ if (qlcnic_83xx_check(adapter))
+ num_msix += 1;
+
+ netdev_info(adapter->netdev,
+ "Restoring %d Tx, %d SDS rings for total %d vectors.\n",
+ adapter->drv_tx_rings, adapter->drv_sds_rings,
+ num_msix);
+ goto restore;
}
+ return err;
+}
+
+int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int err = -1, vector;
+
if (!adapter->msix_entries) {
adapter->msix_entries = kcalloc(num_msix,
sizeof(struct msix_entry),
@@ -628,48 +694,43 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
return -ENOMEM;
}
- adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
if (adapter->ahw->msix_supported) {
- enable_msix:
- for (i = 0; i < num_msix; i++)
- adapter->msix_entries[i].entry = i;
+enable_msix:
+ for (vector = 0; vector < num_msix; vector++)
+ adapter->msix_entries[vector].entry = vector;
+
err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
if (err == 0) {
adapter->flags |= QLCNIC_MSIX_ENABLED;
- if (qlcnic_83xx_check(adapter)) {
- adapter->ahw->num_msix = num_msix;
- /* subtract mail box and tx ring vectors */
- adapter->drv_sds_rings = num_msix -
- drv_tx_rings - 1;
- } else {
- adapter->ahw->num_msix = num_msix;
- if (qlcnic_check_multi_tx(adapter) &&
- !adapter->ahw->diag_test &&
- (adapter->drv_tx_rings > 1))
- drv_sds_rings = num_msix - drv_tx_rings;
- else
- drv_sds_rings = num_msix;
-
- adapter->drv_sds_rings = drv_sds_rings;
- }
+ adapter->ahw->num_msix = num_msix;
dev_info(&pdev->dev, "using msi-x interrupts\n");
return err;
} else if (err > 0) {
dev_info(&pdev->dev,
- "Unable to allocate %d MSI-X interrupt vectors\n",
- num_msix);
- if (qlcnic_83xx_check(adapter)) {
- if (err < (QLC_83XX_MINIMUM_VECTOR - tx_vector))
- return err;
- err -= drv_tx_rings + 1;
+ "Unable to allocate %d MSI-X vectors, Available vectors %d\n",
+ num_msix, err);
+
+ if (qlcnic_82xx_check(adapter)) {
num_msix = rounddown_pow_of_two(err);
- num_msix += drv_tx_rings + 1;
+ if (err < QLCNIC_82XX_MINIMUM_VECTOR)
+ return -EIO;
} else {
- num_msix = rounddown_pow_of_two(err);
- if (qlcnic_check_multi_tx(adapter))
- num_msix += drv_tx_rings;
+ num_msix = rounddown_pow_of_two(err - 1);
+ num_msix += 1;
+ if (err < QLCNIC_83XX_MINIMUM_VECTOR)
+ return -EIO;
+ }
+
+ if (qlcnic_82xx_check(adapter) &&
+ !qlcnic_check_multi_tx(adapter)) {
+ adapter->drv_sds_rings = num_msix;
+ adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
+ } else {
+ /* Distribute vectors equally */
+ adapter->drv_tx_rings = num_msix / 2;
+ adapter->drv_sds_rings = adapter->drv_tx_rings;
}
if (num_msix) {
@@ -680,14 +741,29 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
}
} else {
dev_info(&pdev->dev,
- "Unable to allocate %d MSI-X interrupt vectors\n",
- num_msix);
+ "Unable to allocate %d MSI-X vectors, err=%d\n",
+ num_msix, err);
+ return err;
}
}
return err;
}
+static int qlcnic_82xx_calculate_msix_vector(struct qlcnic_adapter *adapter)
+{
+ int num_msix;
+
+ num_msix = adapter->drv_sds_rings;
+
+ if (qlcnic_check_multi_tx(adapter))
+ num_msix += adapter->drv_tx_rings;
+ else
+ num_msix += QLCNIC_SINGLE_RING;
+
+ return num_msix;
+}
+
static int qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
{
int err = 0;
@@ -722,25 +798,29 @@ static int qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
return err;
}
-int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter)
+static int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter)
{
int num_msix, err = 0;
- num_msix = adapter->drv_sds_rings;
-
- if (qlcnic_check_multi_tx(adapter))
- num_msix += adapter->drv_tx_rings;
+ if (adapter->flags & QLCNIC_TSS_RSS) {
+ err = qlcnic_setup_tss_rss_intr(adapter);
+ if (err < 0)
+ return err;
+ num_msix = adapter->ahw->num_msix;
+ } else {
+ num_msix = qlcnic_82xx_calculate_msix_vector(adapter);
- err = qlcnic_enable_msix(adapter, num_msix);
- if (err == -ENOMEM)
- return err;
+ err = qlcnic_enable_msix(adapter, num_msix);
+ if (err == -ENOMEM)
+ return err;
- if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
- qlcnic_disable_multi_tx(adapter);
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ qlcnic_disable_multi_tx(adapter);
- err = qlcnic_enable_msi_legacy(adapter);
- if (!err)
- return err;
+ err = qlcnic_enable_msi_legacy(adapter);
+ if (!err)
+ return err;
+ }
}
return 0;
@@ -800,25 +880,26 @@ static void qlcnic_cleanup_pci_map(struct qlcnic_hardware_context *ahw)
static int qlcnic_get_act_pci_func(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlcnic_pci_info *pci_info;
int ret;
if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
- switch (adapter->ahw->port_type) {
+ switch (ahw->port_type) {
case QLCNIC_GBE:
- adapter->ahw->act_pci_func = QLCNIC_NIU_MAX_GBE_PORTS;
+ ahw->total_nic_func = QLCNIC_NIU_MAX_GBE_PORTS;
break;
case QLCNIC_XGBE:
- adapter->ahw->act_pci_func = QLCNIC_NIU_MAX_XG_PORTS;
+ ahw->total_nic_func = QLCNIC_NIU_MAX_XG_PORTS;
break;
}
return 0;
}
- if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
+ if (ahw->op_mode == QLCNIC_MGMT_FUNC)
return 0;
- pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
+ pci_info = kcalloc(ahw->max_vnic_func, sizeof(*pci_info), GFP_KERNEL);
if (!pci_info)
return -ENOMEM;
@@ -846,12 +927,13 @@ static bool qlcnic_port_eswitch_cfg_capability(struct qlcnic_adapter *adapter)
int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlcnic_pci_info *pci_info;
int i, id = 0, ret = 0, j = 0;
u16 act_pci_func;
u8 pfn;
- pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
+ pci_info = kcalloc(ahw->max_vnic_func, sizeof(*pci_info), GFP_KERNEL);
if (!pci_info)
return -ENOMEM;
@@ -859,7 +941,7 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
if (ret)
goto err_pci_info;
- act_pci_func = adapter->ahw->act_pci_func;
+ act_pci_func = ahw->total_nic_func;
adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
act_pci_func, GFP_KERNEL);
@@ -875,10 +957,10 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
goto err_npars;
}
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ for (i = 0; i < ahw->max_vnic_func; i++) {
pfn = pci_info[i].id;
- if (pfn >= QLCNIC_MAX_PCI_FUNC) {
+ if (pfn >= ahw->max_vnic_func) {
ret = QL_STATUS_INVALID_PARAM;
goto err_eswitch;
}
@@ -1346,7 +1428,7 @@ int qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
if (adapter->need_fw_reset)
return 0;
- for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+ for (i = 0; i < adapter->ahw->total_nic_func; i++) {
if (!adapter->npars[i].eswitch_status)
continue;
@@ -1409,7 +1491,7 @@ int qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
return 0;
/* Set the NPAR config data after FW reset */
- for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+ for (i = 0; i < adapter->ahw->total_nic_func; i++) {
npar = &adapter->npars[i];
pci_func = npar->pci_func;
if (!adapter->npars[i].eswitch_status)
@@ -1484,7 +1566,7 @@ qlcnic_set_mgmt_operations(struct qlcnic_adapter *adapter)
return err;
}
-int qlcnic_82xx_start_firmware(struct qlcnic_adapter *adapter)
+static int qlcnic_82xx_start_firmware(struct qlcnic_adapter *adapter)
{
int err;
@@ -1685,6 +1767,33 @@ static void qlcnic_get_lro_mss_capability(struct qlcnic_adapter *adapter)
}
}
+static int qlcnic_config_def_intr_coalesce(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int err;
+
+ /* Initialize interrupt coalesce parameters */
+ ahw->coal.flag = QLCNIC_INTR_DEFAULT;
+
+ if (qlcnic_83xx_check(adapter)) {
+ ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX_TX;
+ ahw->coal.tx_time_us = QLCNIC_DEF_INTR_COALESCE_TX_TIME_US;
+ ahw->coal.tx_packets = QLCNIC_DEF_INTR_COALESCE_TX_PACKETS;
+ ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
+ ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
+
+ err = qlcnic_83xx_set_rx_tx_intr_coal(adapter);
+ } else {
+ ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX;
+ ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
+ ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
+
+ err = qlcnic_82xx_set_rx_coalesce(adapter);
+ }
+
+ return err;
+}
+
int __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
int ring;
@@ -1717,7 +1826,7 @@ int __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
if (adapter->drv_sds_rings > 1)
qlcnic_config_rss(adapter, 1);
- qlcnic_config_intr_coalesce(adapter);
+ qlcnic_config_def_intr_coalesce(adapter);
if (netdev->features & NETIF_F_LRO)
qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
@@ -1728,6 +1837,7 @@ int __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
qlcnic_linkevent_request(adapter, 1);
adapter->ahw->reset_context = 0;
+ netif_tx_start_all_queues(netdev);
return 0;
}
@@ -1862,7 +1972,7 @@ void qlcnic_diag_free_res(struct net_device *netdev, int drv_sds_rings)
if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx->sds_rings[ring];
- qlcnic_disable_int(sds_ring);
+ qlcnic_disable_sds_intr(adapter, sds_ring);
}
}
@@ -1885,7 +1995,6 @@ out:
static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
{
- struct qlcnic_hardware_context *ahw = adapter->ahw;
int err = 0;
adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context),
@@ -1894,15 +2003,7 @@ static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
err = -ENOMEM;
goto err_out;
}
- /* Initialize interrupt coalesce parameters */
- ahw->coal.flag = QLCNIC_INTR_DEFAULT;
- ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX;
- ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
- ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
- if (qlcnic_83xx_check(adapter)) {
- ahw->coal.tx_time_us = QLCNIC_DEF_INTR_COALESCE_TX_TIME_US;
- ahw->coal.tx_packets = QLCNIC_DEF_INTR_COALESCE_TX_PACKETS;
- }
+
/* clear stats */
memset(&adapter->stats, 0, sizeof(adapter->stats));
err_out:
@@ -1963,7 +2064,7 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx->sds_rings[ring];
- qlcnic_enable_int(sds_ring);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
}
}
@@ -1995,7 +2096,7 @@ qlcnic_reset_hw_context(struct qlcnic_adapter *adapter)
netif_device_attach(netdev);
clear_bit(__QLCNIC_RESETTING, &adapter->state);
- dev_err(&adapter->pdev->dev, "%s:\n", __func__);
+ netdev_info(adapter->netdev, "%s: soft reset complete\n", __func__);
return 0;
}
@@ -2032,10 +2133,10 @@ qlcnic_reset_context(struct qlcnic_adapter *adapter)
return err;
}
-void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
+static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
- u16 act_pci_fn = ahw->act_pci_func;
+ u16 act_pci_fn = ahw->total_nic_func;
u16 count;
ahw->max_mc_count = QLCNIC_MAX_MC_COUNT;
@@ -2211,7 +2312,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct qlcnic_hardware_context *ahw;
int err, pci_using_dac = -1;
char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */
- struct qlcnic_dcb *dcb;
if (pdev->is_virtfn)
return -ENODEV;
@@ -2289,7 +2389,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_free_wq;
adapter->dev_rst_time = jiffies;
- adapter->ahw->revision_id = pdev->revision;
+ ahw->revision_id = pdev->revision;
+ ahw->max_vnic_func = qlcnic_get_vnic_func_count(adapter);
if (qlcnic_mac_learn == FDB_MAC_LEARN)
adapter->fdb_mac_learn = true;
else if (qlcnic_mac_learn == DRV_MAC_LEARN)
@@ -2333,10 +2434,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->flags |= QLCNIC_NEED_FLR;
- dcb = adapter->dcb;
-
- if (dcb && qlcnic_dcb_attach(dcb))
- qlcnic_clear_dcb_ops(dcb);
} else if (qlcnic_83xx_check(adapter)) {
qlcnic_83xx_check_vf(adapter, ent);
adapter->portnum = adapter->ahw->pci_func;
@@ -2365,6 +2462,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_free_hw;
}
+ qlcnic_dcb_enable(adapter->dcb);
+
if (qlcnic_read_mac_addr(adapter))
dev_warn(&pdev->dev, "failed to read mac addr\n");
@@ -2498,13 +2597,11 @@ static void qlcnic_remove(struct pci_dev *pdev)
qlcnic_cancel_idc_work(adapter);
ahw = adapter->ahw;
- qlcnic_dcb_free(adapter->dcb);
-
unregister_netdev(netdev);
qlcnic_sriov_cleanup(adapter);
if (qlcnic_83xx_check(adapter)) {
- qlcnic_83xx_register_nic_idc_func(adapter, 0);
+ qlcnic_83xx_initialize_nic(adapter, 0);
cancel_delayed_work_sync(&adapter->idc_aen_work);
qlcnic_83xx_free_mbx_intr(adapter);
qlcnic_83xx_detach_mailbox_work(adapter);
@@ -2512,6 +2609,8 @@ static void qlcnic_remove(struct pci_dev *pdev)
kfree(ahw->fw_info);
}
+ qlcnic_dcb_free(adapter->dcb);
+
qlcnic_detach(adapter);
if (adapter->npars != NULL)
@@ -2606,14 +2705,8 @@ static int qlcnic_open(struct net_device *netdev)
err = __qlcnic_up(adapter, netdev);
if (err)
- goto err_out;
-
- netif_tx_start_all_queues(netdev);
-
- return 0;
+ qlcnic_detach(adapter);
-err_out:
- qlcnic_detach(adapter);
return err;
}
@@ -2640,7 +2733,7 @@ void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
if (adapter->fhash.fmax && adapter->fhash.fhead)
return;
- act_pci_func = adapter->ahw->act_pci_func;
+ act_pci_func = adapter->ahw->total_nic_func;
spin_lock_init(&adapter->mac_learn_lock);
spin_lock_init(&adapter->rx_mac_learn_lock);
@@ -2737,12 +2830,58 @@ int qlcnic_check_temp(struct qlcnic_adapter *adapter)
return rv;
}
-static void qlcnic_tx_timeout(struct net_device *netdev)
+static inline void dump_tx_ring_desc(struct qlcnic_host_tx_ring *tx_ring)
{
- struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int i;
+ struct cmd_desc_type0 *tx_desc_info;
+
+ for (i = 0; i < tx_ring->num_desc; i++) {
+ tx_desc_info = &tx_ring->desc_head[i];
+ pr_info("TX Desc: %d\n", i);
+ print_hex_dump(KERN_INFO, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
+ tx_desc_info,
+ sizeof(struct cmd_desc_type0), true);
+ }
+}
+
+static void qlcnic_dump_tx_rings(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
struct qlcnic_host_tx_ring *tx_ring;
int ring;
+ if (!netdev || !netif_running(netdev))
+ return;
+
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ netdev_info(netdev, "Tx ring=%d Context Id=0x%x\n",
+ ring, tx_ring->ctx_id);
+ netdev_info(netdev,
+ "xmit_finished=%llu, xmit_called=%llu, xmit_on=%llu, xmit_off=%llu\n",
+ tx_ring->tx_stats.xmit_finished,
+ tx_ring->tx_stats.xmit_called,
+ tx_ring->tx_stats.xmit_on,
+ tx_ring->tx_stats.xmit_off);
+ netdev_info(netdev,
+ "crb_intr_mask=%d, hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n",
+ readl(tx_ring->crb_intr_mask),
+ readl(tx_ring->crb_cmd_producer),
+ tx_ring->producer, tx_ring->sw_consumer,
+ le32_to_cpu(*(tx_ring->hw_consumer)));
+
+ netdev_info(netdev, "Total desc=%d, Available desc=%d\n",
+ tx_ring->num_desc, qlcnic_tx_avail(tx_ring));
+
+ if (netif_msg_tx_done(adapter->ahw))
+ dump_tx_ring_desc(tx_ring);
+ }
+}
+
+static void qlcnic_tx_timeout(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
if (test_bit(__QLCNIC_RESETTING, &adapter->state))
return;
@@ -2755,22 +2894,7 @@ static void qlcnic_tx_timeout(struct net_device *netdev)
QLCNIC_FORCE_FW_DUMP_KEY);
} else {
netdev_info(netdev, "Tx timeout, reset adapter context.\n");
- for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
- tx_ring = &adapter->tx_ring[ring];
- netdev_info(netdev, "Tx ring=%d\n", ring);
- netdev_info(netdev,
- "crb_intr_mask=%d, producer=%d, sw_consumer=%d, hw_consumer=%d\n",
- readl(tx_ring->crb_intr_mask),
- readl(tx_ring->crb_cmd_producer),
- tx_ring->sw_consumer,
- le32_to_cpu(*(tx_ring->hw_consumer)));
- netdev_info(netdev,
- "xmit_finished=%llu, xmit_called=%llu, xmit_on=%llu, xmit_off=%llu\n",
- tx_ring->tx_stats.xmit_finished,
- tx_ring->tx_stats.xmit_called,
- tx_ring->tx_stats.xmit_on,
- tx_ring->tx_stats.xmit_off);
- }
+ qlcnic_dump_tx_rings(adapter);
adapter->ahw->reset_context = 1;
}
}
@@ -2793,7 +2917,7 @@ static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
return stats;
}
-irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *adapter)
+static irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *adapter)
{
u32 status;
@@ -2832,7 +2956,7 @@ static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
done:
adapter->ahw->diag_cnt++;
- qlcnic_enable_int(sds_ring);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
return IRQ_HANDLED;
}
@@ -2880,17 +3004,39 @@ static irqreturn_t qlcnic_msix_tx_intr(int irq, void *data)
#ifdef CONFIG_NET_POLL_CONTROLLER
static void qlcnic_poll_controller(struct net_device *netdev)
{
- int ring;
- struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_tx_ring *tx_ring;
+ int ring;
+
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ return;
+
+ recv_ctx = adapter->recv_ctx;
- disable_irq(adapter->irq);
for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
- qlcnic_intr(adapter->irq, sds_ring);
+ qlcnic_disable_sds_intr(adapter, sds_ring);
+ napi_schedule(&sds_ring->napi);
+ }
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ /* Only Multi-Tx queue capable devices need to
+ * schedule NAPI for TX rings
+ */
+ if ((qlcnic_83xx_check(adapter) &&
+ (adapter->flags & QLCNIC_TX_INTR_SHARED)) ||
+ (qlcnic_82xx_check(adapter) &&
+ !qlcnic_check_multi_tx(adapter)))
+ return;
+
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ qlcnic_disable_tx_intr(adapter, tx_ring);
+ napi_schedule(&tx_ring->napi);
+ }
}
- enable_irq(adapter->irq);
}
#endif
@@ -3286,7 +3432,8 @@ qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
qlcnic_api_unlock(adapter);
}
-void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *adapter, u32 key)
+static void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *adapter,
+ u32 key)
{
u32 state, xg_val = 0, gb_val = 0;
@@ -3581,8 +3728,8 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
return err;
}
-pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
+static pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
{
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
@@ -3612,13 +3759,13 @@ pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *pdev,
return PCI_ERS_RESULT_NEED_RESET;
}
-pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *pdev)
+static pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *pdev)
{
return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
PCI_ERS_RESULT_RECOVERED;
}
-void qlcnic_82xx_io_resume(struct pci_dev *pdev)
+static void qlcnic_82xx_io_resume(struct pci_dev *pdev)
{
u32 state;
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
@@ -3726,12 +3873,6 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
return -EINVAL;
}
- if (ring_cnt < 2) {
- netdev_err(netdev,
- "%s rings value should not be lower than 2\n", buf);
- return -EINVAL;
- }
-
if (!is_power_of_2(ring_cnt)) {
netdev_err(netdev, "%s rings value should be a power of 2\n",
buf);
@@ -3754,7 +3895,7 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
return 0;
}
-int qlcnic_setup_rings(struct qlcnic_adapter *adapter, u8 rx_cnt, u8 tx_cnt)
+int qlcnic_setup_rings(struct qlcnic_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
int err;
@@ -3775,12 +3916,6 @@ int qlcnic_setup_rings(struct qlcnic_adapter *adapter, u8 rx_cnt, u8 tx_cnt)
qlcnic_teardown_intr(adapter);
- /* compute and set default and max tx/sds rings */
- qlcnic_set_tx_ring_count(adapter, tx_cnt);
- qlcnic_set_sds_ring_count(adapter, rx_cnt);
-
- netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
-
err = qlcnic_setup_intr(adapter);
if (err) {
kfree(adapter->msix_entries);
@@ -3788,9 +3923,10 @@ int qlcnic_setup_rings(struct qlcnic_adapter *adapter, u8 rx_cnt, u8 tx_cnt)
return err;
}
+ netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
+
if (qlcnic_83xx_check(adapter)) {
- /* register for NIC IDC AEN Events */
- qlcnic_83xx_register_nic_idc_func(adapter, 1);
+ qlcnic_83xx_initialize_nic(adapter, 1);
err = qlcnic_83xx_setup_mbx_intr(adapter);
qlcnic_83xx_disable_mbx_poll(adapter);
if (err) {
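
When pci_enable_msix() returns a positive count (partial allocation), the reworked qlcnic_enable_msix() above retries with a reduced vector budget: plain rounddown_pow_of_two() on 82xx, and one mailbox vector plus rounddown_pow_of_two() of the remainder on 83xx. A standalone sketch of that retry arithmetic; rounddown_pow2() is a userspace stand-in for the kernel helper, and the minimum-vector checks the driver performs are omitted here.

#include <stdio.h>

/* userspace stand-in for the kernel's rounddown_pow_of_two() */
static unsigned int rounddown_pow2(unsigned int n)
{
	unsigned int p = 1;

	while (p * 2 <= n)
		p *= 2;
	return p;
}

/* mirrors the partial-allocation fallback in qlcnic_enable_msix();
 * assumes avail >= 2 on 83xx, which the driver's minimum checks ensure */
static unsigned int retry_vectors(unsigned int avail, int is_83xx)
{
	if (!is_83xx)
		return rounddown_pow2(avail);
	return rounddown_pow2(avail - 1) + 1;	/* +1 mailbox vector */
}

int main(void)
{
	printf("82xx, 7 available -> retry with %u\n", retry_vectors(7, 0)); /* 4 */
	printf("83xx, 7 available -> retry with %u\n", retry_vectors(7, 1)); /* 5 */
	return 0;
}
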
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
index 0daf660e12a1..396bd1fd1d27 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
@@ -126,8 +126,8 @@ struct qlcnic_vport {
u16 handle;
u16 max_tx_bw;
u16 min_tx_bw;
+ u16 pvid;
u8 vlan_mode;
- u16 vlan;
u8 qos;
bool spoofchk;
u8 mac[6];
@@ -137,6 +137,8 @@ struct qlcnic_vf_info {
u8 pci_func;
u16 rx_ctx_id;
u16 tx_ctx_id;
+ u16 *sriov_vlans;
+ int num_vlan;
unsigned long state;
struct completion ch_free_cmpl;
struct work_struct trans_work;
@@ -149,6 +151,7 @@ struct qlcnic_vf_info {
struct qlcnic_trans_list rcv_pend;
struct qlcnic_adapter *adapter;
struct qlcnic_vport *vp;
+ struct mutex vlan_list_lock; /* Lock for VLAN list */
};
struct qlcnic_async_work_list {
@@ -185,7 +188,6 @@ void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context *);
int qlcnic_sriov_vf_init(struct qlcnic_adapter *, int);
void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter *);
int qlcnic_sriov_func_to_index(struct qlcnic_adapter *, u8);
-int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *, u8);
void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *, u32);
int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *, u8);
void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *);
@@ -195,8 +197,13 @@ int __qlcnic_sriov_add_act_list(struct qlcnic_sriov *, struct qlcnic_vf_info *,
int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *,
struct qlcnic_info *, u16);
int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *, u16, u8);
-int qlcnic_sriov_vf_shutdown(struct pci_dev *);
-int qlcnic_sriov_vf_resume(struct qlcnic_adapter *);
+void qlcnic_sriov_free_vlans(struct qlcnic_adapter *);
+void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *);
+bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *);
+void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *,
+ struct qlcnic_vf_info *, u16);
+void qlcnic_sriov_add_vlan_id(struct qlcnic_sriov *,
+ struct qlcnic_vf_info *, u16);
static inline bool qlcnic_sriov_enable_check(struct qlcnic_adapter *adapter)
{
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 21a4b274d2e4..0638c1810d54 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -35,7 +35,10 @@ static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *);
static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *);
static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *,
struct qlcnic_cmd_args *);
+static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *, u8);
static void qlcnic_sriov_process_bc_cmd(struct work_struct *);
+static int qlcnic_sriov_vf_shutdown(struct pci_dev *);
+static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *);
static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
.read_crb = qlcnic_83xx_read_crb,
@@ -68,6 +71,8 @@ static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
.change_l2_filter = qlcnic_83xx_change_l2_filter,
.get_board_info = qlcnic_83xx_get_port_info,
.free_mac_list = qlcnic_sriov_vf_free_mac_list,
+ .enable_sds_intr = qlcnic_83xx_enable_sds_intr,
+ .disable_sds_intr = qlcnic_83xx_disable_sds_intr,
};
static struct qlcnic_nic_template qlcnic_sriov_vf_ops = {
@@ -176,6 +181,7 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
vf->adapter = adapter;
vf->pci_func = qlcnic_sriov_virtid_fn(adapter, i);
mutex_init(&vf->send_cmd_lock);
+ mutex_init(&vf->vlan_list_lock);
INIT_LIST_HEAD(&vf->rcv_act.wait_list);
INIT_LIST_HEAD(&vf->rcv_pend.wait_list);
spin_lock_init(&vf->rcv_act.lock);
@@ -276,6 +282,11 @@ static void qlcnic_sriov_vf_cleanup(struct qlcnic_adapter *adapter)
void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
{
+ if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state))
+ return;
+
+ qlcnic_sriov_free_vlans(adapter);
+
if (qlcnic_sriov_pf_check(adapter))
qlcnic_sriov_pf_cleanup(adapter);
@@ -416,10 +427,15 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
return 0;
sriov->any_vlan = cmd->rsp.arg[2] & 0xf;
+ sriov->num_allowed_vlans = cmd->rsp.arg[2] >> 16;
+ dev_info(&adapter->pdev->dev, "Number of allowed Guest VLANs = %d\n",
+ sriov->num_allowed_vlans);
+
+ qlcnic_sriov_alloc_vlans(adapter);
+
if (!sriov->any_vlan)
return 0;
- sriov->num_allowed_vlans = cmd->rsp.arg[2] >> 16;
num_vlans = sriov->num_allowed_vlans;
sriov->allowed_vlans = kzalloc(sizeof(u16) * num_vlans, GFP_KERNEL);
if (!sriov->allowed_vlans)
@@ -432,8 +448,7 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
return 0;
}
-static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter,
- struct qlcnic_info *info)
+static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter)
{
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
struct qlcnic_cmd_args cmd;
@@ -473,14 +488,12 @@ static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
if (err)
return err;
+ ahw->max_mc_count = nic_info.max_rx_mcast_mac_filters;
+
err = qlcnic_get_nic_info(adapter, &nic_info, ahw->pci_func);
if (err)
return -EIO;
- err = qlcnic_sriov_get_vf_acl(adapter, &nic_info);
- if (err)
- return err;
-
if (qlcnic_83xx_get_port_info(adapter))
return -EIO;
@@ -500,7 +513,6 @@ static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
int pci_using_dac)
{
- struct qlcnic_dcb *dcb;
int err;
INIT_LIST_HEAD(&adapter->vf_mc_list);
@@ -538,10 +550,9 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
if (err)
goto err_out_send_channel_term;
- dcb = adapter->dcb;
-
- if (dcb && qlcnic_dcb_attach(dcb))
- qlcnic_clear_dcb_ops(dcb);
+ err = qlcnic_sriov_get_vf_acl(adapter);
+ if (err)
+ goto err_out_send_channel_term;
err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac);
if (err)
@@ -1417,7 +1428,7 @@ cleanup_transaction:
return rsp;
}
-int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_op)
+static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_op)
{
struct qlcnic_cmd_args cmd;
struct qlcnic_vf_info *vf = &adapter->ahw->sriov->vf_info[0];
@@ -1447,18 +1458,27 @@ out:
return ret;
}
-void qlcnic_vf_add_mc_list(struct net_device *netdev, u16 vlan)
+static void qlcnic_vf_add_mc_list(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- struct qlcnic_mac_list_s *cur;
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_mac_vlan_list *cur;
struct list_head *head, tmp_list;
+ struct qlcnic_vf_info *vf;
+ u16 vlan_id;
+ int i;
+
+ static const u8 bcast_addr[ETH_ALEN] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ };
+ vf = &adapter->ahw->sriov->vf_info[0];
INIT_LIST_HEAD(&tmp_list);
head = &adapter->vf_mc_list;
netif_addr_lock_bh(netdev);
while (!list_empty(head)) {
- cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
+ cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
list_move(&cur->list, &tmp_list);
}
@@ -1466,8 +1486,28 @@ void qlcnic_vf_add_mc_list(struct net_device *netdev, u16 vlan)
while (!list_empty(&tmp_list)) {
cur = list_entry((&tmp_list)->next,
- struct qlcnic_mac_list_s, list);
- qlcnic_nic_add_mac(adapter, cur->mac_addr, vlan);
+ struct qlcnic_mac_vlan_list, list);
+ if (!qlcnic_sriov_check_any_vlan(vf)) {
+ qlcnic_nic_add_mac(adapter, bcast_addr, 0);
+ qlcnic_nic_add_mac(adapter, cur->mac_addr, 0);
+ } else {
+ mutex_lock(&vf->vlan_list_lock);
+ for (i = 0; i < sriov->num_allowed_vlans; i++) {
+ vlan_id = vf->sriov_vlans[i];
+ if (vlan_id) {
+ qlcnic_nic_add_mac(adapter, bcast_addr,
+ vlan_id);
+ qlcnic_nic_add_mac(adapter,
+ cur->mac_addr,
+ vlan_id);
+ }
+ }
+ mutex_unlock(&vf->vlan_list_lock);
+ if (qlcnic_84xx_check(adapter)) {
+ qlcnic_nic_add_mac(adapter, bcast_addr, 0);
+ qlcnic_nic_add_mac(adapter, cur->mac_addr, 0);
+ }
+ }
list_del(&cur->list);
kfree(cur);
}
@@ -1490,13 +1530,24 @@ void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
static void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- u16 vlan;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 mode = VPORT_MISS_MODE_DROP;
if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
return;
- vlan = adapter->ahw->sriov->vlan;
- __qlcnic_set_multi(netdev, vlan);
+ if (netdev->flags & IFF_PROMISC) {
+ if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
+ mode = VPORT_MISS_MODE_ACCEPT_ALL;
+ } else if ((netdev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(netdev) > ahw->max_mc_count)) {
+ mode = VPORT_MISS_MODE_ACCEPT_MULTI;
+ }
+
+ if (qlcnic_sriov_vf_check(adapter))
+ qlcnic_vf_add_mc_list(netdev);
+
+ qlcnic_nic_set_promisc(adapter, mode);
}
static void qlcnic_sriov_handle_async_multi(struct work_struct *work)
@@ -1584,8 +1635,6 @@ static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
if (err)
goto err_out_term_channel;
- qlcnic_dcb_get_info(adapter->dcb);
-
return 0;
err_out_term_channel:
@@ -1833,18 +1882,60 @@ static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *adapter)
cancel_delayed_work_sync(&adapter->fw_work);
}
-static int qlcnic_sriov_validate_vlan_cfg(struct qlcnic_sriov *sriov,
+static int qlcnic_sriov_check_vlan_id(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf, u16 vlan_id)
+{
+ int i, err = -EINVAL;
+
+ if (!vf->sriov_vlans)
+ return err;
+
+ mutex_lock(&vf->vlan_list_lock);
+
+ for (i = 0; i < sriov->num_allowed_vlans; i++) {
+ if (vf->sriov_vlans[i] == vlan_id) {
+ err = 0;
+ break;
+ }
+ }
+
+ mutex_unlock(&vf->vlan_list_lock);
+ return err;
+}
+
+static int qlcnic_sriov_validate_num_vlans(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf)
+{
+ int err = 0;
+
+ mutex_lock(&vf->vlan_list_lock);
+
+ if (vf->num_vlan >= sriov->num_allowed_vlans)
+ err = -EINVAL;
+
+ mutex_unlock(&vf->vlan_list_lock);
+ return err;
+}
+
+static int qlcnic_sriov_validate_vlan_cfg(struct qlcnic_adapter *adapter,
u16 vid, u8 enable)
{
- u16 vlan = sriov->vlan;
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vf_info *vf;
+ bool vlan_exist;
u8 allowed = 0;
int i;
+ vf = &adapter->ahw->sriov->vf_info[0];
+ vlan_exist = qlcnic_sriov_check_any_vlan(vf);
if (sriov->vlan_mode != QLC_GUEST_VLAN_MODE)
return -EINVAL;
if (enable) {
- if (vlan)
+ if (qlcnic_83xx_vf_check(adapter) && vlan_exist)
+ return -EINVAL;
+
+ if (qlcnic_sriov_validate_num_vlans(sriov, vf))
return -EINVAL;
if (sriov->any_vlan) {
@@ -1857,24 +1948,54 @@ static int qlcnic_sriov_validate_vlan_cfg(struct qlcnic_sriov *sriov,
return -EINVAL;
}
} else {
- if (!vlan || vlan != vid)
+ if (!vlan_exist || qlcnic_sriov_check_vlan_id(sriov, vf, vid))
return -EINVAL;
}
return 0;
}
+static void qlcnic_sriov_vlan_operation(struct qlcnic_vf_info *vf, u16 vlan_id,
+ enum qlcnic_vlan_operations opcode)
+{
+ struct qlcnic_adapter *adapter = vf->adapter;
+ struct qlcnic_sriov *sriov;
+
+ sriov = adapter->ahw->sriov;
+
+ if (!vf->sriov_vlans)
+ return;
+
+ mutex_lock(&vf->vlan_list_lock);
+
+ switch (opcode) {
+ case QLC_VLAN_ADD:
+ qlcnic_sriov_add_vlan_id(sriov, vf, vlan_id);
+ break;
+ case QLC_VLAN_DELETE:
+ qlcnic_sriov_del_vlan_id(sriov, vf, vlan_id);
+ break;
+ default:
+ netdev_err(adapter->netdev, "Invalid VLAN operation\n");
+ }
+
+ mutex_unlock(&vf->vlan_list_lock);
+ return;
+}
+
int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
u16 vid, u8 enable)
{
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vf_info *vf;
struct qlcnic_cmd_args cmd;
int ret;
if (vid == 0)
return 0;
- ret = qlcnic_sriov_validate_vlan_cfg(sriov, vid, enable);
+ vf = &adapter->ahw->sriov->vf_info[0];
+ ret = qlcnic_sriov_validate_vlan_cfg(adapter, vid, enable);
if (ret)
return ret;
@@ -1894,11 +2015,11 @@ int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
qlcnic_free_mac_list(adapter);
if (enable)
- sriov->vlan = vid;
+ qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_ADD);
else
- sriov->vlan = 0;
+ qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_DELETE);
- qlcnic_sriov_vf_set_multi(adapter->netdev);
+ qlcnic_set_multi(adapter->netdev);
}
qlcnic_free_mbx_args(&cmd);
@@ -1908,21 +2029,19 @@ int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *adapter)
{
struct list_head *head = &adapter->mac_list;
- struct qlcnic_mac_list_s *cur;
- u16 vlan;
-
- vlan = adapter->ahw->sriov->vlan;
+ struct qlcnic_mac_vlan_list *cur;
while (!list_empty(head)) {
- cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
- qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
- vlan, QLCNIC_MAC_DEL);
+ cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
+ qlcnic_sre_macaddr_change(adapter, cur->mac_addr, cur->vlan_id,
+ QLCNIC_MAC_DEL);
list_del(&cur->list);
kfree(cur);
}
}
-int qlcnic_sriov_vf_shutdown(struct pci_dev *pdev)
+
+static int qlcnic_sriov_vf_shutdown(struct pci_dev *pdev)
{
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
@@ -1946,7 +2065,7 @@ int qlcnic_sriov_vf_shutdown(struct pci_dev *pdev)
return 0;
}
-int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
+static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
{
struct qlc_83xx_idc *idc = &adapter->ahw->idc;
struct net_device *netdev = adapter->netdev;
@@ -1972,3 +2091,70 @@ int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
idc->delay);
return err;
}
+
+void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vf_info *vf;
+ int i;
+
+ for (i = 0; i < sriov->num_vfs; i++) {
+ vf = &sriov->vf_info[i];
+ vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans,
+ sizeof(*vf->sriov_vlans), GFP_KERNEL);
+ }
+}
+
+void qlcnic_sriov_free_vlans(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vf_info *vf;
+ int i;
+
+ for (i = 0; i < sriov->num_vfs; i++) {
+ vf = &sriov->vf_info[i];
+ kfree(vf->sriov_vlans);
+ vf->sriov_vlans = NULL;
+ }
+}
+
+void qlcnic_sriov_add_vlan_id(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf, u16 vlan_id)
+{
+ int i;
+
+ for (i = 0; i < sriov->num_allowed_vlans; i++) {
+ if (!vf->sriov_vlans[i]) {
+ vf->sriov_vlans[i] = vlan_id;
+ vf->num_vlan++;
+ return;
+ }
+ }
+}
+
+void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf, u16 vlan_id)
+{
+ int i;
+
+ for (i = 0; i < sriov->num_allowed_vlans; i++) {
+ if (vf->sriov_vlans[i] == vlan_id) {
+ vf->sriov_vlans[i] = 0;
+ vf->num_vlan--;
+ return;
+ }
+ }
+}
+
+bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *vf)
+{
+ bool err = false;
+
+ mutex_lock(&vf->vlan_list_lock);
+
+ if (vf->num_vlan)
+ err = true;
+
+ mutex_unlock(&vf->vlan_list_lock);
+ return err;
+}
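qlcnic_sriov_alloc_vlans(), qlcnic_sriov_free_vlans() and the add/del/check helpers above replace the old single sriov->vlan word with a fixed-size per-VF array guarded by vlan_list_lock. A standalone model of that bookkeeping in plain C follows; unlike the driver, where several callers hold the lock around add/del themselves, this sketch locks internally:

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

struct vf_vlans {
        pthread_mutex_t lock;
        uint16_t *ids;          /* num_allowed slots, 0 == free slot */
        int num_allowed;
        int num_vlan;
};

static bool vf_vlans_init(struct vf_vlans *v, int num_allowed)
{
        v->ids = calloc(num_allowed, sizeof(*v->ids));
        if (!v->ids)
                return false;
        v->num_allowed = num_allowed;
        v->num_vlan = 0;
        pthread_mutex_init(&v->lock, NULL);
        return true;
}

static int vf_vlans_add(struct vf_vlans *v, uint16_t vid)
{
        int i, err = -1;

        pthread_mutex_lock(&v->lock);
        for (i = 0; i < v->num_allowed; i++)
                if (!v->ids[i]) {               /* take the first free slot */
                        v->ids[i] = vid;
                        v->num_vlan++;
                        err = 0;
                        break;
                }
        pthread_mutex_unlock(&v->lock);
        return err;
}

static void vf_vlans_del(struct vf_vlans *v, uint16_t vid)
{
        int i;

        pthread_mutex_lock(&v->lock);
        for (i = 0; i < v->num_allowed; i++)
                if (v->ids[i] == vid) {         /* clear the slot */
                        v->ids[i] = 0;
                        v->num_vlan--;
                        break;
                }
        pthread_mutex_unlock(&v->lock);
}

An empty slot is encoded as VLAN ID 0, which is consistent with qlcnic_sriov_cfg_vf_guest_vlan() returning early for vid == 0 before the list is ever touched.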
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 024f8161d2fe..09acf15c3a56 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -9,9 +9,14 @@
#include "qlcnic.h"
#include <linux/types.h>
-#define QLCNIC_SRIOV_VF_MAX_MAC 1
+#define QLCNIC_SRIOV_VF_MAX_MAC 7
#define QLC_VF_MIN_TX_RATE 100
#define QLC_VF_MAX_TX_RATE 9999
+#define QLC_MAC_OPCODE_MASK 0x7
+#define QLC_MAC_STAR_ADD 6
+#define QLC_MAC_STAR_DEL 7
+#define QLC_VF_FLOOD_BIT BIT_16
+#define QLC_FLOOD_MODE 0x5
static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *, u8);
@@ -64,9 +69,10 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
{
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
struct qlcnic_resources *res = &sriov->ff_max;
- u32 temp, num_vf_macs, num_vfs, max;
+ u16 num_macs = sriov->num_allowed_vlans + 1;
int ret = -EIO, vpid, id;
struct qlcnic_vport *vp;
+ u32 num_vfs, max, temp;
vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func);
if (vpid < 0)
@@ -75,16 +81,25 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
num_vfs = sriov->num_vfs;
max = num_vfs + 1;
info->bit_offsets = 0xffff;
+ info->max_tx_ques = res->num_tx_queues / max;
+
+ if (qlcnic_83xx_pf_check(adapter))
+ num_macs = 1;
+
info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters;
- num_vf_macs = QLCNIC_SRIOV_VF_MAX_MAC;
if (adapter->ahw->pci_func == func) {
- temp = res->num_rx_mcast_mac_filters - (num_vfs * num_vf_macs);
- info->max_rx_ucast_mac_filters = temp;
- temp = res->num_tx_mac_filters - (num_vfs * num_vf_macs);
- info->max_tx_mac_filters = temp;
info->min_tx_bw = 0;
info->max_tx_bw = MAX_BW;
+
+ temp = res->num_rx_ucast_mac_filters - num_macs * num_vfs;
+ info->max_rx_ucast_mac_filters = temp;
+ temp = res->num_tx_mac_filters - num_macs * num_vfs;
+ info->max_tx_mac_filters = temp;
+ temp = num_macs * num_vfs * QLCNIC_SRIOV_VF_MAX_MAC;
+ temp = res->num_rx_mcast_mac_filters - temp;
+ info->max_rx_mcast_mac_filters = temp;
+
info->max_tx_ques = res->num_tx_queues - sriov->num_vfs;
} else {
id = qlcnic_sriov_func_to_index(adapter, func);
@@ -93,8 +108,12 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
vp = sriov->vf_info[id].vp;
info->min_tx_bw = vp->min_tx_bw;
info->max_tx_bw = vp->max_tx_bw;
- info->max_rx_ucast_mac_filters = num_vf_macs;
- info->max_tx_mac_filters = num_vf_macs;
+
+ info->max_rx_ucast_mac_filters = num_macs;
+ info->max_tx_mac_filters = num_macs;
+ temp = num_macs * QLCNIC_SRIOV_VF_MAX_MAC;
+ info->max_rx_mcast_mac_filters = temp;
+
info->max_tx_ques = QLCNIC_SINGLE_RING;
}
@@ -133,6 +152,25 @@ static void qlcnic_sriov_pf_set_ff_max_res(struct qlcnic_adapter *adapter,
ff_max->max_local_ipv6_addrs = info->max_local_ipv6_addrs;
}
+static void qlcnic_sriov_set_vf_max_vlan(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *npar_info)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ int temp, total_fn;
+
+ temp = npar_info->max_rx_mcast_mac_filters;
+ total_fn = sriov->num_vfs + 1;
+
+ temp = temp / (QLCNIC_SRIOV_VF_MAX_MAC * total_fn);
+ sriov->num_allowed_vlans = temp - 1;
+
+ if (qlcnic_83xx_pf_check(adapter))
+ sriov->num_allowed_vlans = 1;
+
+ netdev_info(adapter->netdev, "Max Guest VLANs supported per VF = %d\n",
+ sriov->num_allowed_vlans);
+}
+
static int qlcnic_sriov_get_pf_info(struct qlcnic_adapter *adapter,
struct qlcnic_info *npar_info)
{
@@ -166,6 +204,7 @@ static int qlcnic_sriov_get_pf_info(struct qlcnic_adapter *adapter,
npar_info->max_local_ipv6_addrs = LSW(cmd.rsp.arg[8]);
npar_info->max_remote_ipv6_addrs = MSW(cmd.rsp.arg[8]);
+ qlcnic_sriov_set_vf_max_vlan(adapter, npar_info);
qlcnic_sriov_pf_set_ff_max_res(adapter, npar_info);
dev_info(&adapter->pdev->dev,
"\n\ttotal_pf: %d,\n"
@@ -310,6 +349,28 @@ static int qlcnic_sriov_pf_cfg_vlan_filtering(struct qlcnic_adapter *adapter,
return err;
}
+/* On configuring the VF flood bit, the PF driver will receive traffic from all VFs */
+static int qlcnic_sriov_pf_cfg_flood(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_cmd_args cmd;
+ int err;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = QLC_FLOOD_MODE | QLC_VF_FLOOD_BIT;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "Failed to configure VF Flood bit on PF, err=%d\n",
+ err);
+
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
static int qlcnic_sriov_pf_cfg_eswitch(struct qlcnic_adapter *adapter,
u8 func, u8 enable)
{
@@ -404,6 +465,8 @@ static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
qlcnic_sriov_pf_disable(adapter);
+ qlcnic_sriov_free_vlans(adapter);
+
qlcnic_sriov_pf_cleanup(adapter);
/* After disabling SRIOV, re-init the driver in default mode
@@ -435,6 +498,12 @@ static int qlcnic_sriov_pf_init(struct qlcnic_adapter *adapter)
if (err)
return err;
+ if (qlcnic_84xx_check(adapter)) {
+ err = qlcnic_sriov_pf_cfg_flood(adapter);
+ if (err)
+ goto disable_vlan_filtering;
+ }
+
err = qlcnic_sriov_pf_cfg_eswitch(adapter, func, 1);
if (err)
goto disable_vlan_filtering;
@@ -512,6 +581,8 @@ static int __qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter,
if (err)
goto del_flr_queue;
+ qlcnic_sriov_alloc_vlans(adapter);
+
err = qlcnic_sriov_pf_enable(adapter, num_vfs);
return err;
@@ -609,7 +680,7 @@ static int qlcnic_sriov_set_vf_acl(struct qlcnic_adapter *adapter, u8 func)
if (vp->vlan_mode == QLC_PVID_MODE) {
cmd.req.arg[2] |= BIT_6;
- cmd.req.arg[3] |= vp->vlan << 8;
+ cmd.req.arg[3] |= vp->pvid << 8;
}
err = qlcnic_issue_cmd(adapter, &cmd);
@@ -644,10 +715,13 @@ static int qlcnic_sriov_pf_channel_cfg_cmd(struct qlcnic_bc_trans *trans,
struct qlcnic_vf_info *vf = trans->vf;
struct qlcnic_vport *vp = vf->vp;
struct qlcnic_adapter *adapter;
+ struct qlcnic_sriov *sriov;
u16 func = vf->pci_func;
+ size_t size;
int err;
adapter = vf->adapter;
+ sriov = adapter->ahw->sriov;
if (trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) {
err = qlcnic_sriov_pf_config_vport(adapter, 1, func);
@@ -657,8 +731,12 @@ static int qlcnic_sriov_pf_channel_cfg_cmd(struct qlcnic_bc_trans *trans,
qlcnic_sriov_pf_config_vport(adapter, 0, func);
}
} else {
- if (vp->vlan_mode == QLC_GUEST_VLAN_MODE)
- vp->vlan = 0;
+ if (vp->vlan_mode == QLC_GUEST_VLAN_MODE) {
+ size = sizeof(*vf->sriov_vlans);
+ size = size * sriov->num_allowed_vlans;
+ memset(vf->sriov_vlans, 0, size);
+ }
+
err = qlcnic_sriov_pf_config_vport(adapter, 0, func);
}
@@ -680,20 +758,23 @@ err_out:
}
static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter,
- struct qlcnic_vport *vp,
- u16 func, u16 vlan, u8 op)
+ struct qlcnic_vf_info *vf,
+ u16 vlan, u8 op)
{
struct qlcnic_cmd_args cmd;
struct qlcnic_macvlan_mbx mv;
+ struct qlcnic_vport *vp;
u8 *addr;
int err;
u32 *buf;
int vpid;
+ vp = vf->vp;
+
if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN))
return -ENOMEM;
- vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func);
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter, vf->pci_func);
if (vpid < 0) {
err = -EINVAL;
goto out;
@@ -737,6 +818,35 @@ static int qlcnic_sriov_validate_create_rx_ctx(struct qlcnic_cmd_args *cmd)
return 0;
}
+static void qlcnic_83xx_cfg_default_mac_vlan(struct qlcnic_adapter *adapter,
+ struct qlcnic_vf_info *vf,
+ int opcode)
+{
+ struct qlcnic_sriov *sriov;
+ u16 vlan;
+ int i;
+
+ sriov = adapter->ahw->sriov;
+
+ mutex_lock(&vf->vlan_list_lock);
+ if (vf->num_vlan) {
+ for (i = 0; i < sriov->num_allowed_vlans; i++) {
+ vlan = vf->sriov_vlans[i];
+ if (vlan)
+ qlcnic_sriov_cfg_vf_def_mac(adapter, vf, vlan,
+ opcode);
+ }
+ }
+ mutex_unlock(&vf->vlan_list_lock);
+
+ if (vf->vp->vlan_mode != QLC_PVID_MODE) {
+ if (qlcnic_83xx_pf_check(adapter) &&
+ qlcnic_sriov_check_any_vlan(vf))
+ return;
+ qlcnic_sriov_cfg_vf_def_mac(adapter, vf, 0, opcode);
+ }
+}
+
static int qlcnic_sriov_pf_create_rx_ctx_cmd(struct qlcnic_bc_trans *tran,
struct qlcnic_cmd_args *cmd)
{
@@ -744,7 +854,6 @@ static int qlcnic_sriov_pf_create_rx_ctx_cmd(struct qlcnic_bc_trans *tran,
struct qlcnic_adapter *adapter = vf->adapter;
struct qlcnic_rcv_mbx_out *mbx_out;
int err;
- u16 vlan;
err = qlcnic_sriov_validate_create_rx_ctx(cmd);
if (err) {
@@ -755,12 +864,10 @@ static int qlcnic_sriov_pf_create_rx_ctx_cmd(struct qlcnic_bc_trans *tran,
cmd->req.arg[6] = vf->vp->handle;
err = qlcnic_issue_cmd(adapter, cmd);
- vlan = vf->vp->vlan;
if (!err) {
mbx_out = (struct qlcnic_rcv_mbx_out *)&cmd->rsp.arg[1];
vf->rx_ctx_id = mbx_out->ctx_id;
- qlcnic_sriov_cfg_vf_def_mac(adapter, vf->vp, vf->pci_func,
- vlan, QLCNIC_MAC_ADD);
+ qlcnic_83xx_cfg_default_mac_vlan(adapter, vf, QLCNIC_MAC_ADD);
} else {
vf->rx_ctx_id = 0;
}
@@ -844,7 +951,6 @@ static int qlcnic_sriov_pf_del_rx_ctx_cmd(struct qlcnic_bc_trans *trans,
struct qlcnic_vf_info *vf = trans->vf;
struct qlcnic_adapter *adapter = vf->adapter;
int err;
- u16 vlan;
err = qlcnic_sriov_validate_del_rx_ctx(vf, cmd);
if (err) {
@@ -852,9 +958,7 @@ static int qlcnic_sriov_pf_del_rx_ctx_cmd(struct qlcnic_bc_trans *trans,
return err;
}
- vlan = vf->vp->vlan;
- qlcnic_sriov_cfg_vf_def_mac(adapter, vf->vp, vf->pci_func,
- vlan, QLCNIC_MAC_DEL);
+ qlcnic_83xx_cfg_default_mac_vlan(adapter, vf, QLCNIC_MAC_DEL);
cmd->req.arg[1] |= vf->vp->handle << 16;
err = qlcnic_issue_cmd(adapter, cmd);
@@ -1102,6 +1206,13 @@ static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter,
struct qlcnic_vport *vp = vf->vp;
u8 op, new_op;
+ if (((cmd->req.arg[1] & QLC_MAC_OPCODE_MASK) == QLC_MAC_STAR_ADD) ||
+ ((cmd->req.arg[1] & QLC_MAC_OPCODE_MASK) == QLC_MAC_STAR_DEL)) {
+ netdev_err(adapter->netdev, "MAC + any VLAN filter not allowed from VF %d\n",
+ vf->pci_func);
+ return -EINVAL;
+ }
+
if (!(cmd->req.arg[1] & BIT_8))
return -EINVAL;
@@ -1121,7 +1232,7 @@ static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter,
cmd->req.arg[1] &= ~0x7;
new_op = (op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL;
- cmd->req.arg[3] |= vp->vlan << 16;
+ cmd->req.arg[3] |= vp->pvid << 16;
cmd->req.arg[1] |= new_op;
}
@@ -1191,8 +1302,10 @@ static int qlcnic_sriov_pf_get_acl_cmd(struct qlcnic_bc_trans *trans,
struct qlcnic_vport *vp = vf->vp;
u8 cmd_op, mode = vp->vlan_mode;
struct qlcnic_adapter *adapter;
+ struct qlcnic_sriov *sriov;
adapter = vf->adapter;
+ sriov = adapter->ahw->sriov;
cmd_op = trans->req_hdr->cmd_op;
cmd->rsp.arg[0] |= 1 << 25;
@@ -1206,10 +1319,10 @@ static int qlcnic_sriov_pf_get_acl_cmd(struct qlcnic_bc_trans *trans,
switch (mode) {
case QLC_GUEST_VLAN_MODE:
cmd->rsp.arg[1] = mode | 1 << 8;
- cmd->rsp.arg[2] = 1 << 16;
+ cmd->rsp.arg[2] = sriov->num_allowed_vlans << 16;
break;
case QLC_PVID_MODE:
- cmd->rsp.arg[1] = mode | 1 << 8 | vp->vlan << 16;
+ cmd->rsp.arg[1] = mode | 1 << 8 | vp->pvid << 16;
break;
}
@@ -1217,24 +1330,27 @@ static int qlcnic_sriov_pf_get_acl_cmd(struct qlcnic_bc_trans *trans,
}
static int qlcnic_sriov_pf_del_guest_vlan(struct qlcnic_adapter *adapter,
- struct qlcnic_vf_info *vf)
-
+ struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
{
- struct qlcnic_vport *vp = vf->vp;
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ u16 vlan;
- if (!vp->vlan)
+ if (!qlcnic_sriov_check_any_vlan(vf))
return -EINVAL;
+ vlan = cmd->req.arg[1] >> 16;
if (!vf->rx_ctx_id) {
- vp->vlan = 0;
+ qlcnic_sriov_del_vlan_id(sriov, vf, vlan);
return 0;
}
- qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
- vp->vlan, QLCNIC_MAC_DEL);
- vp->vlan = 0;
- qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
- 0, QLCNIC_MAC_ADD);
+ qlcnic_sriov_cfg_vf_def_mac(adapter, vf, vlan, QLCNIC_MAC_DEL);
+ qlcnic_sriov_del_vlan_id(sriov, vf, vlan);
+
+ if (qlcnic_83xx_pf_check(adapter))
+ qlcnic_sriov_cfg_vf_def_mac(adapter, vf,
+ 0, QLCNIC_MAC_ADD);
return 0;
}
@@ -1242,32 +1358,37 @@ static int qlcnic_sriov_pf_add_guest_vlan(struct qlcnic_adapter *adapter,
struct qlcnic_vf_info *vf,
struct qlcnic_cmd_args *cmd)
{
- struct qlcnic_vport *vp = vf->vp;
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
int err = -EIO;
+ u16 vlan;
- if (vp->vlan)
+ if (qlcnic_83xx_pf_check(adapter) && qlcnic_sriov_check_any_vlan(vf))
return err;
+ vlan = cmd->req.arg[1] >> 16;
+
if (!vf->rx_ctx_id) {
- vp->vlan = cmd->req.arg[1] >> 16;
+ qlcnic_sriov_add_vlan_id(sriov, vf, vlan);
return 0;
}
- err = qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
- 0, QLCNIC_MAC_DEL);
- if (err)
- return err;
+ if (qlcnic_83xx_pf_check(adapter)) {
+ err = qlcnic_sriov_cfg_vf_def_mac(adapter, vf, 0,
+ QLCNIC_MAC_DEL);
+ if (err)
+ return err;
+ }
- vp->vlan = cmd->req.arg[1] >> 16;
- err = qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
- vp->vlan, QLCNIC_MAC_ADD);
+ err = qlcnic_sriov_cfg_vf_def_mac(adapter, vf, vlan, QLCNIC_MAC_ADD);
if (err) {
- qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
- 0, QLCNIC_MAC_ADD);
- vp->vlan = 0;
+ if (qlcnic_83xx_pf_check(adapter))
+ qlcnic_sriov_cfg_vf_def_mac(adapter, vf, 0,
+ QLCNIC_MAC_ADD);
+ return err;
}
+ qlcnic_sriov_add_vlan_id(sriov, vf, vlan);
return err;
}
@@ -1290,7 +1411,7 @@ static int qlcnic_sriov_pf_cfg_guest_vlan_cmd(struct qlcnic_bc_trans *tran,
if (op)
err = qlcnic_sriov_pf_add_guest_vlan(adapter, vf, cmd);
else
- err = qlcnic_sriov_pf_del_guest_vlan(adapter, vf);
+ err = qlcnic_sriov_pf_del_guest_vlan(adapter, vf, cmd);
cmd->rsp.arg[0] |= err ? 2 << 25 : 1 << 25;
return err;
@@ -1300,8 +1421,6 @@ static const int qlcnic_pf_passthru_supp_cmds[] = {
QLCNIC_CMD_GET_STATISTICS,
QLCNIC_CMD_GET_PORT_CONFIG,
QLCNIC_CMD_GET_LINK_STATUS,
- QLCNIC_CMD_DCB_QUERY_CAP,
- QLCNIC_CMD_DCB_QUERY_PARAM,
QLCNIC_CMD_INIT_NIC_FUNC,
QLCNIC_CMD_STOP_NIC_FUNC,
};
@@ -1597,7 +1716,8 @@ void qlcnic_sriov_pf_handle_flr(struct qlcnic_sriov *sriov,
}
if (vp->vlan_mode == QLC_GUEST_VLAN_MODE)
- vp->vlan = 0;
+ memset(vf->sriov_vlans, 0,
+ sizeof(*vf->sriov_vlans) * sriov->num_allowed_vlans);
qlcnic_sriov_schedule_flr(sriov, vf, qlcnic_sriov_pf_process_flr);
netdev_info(dev, "FLR received for PCI func %d\n", vf->pci_func);
@@ -1767,20 +1887,22 @@ int qlcnic_sriov_set_vf_vlan(struct net_device *netdev, int vf,
return -EOPNOTSUPP;
}
+ memset(vf_info->sriov_vlans, 0,
+ sizeof(*vf_info->sriov_vlans) * sriov->num_allowed_vlans);
+
switch (vlan) {
case 4095:
- vp->vlan = 0;
vp->vlan_mode = QLC_GUEST_VLAN_MODE;
break;
case 0:
vp->vlan_mode = QLC_NO_VLAN_MODE;
- vp->vlan = 0;
vp->qos = 0;
break;
default:
vp->vlan_mode = QLC_PVID_MODE;
- vp->vlan = vlan;
+ qlcnic_sriov_add_vlan_id(sriov, vf_info, vlan);
vp->qos = qos;
+ vp->pvid = vlan;
}
netdev_info(netdev, "Setting VLAN %d, QoS %d, for VF %d\n",
@@ -1795,7 +1917,7 @@ static __u32 qlcnic_sriov_get_vf_vlan(struct qlcnic_adapter *adapter,
switch (vp->vlan_mode) {
case QLC_PVID_MODE:
- vlan = vp->vlan;
+ vlan = vp->pvid;
break;
case QLC_GUEST_VLAN_MODE:
vlan = MAX_VLAN_ID;
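The filter budgeting above is easier to see with concrete numbers. Only the formulas below come from qlcnic_sriov_set_vf_max_vlan() and qlcnic_sriov_pf_cal_res_limit() (on 83xx the patch pins both counts to 1 instead); the firmware totals are made up for illustration:

#include <stdio.h>

#define QLCNIC_SRIOV_VF_MAX_MAC 7

int main(void)
{
        unsigned int num_vfs = 4;
        unsigned int mcast_total = 512; /* hypothetical firmware limit */
        unsigned int ucast_total = 128; /* hypothetical firmware limit */

        /* 84xx path: carve mcast filters across PF + VFs, 7 MACs each */
        unsigned int total_fn = num_vfs + 1;
        unsigned int num_allowed_vlans =
                mcast_total / (QLCNIC_SRIOV_VF_MAX_MAC * total_fn) - 1;
        unsigned int num_macs = num_allowed_vlans + 1;

        printf("allowed guest VLANs per VF: %u\n", num_allowed_vlans);
        /* per-VF limits */
        printf("VF ucast/tx filters: %u, VF mcast filters: %u\n",
               num_macs, num_macs * QLCNIC_SRIOV_VF_MAX_MAC);
        /* what is left for the PF itself */
        printf("PF ucast filters: %u, PF mcast filters: %u\n",
               ucast_total - num_macs * num_vfs,
               mcast_total - num_macs * num_vfs * QLCNIC_SRIOV_VF_MAX_MAC);
        return 0;
}

With these sample totals each VF may carry 13 guest VLANs, and the PF is left 72 unicast and 120 multicast filters.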
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 1a9f8a400e50..3d64113a35af 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -6,7 +6,6 @@
*/
#include <linux/slab.h>
-#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include "qlcnic.h"
@@ -127,6 +126,8 @@ static int qlcnic_83xx_store_beacon(struct qlcnic_adapter *adapter,
if (kstrtoul(buf, 2, &h_beacon))
return -EINVAL;
+ qlcnic_get_beacon_state(adapter);
+
if (ahw->beacon_state == h_beacon)
return len;
@@ -158,7 +159,7 @@ static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter,
struct qlcnic_hardware_context *ahw = adapter->ahw;
int err, drv_sds_rings = adapter->drv_sds_rings;
u16 beacon;
- u8 h_beacon_state, b_state, b_rate;
+ u8 b_state, b_rate;
if (len != sizeof(u16))
return QL_STATUS_INVALID_PARAM;
@@ -168,18 +169,7 @@ static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter,
if (err)
return err;
- if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_BEACON) {
- err = qlcnic_get_beacon_state(adapter, &h_beacon_state);
- if (err) {
- netdev_err(adapter->netdev,
- "Failed to get current beacon state\n");
- } else {
- if (h_beacon_state == QLCNIC_BEACON_DISABLE)
- ahw->beacon_state = 0;
- else if (h_beacon_state == QLCNIC_BEACON_EANBLE)
- ahw->beacon_state = 2;
- }
- }
+ qlcnic_get_beacon_state(adapter);
if (ahw->beacon_state == b_state)
return len;
@@ -360,10 +350,28 @@ static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
return size;
}
-static int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
+static u32 qlcnic_get_pci_func_count(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 count = 0;
+
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
+ return ahw->total_nic_func;
+
+ if (ahw->total_pci_func <= QLC_DEFAULT_VNIC_COUNT)
+ count = QLC_DEFAULT_VNIC_COUNT;
+ else
+ count = ahw->max_vnic_func;
+
+ return count;
+}
+
+int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
+{
+ u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
int i;
- for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+
+ for (i = 0; i < pci_func_count; i++) {
if (adapter->npars[i].pci_func == pci_func)
return i;
}
@@ -382,7 +390,6 @@ static int validate_pm_config(struct qlcnic_adapter *adapter,
src_pci_func = pm_cfg[i].pci_func;
dest_pci_func = pm_cfg[i].dest_npar;
src_index = qlcnic_is_valid_nic_func(adapter, src_pci_func);
-
if (src_index < 0)
return QL_STATUS_INVALID_PARAM;
@@ -439,6 +446,8 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
for (i = 0; i < count; i++) {
pci_func = pm_cfg[i].pci_func;
index = qlcnic_is_valid_nic_func(adapter, pci_func);
+ if (index < 0)
+ return QL_STATUS_INVALID_PARAM;
id = adapter->npars[index].phy_port;
adapter->npars[index].enable_pm = !!pm_cfg[i].action;
adapter->npars[index].dest_npar = id;
@@ -455,17 +464,19 @@ static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
- int i;
+ u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
+ struct qlcnic_pm_func_cfg *pm_cfg;
+ int i, pm_cfg_size;
u8 pci_func;
- if (size != sizeof(pm_cfg))
+ pm_cfg_size = pci_func_count * sizeof(*pm_cfg);
+ if (size != pm_cfg_size)
return QL_STATUS_INVALID_PARAM;
- memset(&pm_cfg, 0,
- sizeof(struct qlcnic_pm_func_cfg) * QLCNIC_MAX_PCI_FUNC);
+ memset(buf, 0, pm_cfg_size);
+ pm_cfg = (struct qlcnic_pm_func_cfg *)buf;
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ for (i = 0; i < pci_func_count; i++) {
pci_func = adapter->npars[i].pci_func;
if (!adapter->npars[i].active)
continue;
@@ -477,26 +488,26 @@ static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
pm_cfg[pci_func].dest_npar = 0;
pm_cfg[pci_func].pci_func = i;
}
- memcpy(buf, &pm_cfg, size);
-
return size;
}
static int validate_esw_config(struct qlcnic_adapter *adapter,
struct qlcnic_esw_func_cfg *esw_cfg, int count)
{
+ u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int i, ret;
u32 op_mode;
u8 pci_func;
- int i, ret;
if (qlcnic_82xx_check(adapter))
- op_mode = readl(adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
+ op_mode = readl(ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
else
- op_mode = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE);
+ op_mode = QLCRDX(ahw, QLC_83XX_DRV_OP_MODE);
for (i = 0; i < count; i++) {
pci_func = esw_cfg[i].pci_func;
- if (pci_func >= QLCNIC_MAX_PCI_FUNC)
+ if (pci_func >= pci_func_count)
return QL_STATUS_INVALID_PARAM;
if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
@@ -600,6 +611,8 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
for (i = 0; i < count; i++) {
pci_func = esw_cfg[i].pci_func;
index = qlcnic_is_valid_nic_func(adapter, pci_func);
+ if (index < 0)
+ return QL_STATUS_INVALID_PARAM;
npar = &adapter->npars[index];
switch (esw_cfg[i].op_mode) {
case QLCNIC_PORT_DEFAULTS:
@@ -629,16 +642,19 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
+ u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
+ struct qlcnic_esw_func_cfg *esw_cfg;
+ size_t esw_cfg_size;
u8 i, pci_func;
- if (size != sizeof(esw_cfg))
+ esw_cfg_size = pci_func_count * sizeof(*esw_cfg);
+ if (size != esw_cfg_size)
return QL_STATUS_INVALID_PARAM;
- memset(&esw_cfg, 0,
- sizeof(struct qlcnic_esw_func_cfg) * QLCNIC_MAX_PCI_FUNC);
+ memset(buf, 0, esw_cfg_size);
+ esw_cfg = (struct qlcnic_esw_func_cfg *)buf;
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ for (i = 0; i < pci_func_count; i++) {
pci_func = adapter->npars[i].pci_func;
if (!adapter->npars[i].active)
continue;
@@ -650,9 +666,6 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[pci_func]))
return QL_STATUS_INVALID_PARAM;
}
-
- memcpy(buf, &esw_cfg, size);
-
return size;
}
@@ -711,6 +724,8 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
if (ret)
return ret;
index = qlcnic_is_valid_nic_func(adapter, pci_func);
+ if (index < 0)
+ return QL_STATUS_INVALID_PARAM;
adapter->npars[index].min_bw = nic_info.min_tx_bw;
adapter->npars[index].max_bw = nic_info.max_tx_bw;
}
@@ -726,27 +741,28 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
+ struct qlcnic_npar_func_cfg *np_cfg;
struct qlcnic_info nic_info;
- struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
+ size_t np_cfg_size;
int i, ret;
- if (size != sizeof(np_cfg))
+ np_cfg_size = pci_func_count * sizeof(*np_cfg);
+ if (size != np_cfg_size)
return QL_STATUS_INVALID_PARAM;
memset(&nic_info, 0, sizeof(struct qlcnic_info));
- memset(&np_cfg, 0,
- sizeof(struct qlcnic_npar_func_cfg) * QLCNIC_MAX_PCI_FUNC);
+ memset(buf, 0, np_cfg_size);
+ np_cfg = (struct qlcnic_npar_func_cfg *)buf;
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ for (i = 0; i < pci_func_count; i++) {
if (qlcnic_is_valid_nic_func(adapter, i) < 0)
continue;
ret = qlcnic_get_nic_info(adapter, &nic_info, i);
if (ret)
return ret;
-
if (!adapter->npars[i].eswitch_status)
continue;
-
np_cfg[i].pci_func = i;
np_cfg[i].op_mode = (u8)nic_info.op_mode;
np_cfg[i].port_num = nic_info.phys_port;
@@ -756,8 +772,6 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
}
-
- memcpy(buf, &np_cfg, size);
return size;
}
@@ -769,6 +783,7 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
struct qlcnic_esw_statistics port_stats;
int ret;
@@ -778,7 +793,7 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
if (size != sizeof(struct qlcnic_esw_statistics))
return QL_STATUS_INVALID_PARAM;
- if (offset >= QLCNIC_MAX_PCI_FUNC)
+ if (offset >= pci_func_count)
return QL_STATUS_INVALID_PARAM;
memset(&port_stats, 0, size);
@@ -869,12 +884,13 @@ static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file,
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
int ret;
if (qlcnic_83xx_check(adapter))
return QLC_STATUS_UNSUPPORTED_CMD;
- if (offset >= QLCNIC_MAX_PCI_FUNC)
+ if (offset >= pci_func_count)
return QL_STATUS_INVALID_PARAM;
ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
@@ -898,14 +914,17 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
+ u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
+ struct qlcnic_pci_func_cfg *pci_cfg;
struct qlcnic_pci_info *pci_info;
+ size_t pci_cfg_sz;
int i, ret;
- if (size != sizeof(pci_cfg))
+ pci_cfg_sz = pci_func_count * sizeof(*pci_cfg);
+ if (size != pci_cfg_sz)
return QL_STATUS_INVALID_PARAM;
- pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
+ pci_info = kcalloc(pci_func_count, sizeof(*pci_info), GFP_KERNEL);
if (!pci_info)
return -ENOMEM;
@@ -915,19 +934,17 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
return ret;
}
- memset(&pci_cfg, 0,
- sizeof(struct qlcnic_pci_func_cfg) * QLCNIC_MAX_PCI_FUNC);
-
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ pci_cfg = (struct qlcnic_pci_func_cfg *)buf;
+ for (i = 0; i < pci_func_count; i++) {
pci_cfg[i].pci_func = pci_info[i].id;
pci_cfg[i].func_type = pci_info[i].type;
+ pci_cfg[i].func_state = 0;
pci_cfg[i].port_num = pci_info[i].default_port;
pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
}
- memcpy(buf, &pci_cfg, size);
kfree(pci_info);
return size;
}
@@ -1269,7 +1286,7 @@ void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
device_remove_file(dev, &dev_attr_bridged_mode);
}
-void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
+static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
@@ -1308,7 +1325,7 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
dev_info(dev, "failed to create eswitch stats sysfs entry");
}
-void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
+static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
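All of the sysfs readers above move from a fixed qlcnic_*_cfg[QLCNIC_MAX_PCI_FUNC] stack array to sizing against the runtime function count and filling the caller's buffer in place. A hedged sketch of that pattern; struct func_cfg, pci_func_count() and fill_one() are invented stand-ins:

#include <stdint.h>
#include <string.h>
#include <sys/types.h>

struct func_cfg {
        uint8_t pci_func;
        uint8_t port_num;
};

static uint32_t pci_func_count(void) { return 8; }      /* stand-in */

static void fill_one(struct func_cfg *c, uint32_t i)
{
        c->pci_func = (uint8_t)i;
        c->port_num = i & 1;
}

static ssize_t read_cfg(char *buf, size_t size)
{
        uint32_t count = pci_func_count();
        size_t want = count * sizeof(struct func_cfg);
        struct func_cfg *cfg;
        uint32_t i;

        if (size != want)       /* exact-size contract, as in the patch */
                return -1;      /* QL_STATUS_INVALID_PARAM in the driver */

        memset(buf, 0, want);   /* fill the caller's buffer, no stack copy */
        cfg = (struct func_cfg *)buf;
        for (i = 0; i < count; i++)
                fill_one(&cfg[i], i);
        return (ssize_t)want;
}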
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index 03517478e589..ef332708e5f2 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -2248,7 +2248,6 @@ int ql_mb_get_port_cfg(struct ql_adapter *qdev);
int ql_mb_set_port_cfg(struct ql_adapter *qdev);
int ql_wait_fifo_empty(struct ql_adapter *qdev);
void ql_get_dump(struct ql_adapter *qdev, void *buff);
-void ql_gen_reg_dump(struct ql_adapter *qdev, struct ql_reg_dump *mpi_coredump);
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
int ql_own_firmware(struct ql_adapter *qdev);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
index 6bc5db703920..829be21f97b2 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
@@ -1242,8 +1242,8 @@ static void ql_get_core_dump(struct ql_adapter *qdev)
ql_queue_fw_error(qdev);
}
-void ql_gen_reg_dump(struct ql_adapter *qdev,
- struct ql_reg_dump *mpi_coredump)
+static void ql_gen_reg_dump(struct ql_adapter *qdev,
+ struct ql_reg_dump *mpi_coredump)
{
int i, status;
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
index 8dee1beb9854..c3c514e332b5 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
@@ -1,5 +1,4 @@
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index f705aeeba767..ce2cfddbed50 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -6,7 +6,6 @@
* Ron Mercer <ron.mercer@qlogic.com>
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/module.h>
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 1e49ec5b2232..819b74cefd64 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -34,7 +34,6 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
@@ -222,6 +221,7 @@ static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg)
cmd = ioread16(ioaddr + MMDIO);
if (!(cmd & MDIO_READ))
break;
+ udelay(1);
}
if (limit < 0)
@@ -245,6 +245,7 @@ static int r6040_phy_write(void __iomem *ioaddr,
cmd = ioread16(ioaddr + MMDIO);
if (!(cmd & MDIO_WRITE))
break;
+ udelay(1);
}
return (limit < 0) ? -ETIMEDOUT : 0;
@@ -834,8 +835,8 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
/* Set TX descriptor & Transmit it */
lp->tx_free_desc--;
descptr = lp->tx_insert_ptr;
- if (skb->len < MISR)
- descptr->len = MISR;
+ if (skb->len < ETH_ZLEN)
+ descptr->len = ETH_ZLEN;
else
descptr->len = skb->len;
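The r6040 hunks above insert udelay(1) into two MDIO completion loops that previously re-read the register as fast as the bus allowed; the pause gives the serial MDIO transaction time to complete instead of burning the whole retry budget in a few microseconds. A standalone sketch of the resulting bounded poll; read_reg(), MDIO_BUSY and the retry limit are stand-ins for the driver's ioread16() loop:

#include <stdint.h>
#include <unistd.h>

#define MDIO_BUSY 0x2000        /* stand-in for the driver's busy bit */

static uint16_t read_reg(void)
{
        return 0;               /* stub MMIO read: pretend the engine is idle */
}

static int mdio_wait_ready(void)
{
        int limit = 2048;       /* arbitrary bound for the sketch */

        while (limit-- > 0) {
                if (!(read_reg() & MDIO_BUSY))
                        return 0;       /* transaction completed */
                usleep(1);              /* udelay(1) in the driver */
        }
        return -1;                      /* -ETIMEDOUT in the driver */
}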
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index c737f0ea5de7..91a67ae8f17b 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -21,7 +21,6 @@
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index a30c4395b232..9e757c792d84 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -13,4 +13,4 @@ config SH_ETH
Renesas SuperH Ethernet device driver.
This driver supports the following CPUs:
- SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757,
- R8A7740, R8A777x and R8A7790.
+ R8A7740, R8A777x and R8A779x.
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index d256ce19d4de..040cb94e8219 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1,5 +1,4 @@
-/*
- * SuperH Ethernet device driver
+/* SuperH Ethernet device driver
*
* Copyright (C) 2006-2012 Nobuhiro Iwamatsu
* Copyright (C) 2008-2013 Renesas Solutions Corp.
@@ -13,15 +12,11 @@
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*/
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
@@ -148,6 +143,65 @@ static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
[FWALCR1] = 0x00b4,
};
+static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
+ [EDSR] = 0x0000,
+ [EDMR] = 0x0400,
+ [EDTRR] = 0x0408,
+ [EDRRR] = 0x0410,
+ [EESR] = 0x0428,
+ [EESIPR] = 0x0430,
+ [TDLAR] = 0x0010,
+ [TDFAR] = 0x0014,
+ [TDFXR] = 0x0018,
+ [TDFFR] = 0x001c,
+ [RDLAR] = 0x0030,
+ [RDFAR] = 0x0034,
+ [RDFXR] = 0x0038,
+ [RDFFR] = 0x003c,
+ [TRSCER] = 0x0438,
+ [RMFCR] = 0x0440,
+ [TFTR] = 0x0448,
+ [FDR] = 0x0450,
+ [RMCR] = 0x0458,
+ [RPADIR] = 0x0460,
+ [FCFTR] = 0x0468,
+ [CSMR] = 0x04E4,
+
+ [ECMR] = 0x0500,
+ [RFLR] = 0x0508,
+ [ECSR] = 0x0510,
+ [ECSIPR] = 0x0518,
+ [PIR] = 0x0520,
+ [APR] = 0x0554,
+ [MPR] = 0x0558,
+ [PFTCR] = 0x055c,
+ [PFRCR] = 0x0560,
+ [TPAUSER] = 0x0564,
+ [MAHR] = 0x05c0,
+ [MALR] = 0x05c8,
+ [CEFCR] = 0x0740,
+ [FRECR] = 0x0748,
+ [TSFRCR] = 0x0750,
+ [TLFRCR] = 0x0758,
+ [RFCR] = 0x0760,
+ [MAFCR] = 0x0778,
+
+ [ARSTR] = 0x0000,
+ [TSU_CTRST] = 0x0004,
+ [TSU_VTAG0] = 0x0058,
+ [TSU_ADSBSY] = 0x0060,
+ [TSU_TEN] = 0x0064,
+ [TSU_ADRH0] = 0x0100,
+ [TSU_ADRL0] = 0x0104,
+ [TSU_ADRH31] = 0x01f8,
+ [TSU_ADRL31] = 0x01fc,
+
+ [TXNLCR0] = 0x0080,
+ [TXALCR0] = 0x0084,
+ [RXNLCR0] = 0x0088,
+ [RXALCR0] = 0x008C,
+};
+
static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
[ECMR] = 0x0300,
[RFLR] = 0x0308,
@@ -314,12 +368,14 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
[TSU_ADRL31] = 0x01fc,
};
-static int sh_eth_is_gether(struct sh_eth_private *mdp)
+static bool sh_eth_is_gether(struct sh_eth_private *mdp)
{
- if (mdp->reg_offset == sh_eth_offset_gigabit)
- return 1;
- else
- return 0;
+ return mdp->reg_offset == sh_eth_offset_gigabit;
+}
+
+static bool sh_eth_is_rz_fast_ether(struct sh_eth_private *mdp)
+{
+ return mdp->reg_offset == sh_eth_offset_fast_rz;
}
static void sh_eth_select_mii(struct net_device *ndev)
@@ -395,8 +451,8 @@ static struct sh_eth_cpu_data r8a777x_data = {
.hw_swap = 1,
};
-/* R8A7790 */
-static struct sh_eth_cpu_data r8a7790_data = {
+/* R8A7790/1 */
+static struct sh_eth_cpu_data r8a779x_data = {
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_r8a777x,
@@ -646,8 +702,8 @@ static struct sh_eth_cpu_data sh7763_data = {
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
.tx_check = EESR_TC1 | EESR_FTC,
- .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
- EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
+ .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
+ EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
EESR_ECI,
.apr = 1,
@@ -705,6 +761,38 @@ static struct sh_eth_cpu_data r8a7740_data = {
.shift_rd0 = 1,
};
+/* R7S72100 */
+static struct sh_eth_cpu_data r7s72100_data = {
+ .chip_reset = sh_eth_chip_reset,
+ .set_duplex = sh_eth_set_duplex,
+
+ .register_type = SH_ETH_REG_FAST_RZ,
+
+ .ecsr_value = ECSR_ICD,
+ .ecsipr_value = ECSIPR_ICDIP,
+ .eesipr_value = 0xff7f009f,
+
+ .tx_check = EESR_TC1 | EESR_FTC,
+ .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
+ EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
+ EESR_TDE | EESR_ECI,
+ .fdr_value = 0x0000070f,
+ .rmcr_value = RMCR_RNC,
+
+ .no_psr = 1,
+ .apr = 1,
+ .mpr = 1,
+ .tpauser = 1,
+ .hw_swap = 1,
+ .rpadir = 1,
+ .rpadir_value = 2 << 16,
+ .no_trimd = 1,
+ .no_ade = 1,
+ .hw_crc = 1,
+ .tsu = 1,
+ .shift_rd0 = 1,
+};
+
static struct sh_eth_cpu_data sh7619_data = {
.register_type = SH_ETH_REG_FAST_SH3_SH2,
@@ -732,7 +820,7 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
cd->ecsipr_value = DEFAULT_ECSIPR_INIT;
if (!cd->fcftr_value)
- cd->fcftr_value = DEFAULT_FIFO_F_D_RFF | \
+ cd->fcftr_value = DEFAULT_FIFO_F_D_RFF |
DEFAULT_FIFO_F_D_RFD;
if (!cd->fdr_value)
@@ -771,7 +859,7 @@ static int sh_eth_reset(struct net_device *ndev)
struct sh_eth_private *mdp = netdev_priv(ndev);
int ret = 0;
- if (sh_eth_is_gether(mdp)) {
+ if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp)) {
sh_eth_write(ndev, EDSR_ENALL, EDSR);
sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
EDMR);
@@ -849,20 +937,17 @@ static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
return x;
}
-/*
- * Program the hardware MAC address from dev->dev_addr.
- */
+/* Program the hardware MAC address from dev->dev_addr. */
static void update_mac_address(struct net_device *ndev)
{
sh_eth_write(ndev,
- (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
- (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
+ (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
+ (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
sh_eth_write(ndev,
- (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
+ (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
}
-/*
- * Get MAC address from SuperH MAC address register
+/* Get MAC address from SuperH MAC address register
*
* SuperH's Ethernet device doesn't have a ROM for the MAC address.
* This driver gets the MAC address that was set by the bootloader (U-Boot or sh-ipl+g).
@@ -885,7 +970,7 @@ static void read_mac_address(struct net_device *ndev, unsigned char *mac)
static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
{
- if (sh_eth_is_gether(mdp))
+ if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp))
return EDTRR_TRNS_GETHER;
else
return EDTRR_TRNS_ETHER;
@@ -1019,8 +1104,10 @@ static void sh_eth_ring_format(struct net_device *ndev)
int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
- mdp->cur_rx = mdp->cur_tx = 0;
- mdp->dirty_rx = mdp->dirty_tx = 0;
+ mdp->cur_rx = 0;
+ mdp->cur_tx = 0;
+ mdp->dirty_rx = 0;
+ mdp->dirty_tx = 0;
memset(mdp->rx_ring, 0, rx_ringsize);
@@ -1033,7 +1120,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
if (skb == NULL)
break;
dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
- DMA_FROM_DEVICE);
+ DMA_FROM_DEVICE);
sh_eth_set_receive_align(skb);
/* RX descriptor */
@@ -1046,7 +1133,8 @@ static void sh_eth_ring_format(struct net_device *ndev)
/* Rx descriptor address set */
if (i == 0) {
sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
- if (sh_eth_is_gether(mdp))
+ if (sh_eth_is_gether(mdp) ||
+ sh_eth_is_rz_fast_ether(mdp))
sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
}
}
@@ -1067,7 +1155,8 @@ static void sh_eth_ring_format(struct net_device *ndev)
if (i == 0) {
/* Tx descriptor address set */
sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
- if (sh_eth_is_gether(mdp))
+ if (sh_eth_is_gether(mdp) ||
+ sh_eth_is_rz_fast_ether(mdp))
sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
}
}
@@ -1081,8 +1170,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
struct sh_eth_private *mdp = netdev_priv(ndev);
int rx_ringsize, tx_ringsize, ret = 0;
- /*
- * +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
+ /* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
* card needs room to do 8 byte alignment, +2 so we can reserve
* the first 2 bytes, and +16 gets room for the status word from the
* card.
@@ -1257,7 +1345,7 @@ static int sh_eth_txfree(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
struct sh_eth_txdesc *txdesc;
- int freeNum = 0;
+ int free_num = 0;
int entry = 0;
for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
@@ -1271,7 +1359,7 @@ static int sh_eth_txfree(struct net_device *ndev)
txdesc->buffer_length, DMA_TO_DEVICE);
dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
mdp->tx_skbuff[entry] = NULL;
- freeNum++;
+ free_num++;
}
txdesc->status = cpu_to_edmac(mdp, TD_TFP);
if (entry >= mdp->num_tx_ring - 1)
@@ -1280,7 +1368,7 @@ static int sh_eth_txfree(struct net_device *ndev)
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += txdesc->buffer_length;
}
- return freeNum;
+ return free_num;
}
/* Packet receive function */
@@ -1313,12 +1401,11 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
if (!(desc_status & RDFEND))
ndev->stats.rx_length_errors++;
- /*
- * In case of almost all GETHER/ETHERs, the Receive Frame State
+ /* In case of almost all GETHER/ETHERs, the Receive Frame State
* (RFS) bits in the Receive Descriptor 0 are from bit 9 to
- * bit 0. However, in case of the R8A7740's GETHER, the RFS
- * bits are from bit 25 to bit 16. So, the driver needs right
- * shifting by 16.
+ * bit 0. However, in case of the R8A7740, R8A779x, and
+ * R7S72100 the RFS bits are from bit 25 to bit 16. So, the
+ * driver needs right shifting by 16.
*/
if (mdp->cd->shift_rd0)
desc_status >>= 16;
@@ -1374,7 +1461,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
if (skb == NULL)
break; /* Better luck next round. */
dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
- DMA_FROM_DEVICE);
+ DMA_FROM_DEVICE);
sh_eth_set_receive_align(skb);
skb_checksum_none_assert(skb);
@@ -1392,10 +1479,13 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
/* If we don't need to check status, don't. -KDU */
if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
/* fix the values for the next receiving if RDE is set */
- if (intr_status & EESR_RDE)
- mdp->cur_rx = mdp->dirty_rx =
- (sh_eth_read(ndev, RDFAR) -
- sh_eth_read(ndev, RDLAR)) >> 4;
+ if (intr_status & EESR_RDE) {
+ u32 count = (sh_eth_read(ndev, RDFAR) -
+ sh_eth_read(ndev, RDLAR)) >> 4;
+
+ mdp->cur_rx = count;
+ mdp->dirty_rx = count;
+ }
sh_eth_write(ndev, EDRRR_R, EDRRR);
}
@@ -1438,17 +1528,17 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
if (mdp->ether_link_active_low)
link_stat = ~link_stat;
}
- if (!(link_stat & PHY_ST_LINK))
+ if (!(link_stat & PHY_ST_LINK)) {
sh_eth_rcv_snd_disable(ndev);
- else {
+ } else {
/* Link Up */
sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
- ~DMAC_M_ECI, EESIPR);
- /*clear int */
+ ~DMAC_M_ECI, EESIPR);
+ /* clear int */
sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
- ECSR);
+ ECSR);
sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
- DMAC_M_ECI, EESIPR);
+ DMAC_M_ECI, EESIPR);
/* enable tx and rx */
sh_eth_rcv_snd_enable(ndev);
}
@@ -1517,11 +1607,11 @@ ignore_link:
if (intr_status & mask) {
/* Tx error */
u32 edtrr = sh_eth_read(ndev, EDTRR);
+
/* dmesg */
- dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
- intr_status, mdp->cur_tx);
- dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
- mdp->dirty_tx, (u32) ndev->state, edtrr);
+ dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
+ intr_status, mdp->cur_tx, mdp->dirty_tx,
+ (u32)ndev->state, edtrr);
/* dirty buffer free */
sh_eth_txfree(ndev);
@@ -1644,7 +1734,8 @@ static void sh_eth_adjust_link(struct net_device *ndev)
}
if (!mdp->link) {
sh_eth_write(ndev,
- (sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR);
+ sh_eth_read(ndev, ECMR) & ~ECMR_TXF,
+ ECMR);
new_state = 1;
mdp->link = phydev->link;
if (mdp->cd->no_psr || mdp->no_ether_link)
@@ -1671,7 +1762,7 @@ static int sh_eth_phy_init(struct net_device *ndev)
struct phy_device *phydev = NULL;
snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
- mdp->mii_bus->id , mdp->phy_id);
+ mdp->mii_bus->id, mdp->phy_id);
mdp->link = 0;
mdp->speed = 0;
@@ -1685,8 +1776,8 @@ static int sh_eth_phy_init(struct net_device *ndev)
return PTR_ERR(phydev);
}
- dev_info(&ndev->dev, "attached phy %i to driver %s\n",
- phydev->addr, phydev->drv->name);
+ dev_info(&ndev->dev, "attached PHY %d (IRQ %d) to driver %s\n",
+ phydev->addr, phydev->irq, phydev->drv->name);
mdp->phydev = phydev;
@@ -1703,15 +1794,13 @@ static int sh_eth_phy_start(struct net_device *ndev)
if (ret)
return ret;
- /* reset phy - this also wakes it from PDOWN */
- phy_write(mdp->phydev, MII_BMCR, BMCR_RESET);
phy_start(mdp->phydev);
return 0;
}
static int sh_eth_get_settings(struct net_device *ndev,
- struct ethtool_cmd *ecmd)
+ struct ethtool_cmd *ecmd)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
unsigned long flags;
@@ -1725,7 +1814,7 @@ static int sh_eth_get_settings(struct net_device *ndev,
}
static int sh_eth_set_settings(struct net_device *ndev,
- struct ethtool_cmd *ecmd)
+ struct ethtool_cmd *ecmd)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
unsigned long flags;
@@ -1801,7 +1890,7 @@ static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
}
static void sh_eth_get_ethtool_stats(struct net_device *ndev,
- struct ethtool_stats *stats, u64 *data)
+ struct ethtool_stats *stats, u64 *data)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
int i = 0;
@@ -1818,7 +1907,7 @@ static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
switch (stringset) {
case ETH_SS_STATS:
memcpy(data, *sh_eth_gstrings_stats,
- sizeof(sh_eth_gstrings_stats));
+ sizeof(sh_eth_gstrings_stats));
break;
}
}
@@ -1953,9 +2042,10 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
netif_stop_queue(ndev);
- if (netif_msg_timer(mdp))
- dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x,"
- " resetting...\n", ndev->name, (int)sh_eth_read(ndev, EESR));
+ if (netif_msg_timer(mdp)) {
+ dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x, resetting...\n",
+ ndev->name, (int)sh_eth_read(ndev, EESR));
+ }
/* tx_errors count up */
ndev->stats.tx_errors++;
@@ -2065,6 +2155,9 @@ static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
+ if (sh_eth_is_rz_fast_ether(mdp))
+ return &ndev->stats;
+
pm_runtime_get_sync(&mdp->pdev->dev);
ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
@@ -2088,8 +2181,7 @@ static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
}
/* ioctl to device function */
-static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
- int cmd)
+static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
struct phy_device *phydev = mdp->phydev;
@@ -2209,7 +2301,7 @@ static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
sh_eth_tsu_read_entry(reg_offset, c_addr);
- if (memcmp(addr, c_addr, ETH_ALEN) == 0)
+ if (ether_addr_equal(addr, c_addr))
return i;
}
@@ -2344,8 +2436,7 @@ static void sh_eth_set_multicast_list(struct net_device *ndev)
unsigned long flags;
spin_lock_irqsave(&mdp->lock, flags);
- /*
- * Initial condition is MCT = 1, PRM = 0.
+ /* Initial condition is MCT = 1, PRM = 0.
* Depending on ndev->flags, set PRM or clear MCT
*/
ecmr_bits = (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | ECMR_MCT;
@@ -2411,8 +2502,7 @@ static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
mdp->vlan_num_ids++;
- /*
- * The controller has one VLAN tag HW filter. So, if the filter is
+ /* The controller has one VLAN tag HW filter. So, if the filter is
	 * already enabled, the driver disables it and all VLAN tags pass.
*/
if (mdp->vlan_num_ids > 1) {
@@ -2449,6 +2539,11 @@ static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
/* SuperH's TSU register init function */
static void sh_eth_tsu_init(struct sh_eth_private *mdp)
{
+ if (sh_eth_is_rz_fast_ether(mdp)) {
+ sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
+ return;
+ }
+
sh_eth_tsu_write(mdp, 0, TSU_FWEN0); /* Disable forward(0->1) */
sh_eth_tsu_write(mdp, 0, TSU_FWEN1); /* Disable forward(1->0) */
sh_eth_tsu_write(mdp, 0, TSU_FCM); /* forward fifo 3k-3k */
@@ -2528,7 +2623,7 @@ static int sh_mdio_init(struct net_device *ndev, int id,
mdp->mii_bus->name = "sh_mii";
mdp->mii_bus->parent = &ndev->dev;
snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
- mdp->pdev->name, id);
+ mdp->pdev->name, id);
/* PHY IRQ */
mdp->mii_bus->irq = devm_kzalloc(&ndev->dev,
@@ -2541,6 +2636,8 @@ static int sh_mdio_init(struct net_device *ndev, int id,
for (i = 0; i < PHY_MAX_ADDR; i++)
mdp->mii_bus->irq[i] = PHY_POLL;
+ if (pd->phy_irq > 0)
+ mdp->mii_bus->irq[pd->phy] = pd->phy_irq;
/* register mdio bus */
ret = mdiobus_register(mdp->mii_bus);
@@ -2566,6 +2663,9 @@ static const u16 *sh_eth_get_register_offset(int register_type)
case SH_ETH_REG_GIGABIT:
reg_offset = sh_eth_offset_gigabit;
break;
+ case SH_ETH_REG_FAST_RZ:
+ reg_offset = sh_eth_offset_fast_rz;
+ break;
case SH_ETH_REG_FAST_RCAR:
reg_offset = sh_eth_offset_fast_rcar;
break;
@@ -2739,7 +2839,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
/* print device information */
pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
- (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
+ (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
platform_set_drvdata(pdev, ndev);
@@ -2777,8 +2877,7 @@ static int sh_eth_drv_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int sh_eth_runtime_nop(struct device *dev)
{
- /*
- * Runtime PM callback shared between ->runtime_suspend()
+ /* Runtime PM callback shared between ->runtime_suspend()
* and ->runtime_resume(). Simply returns success.
*
* This driver re-initializes all registers after
@@ -2805,9 +2904,11 @@ static struct platform_device_id sh_eth_id_table[] = {
{ "sh7757-ether", (kernel_ulong_t)&sh7757_data },
{ "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga },
{ "sh7763-gether", (kernel_ulong_t)&sh7763_data },
+ { "r7s72100-ether", (kernel_ulong_t)&r7s72100_data },
{ "r8a7740-gether", (kernel_ulong_t)&r8a7740_data },
{ "r8a777x-ether", (kernel_ulong_t)&r8a777x_data },
- { "r8a7790-ether", (kernel_ulong_t)&r8a7790_data },
+ { "r8a7790-ether", (kernel_ulong_t)&r8a779x_data },
+ { "r8a7791-ether", (kernel_ulong_t)&r8a779x_data },
{ }
};
MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index f32c1692d310..6075915b88ec 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -1,5 +1,4 @@
-/*
- * SuperH Ethernet device driver
+/* SuperH Ethernet device driver
*
* Copyright (C) 2006-2012 Nobuhiro Iwamatsu
* Copyright (C) 2008-2012 Renesas Solutions Corp.
@@ -12,9 +11,6 @@
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -159,6 +155,7 @@ enum {
enum {
SH_ETH_REG_GIGABIT,
+ SH_ETH_REG_FAST_RZ,
SH_ETH_REG_FAST_RCAR,
SH_ETH_REG_FAST_SH4,
SH_ETH_REG_FAST_SH3_SH2
@@ -171,10 +168,9 @@ enum {
#define SH2_SH3_SKB_RX_ALIGN 2
#endif
-/*
- * Register's bits
+/* Register's bits
*/
-/* EDSR : sh7734, sh7757, sh7763, and r8a7740 only */
+/* EDSR : sh7734, sh7757, sh7763, r8a7740, and r7s72100 only */
enum EDSR_BIT {
EDSR_ENT = 0x01, EDSR_ENR = 0x02,
};
@@ -199,7 +195,7 @@ enum DMAC_T_BIT {
EDTRR_TRNS_ETHER = 0x01,
};
-/* EDRRR*/
+/* EDRRR */
enum EDRRR_R_BIT {
EDRRR_R = 0x01,
};
@@ -422,8 +418,7 @@ enum TSU_FWSLC_BIT {
#define TSU_VTAG_ENABLE 0x80000000
#define TSU_VTAG_VID_MASK 0x00000fff
-/*
- * The sh ether Tx buffer descriptors.
+/* The sh ether Tx buffer descriptors.
* This structure should be 20 bytes.
*/
struct sh_eth_txdesc {
@@ -437,10 +432,9 @@ struct sh_eth_txdesc {
#endif
u32 addr; /* TD2 */
u32 pad1; /* padding data */
-} __attribute__((aligned(2), packed));
+} __aligned(2) __packed;
-/*
- * The sh ether Rx buffer descriptors.
+/* The sh ether Rx buffer descriptors.
* This structure should be 20 bytes.
*/
struct sh_eth_rxdesc {
@@ -454,7 +448,7 @@ struct sh_eth_rxdesc {
#endif
u32 addr; /* RD2 */
u32 pad0; /* padding data */
-} __attribute__((aligned(2), packed));
+} __aligned(2) __packed;
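The attribute change in these two hunks is purely notational: the kernel macros expand to the same GCC attributes, so the descriptor layout is unchanged. For instance:

	/* __packed     expands to __attribute__((packed))
	 * __aligned(n) expands to __attribute__((aligned(n)))
	 * so this illustrative struct is 6 bytes and 2-byte aligned
	 * under either spelling:
	 */
	struct example {
		u32 a;
		u16 b;
	} __aligned(2) __packed;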
/* This structure is used by each CPU dependency handling. */
struct sh_eth_cpu_data {
@@ -480,16 +474,16 @@ struct sh_eth_cpu_data {
unsigned long eesr_err_check;
/* hardware features */
- unsigned long irq_flags; /* IRQ configuration flags */
- unsigned no_psr:1; /* EtherC DO NOT have PSR */
- unsigned apr:1; /* EtherC have APR */
- unsigned mpr:1; /* EtherC have MPR */
- unsigned tpauser:1; /* EtherC have TPAUSER */
- unsigned bculr:1; /* EtherC have BCULR */
- unsigned tsu:1; /* EtherC have TSU */
- unsigned hw_swap:1; /* E-DMAC have DE bit in EDMR */
- unsigned rpadir:1; /* E-DMAC have RPADIR */
- unsigned no_trimd:1; /* E-DMAC DO NOT have TRIMD */
+ unsigned long irq_flags; /* IRQ configuration flags */
+ unsigned no_psr:1; /* EtherC DO NOT have PSR */
+ unsigned apr:1; /* EtherC have APR */
+ unsigned mpr:1; /* EtherC have MPR */
+ unsigned tpauser:1; /* EtherC have TPAUSER */
+ unsigned bculr:1; /* EtherC have BCULR */
+ unsigned tsu:1; /* EtherC have TSU */
+ unsigned hw_swap:1; /* E-DMAC have DE bit in EDMR */
+ unsigned rpadir:1; /* E-DMAC have RPADIR */
+ unsigned no_trimd:1; /* E-DMAC DO NOT have TRIMD */
unsigned no_ade:1; /* E-DMAC DO NOT have ADE bit in EESR */
unsigned hw_crc:1; /* E-DMAC have CSMR */
unsigned select_mii:1; /* EtherC have RMII_MII (MII select register) */
@@ -511,14 +505,14 @@ struct sh_eth_private {
struct sh_eth_txdesc *tx_ring;
struct sk_buff **rx_skbuff;
struct sk_buff **tx_skbuff;
- spinlock_t lock;
- u32 cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ spinlock_t lock; /* Register access lock */
+ u32 cur_rx, dirty_rx; /* Producer/consumer ring indices */
u32 cur_tx, dirty_tx;
- u32 rx_buf_sz; /* Based on MTU+slack. */
+ u32 rx_buf_sz; /* Based on MTU+slack. */
int edmac_endian;
struct napi_struct napi;
/* MII transceiver section. */
- u32 phy_id; /* PHY ID */
+ u32 phy_id; /* PHY ID */
struct mii_bus *mii_bus; /* MDIO bus control */
struct phy_device *phydev; /* PHY device control */
int link;
@@ -526,8 +520,8 @@ struct sh_eth_private {
int msg_enable;
int speed;
int duplex;
- int port; /* for TSU */
- int vlan_num_ids; /* for VLAN tag filter */
+ int port; /* for TSU */
+ int vlan_num_ids; /* for VLAN tag filter */
unsigned no_ether_link:1;
unsigned ether_link_active_low:1;
diff --git a/drivers/net/ethernet/s6gmac.c b/drivers/net/ethernet/s6gmac.c
index a99739c5142c..1f4449ad8900 100644
--- a/drivers/net/ethernet/s6gmac.c
+++ b/drivers/net/ethernet/s6gmac.c
@@ -14,7 +14,6 @@
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index c76571886011..69e4fd21adb4 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -11,7 +11,6 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/string.h>
@@ -356,7 +355,7 @@ static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp
if (pkt_status & SEEQ_RSTAT_FIG) {
/* Packet is OK. */
/* We don't want to receive our own packets */
- if (memcmp(rd->skb->data + 6, dev->dev_addr, ETH_ALEN)) {
+ if (!ether_addr_equal(rd->skb->data + 6, dev->dev_addr)) {
if (len > rx_copybreak) {
skb = rd->skb;
newskb = netdev_alloc_skb(dev, PKT_BUF_SZ);
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 676c3c057bfb..174a92f5fe51 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -14,6 +14,7 @@
#include "mcdi_pcol.h"
#include "nic.h"
#include "workarounds.h"
+#include "selftest.h"
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/wait.h>
@@ -52,31 +53,31 @@ struct efx_ef10_filter_table {
struct {
unsigned long spec; /* pointer to spec plus flag bits */
-/* BUSY flag indicates that an update is in progress. STACK_OLD is
- * used to mark and sweep stack-owned MAC filters.
+/* BUSY flag indicates that an update is in progress. AUTO_OLD is
+ * used to mark and sweep MAC filters for the device address lists.
*/
#define EFX_EF10_FILTER_FLAG_BUSY 1UL
-#define EFX_EF10_FILTER_FLAG_STACK_OLD 2UL
+#define EFX_EF10_FILTER_FLAG_AUTO_OLD 2UL
#define EFX_EF10_FILTER_FLAGS 3UL
u64 handle; /* firmware handle */
} *entry;
wait_queue_head_t waitq;
/* Shadow of net_device address lists, guarded by mac_lock */
-#define EFX_EF10_FILTER_STACK_UC_MAX 32
-#define EFX_EF10_FILTER_STACK_MC_MAX 256
+#define EFX_EF10_FILTER_DEV_UC_MAX 32
+#define EFX_EF10_FILTER_DEV_MC_MAX 256
struct {
u8 addr[ETH_ALEN];
u16 id;
- } stack_uc_list[EFX_EF10_FILTER_STACK_UC_MAX],
- stack_mc_list[EFX_EF10_FILTER_STACK_MC_MAX];
- int stack_uc_count; /* negative for PROMISC */
- int stack_mc_count; /* negative for PROMISC/ALLMULTI */
+ } dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX],
+ dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
+ int dev_uc_count; /* negative for PROMISC */
+ int dev_mc_count; /* negative for PROMISC/ALLMULTI */
};
/* An arbitrary search limit for the software hash table */
#define EFX_EF10_FILTER_SEARCH_LIMIT 200
-static void efx_ef10_rx_push_indir_table(struct efx_nic *efx);
+static void efx_ef10_rx_push_rss_config(struct efx_nic *efx);
static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
static void efx_ef10_filter_table_remove(struct efx_nic *efx);
@@ -263,6 +264,8 @@ static int efx_ef10_probe(struct efx_nic *efx)
if (rc)
goto fail3;
+ efx_ptp_probe(efx, NULL);
+
return 0;
fail3:
@@ -277,11 +280,17 @@ fail1:
static int efx_ef10_free_vis(struct efx_nic *efx)
{
- int rc = efx_mcdi_rpc(efx, MC_CMD_FREE_VIS, NULL, 0, NULL, 0, NULL);
+ MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
+ size_t outlen;
+ int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
/* -EALREADY means nothing to free, so ignore */
if (rc == -EALREADY)
rc = 0;
+ if (rc)
+ efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen,
+ rc);
return rc;
}
@@ -465,9 +474,10 @@ static void efx_ef10_remove(struct efx_nic *efx)
struct efx_ef10_nic_data *nic_data = efx->nic_data;
int rc;
+ efx_ptp_remove(efx);
+
efx_mcdi_mon_remove(efx);
- /* This needs to be after efx_ptp_remove_channel() with no filters */
efx_ef10_rx_free_indir_table(efx);
if (nic_data->wc_membase)
@@ -669,10 +679,21 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
nic_data->must_restore_piobufs = false;
}
- efx_ef10_rx_push_indir_table(efx);
+ efx_ef10_rx_push_rss_config(efx);
return 0;
}
+static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ /* All our allocations have been reset */
+ nic_data->must_realloc_vis = true;
+ nic_data->must_restore_filters = true;
+ nic_data->must_restore_piobufs = true;
+ nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
+}
+
static int efx_ef10_map_reset_flags(u32 *flags)
{
enum {
@@ -703,6 +724,19 @@ static int efx_ef10_map_reset_flags(u32 *flags)
return -EINVAL;
}
+static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
+{
+ int rc = efx_mcdi_reset(efx, reset_type);
+
+ /* If it was a port reset, trigger reallocation of MC resources.
+ * Note that on an MC reset nothing needs to be done now because we'll
+ * detect the MC reset later and handle it then.
+ */
+ if (reset_type == RESET_TYPE_ALL && !rc)
+ efx_ef10_reset_mc_allocations(efx);
+ return rc;
+}
+
#define EF10_DMA_STAT(ext_name, mcdi_name) \
[EF10_STAT_ ## ext_name] = \
{ #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
@@ -764,8 +798,8 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
EF10_DMA_STAT(rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
EF10_DMA_STAT(rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
EF10_DMA_STAT(rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
- EF10_DMA_STAT(rx_dp_emerg_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS),
- EF10_DMA_STAT(rx_dp_emerg_wait, RXDP_EMERGENCY_WAIT_CONDITIONS),
+ EF10_DMA_STAT(rx_dp_hlb_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS),
+ EF10_DMA_STAT(rx_dp_hlb_wait, RXDP_EMERGENCY_WAIT_CONDITIONS),
};
#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) | \
@@ -834,8 +868,8 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
(1ULL << EF10_STAT_rx_dp_q_disabled_packets) | \
(1ULL << EF10_STAT_rx_dp_di_dropped_packets) | \
(1ULL << EF10_STAT_rx_dp_streaming_packets) | \
- (1ULL << EF10_STAT_rx_dp_emerg_fetch) | \
- (1ULL << EF10_STAT_rx_dp_emerg_wait))
+ (1ULL << EF10_STAT_rx_dp_hlb_fetch) | \
+ (1ULL << EF10_STAT_rx_dp_hlb_wait))
static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
{
@@ -901,6 +935,7 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
return -EAGAIN;
/* Update derived statistics */
+ efx_nic_fix_nodesc_drop_stat(efx, &stats[EF10_STAT_rx_nodesc_drops]);
stats[EF10_STAT_rx_good_bytes] =
stats[EF10_STAT_rx_bytes] -
stats[EF10_STAT_rx_bytes_minus_good_bytes];
@@ -1067,10 +1102,7 @@ static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
nic_data->warm_boot_count = rc;
/* All our allocations have been reset */
- nic_data->must_realloc_vis = true;
- nic_data->must_restore_filters = true;
- nic_data->must_restore_piobufs = true;
- nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
+ efx_ef10_reset_mc_allocations(efx);
/* The datapath firmware might have been changed */
nic_data->must_check_datapath_caps = true;
@@ -1241,8 +1273,8 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
return;
fail:
- WARN_ON(true);
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ netdev_WARN(efx->net_dev, "failed to initialise TXQ %d\n",
+ tx_queue->queue);
}
static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
@@ -1256,7 +1288,7 @@ static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
tx_queue->queue);
- rc = efx_mcdi_rpc(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc && rc != -EALREADY)
@@ -1265,7 +1297,8 @@ static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
return;
fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN,
+ outbuf, outlen, rc);
}
static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue)
@@ -1408,12 +1441,12 @@ static void efx_ef10_rx_free_indir_table(struct efx_nic *efx)
nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
}
-static void efx_ef10_rx_push_indir_table(struct efx_nic *efx)
+static void efx_ef10_rx_push_rss_config(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
int rc;
- netif_dbg(efx, drv, efx->net_dev, "pushing RX indirection table\n");
+ netif_dbg(efx, drv, efx->net_dev, "pushing RSS config\n");
if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) {
rc = efx_ef10_alloc_rss_context(efx, &nic_data->rx_rss_context);
@@ -1461,8 +1494,9 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
efx_rx_queue_index(rx_queue));
- MCDI_POPULATE_DWORD_1(inbuf, INIT_RXQ_IN_FLAGS,
- INIT_RXQ_IN_FLAG_PREFIX, 1);
+ MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_IN_FLAGS,
+ INIT_RXQ_IN_FLAG_PREFIX, 1,
+ INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
@@ -1481,13 +1515,8 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
outbuf, sizeof(outbuf), &outlen);
if (rc)
- goto fail;
-
- return;
-
-fail:
- WARN_ON(true);
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n",
+ efx_rx_queue_index(rx_queue));
}
static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
@@ -1501,7 +1530,7 @@ static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
efx_rx_queue_index(rx_queue));
- rc = efx_mcdi_rpc(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc && rc != -EALREADY)
@@ -1510,7 +1539,8 @@ static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
return;
fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN,
+ outbuf, outlen, rc);
}
static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue)
@@ -1647,15 +1677,7 @@ static int efx_ef10_ev_init(struct efx_channel *channel)
rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
outbuf, sizeof(outbuf), &outlen);
- if (rc)
- goto fail;
-
/* IRQ return is ignored */
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1669,7 +1691,7 @@ static void efx_ef10_ev_fini(struct efx_channel *channel)
MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
- rc = efx_mcdi_rpc(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc && rc != -EALREADY)
@@ -1678,7 +1700,8 @@ static void efx_ef10_ev_fini(struct efx_channel *channel)
return;
fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
+ outbuf, outlen, rc);
}
static void efx_ef10_ev_remove(struct efx_channel *channel)
@@ -1717,8 +1740,6 @@ static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue)
{
unsigned int rx_desc_ptr;
- WARN_ON(rx_queue->scatter_n == 0);
-
netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev,
"scattered RX aborted (dropping %u buffers)\n",
rx_queue->scatter_n);
@@ -1754,7 +1775,10 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel,
rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS);
rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT);
- WARN_ON(EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT));
+ if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT))
+ netdev_WARN(efx->net_dev, "saw RX_DROP_EVENT: event="
+ EFX_QWORD_FMT "\n",
+ EFX_QWORD_VAL(*event));
rx_queue = efx_channel_get_rx_queue(channel);
@@ -1765,17 +1789,27 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel,
((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
if (n_descs != rx_queue->scatter_n + 1) {
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
/* detect rx abort */
if (unlikely(n_descs == rx_queue->scatter_n)) {
- WARN_ON(rx_bytes != 0);
+ if (rx_queue->scatter_n == 0 || rx_bytes != 0)
+ netdev_WARN(efx->net_dev,
+ "invalid RX abort: scatter_n=%u event="
+ EFX_QWORD_FMT "\n",
+ rx_queue->scatter_n,
+ EFX_QWORD_VAL(*event));
efx_ef10_handle_rx_abort(rx_queue);
return 0;
}
- if (unlikely(rx_queue->scatter_n != 0)) {
- /* Scattered packet completions cannot be
- * merged, so something has gone wrong.
- */
+ /* Check that RX completion merging is valid, i.e.
+ * the current firmware supports it and this is a
+ * non-scattered packet.
+ */
+ if (!(nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN)) ||
+ rx_queue->scatter_n != 0 || rx_cont) {
efx_ef10_handle_rx_bad_lbits(
rx_queue, next_ptr_lbits,
(rx_queue->removed_count +
@@ -1901,7 +1935,7 @@ static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel,
* events, so efx_process_channel() won't refill the
* queue. Refill it here
*/
- efx_fast_push_rx_descriptors(&channel->rx_queue);
+ efx_fast_push_rx_descriptors(&channel->rx_queue, true);
break;
default:
netif_err(efx, hw, efx->net_dev,
@@ -2232,7 +2266,9 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx,
MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE, spec->dmaq_id);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE,
+ spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
+ 0 : spec->dmaq_id);
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
(spec->flags & EFX_FILTER_FLAG_RX_RSS) ?
MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
@@ -2257,6 +2293,8 @@ static int efx_ef10_filter_push(struct efx_nic *efx,
outbuf, sizeof(outbuf), NULL);
if (rc == 0)
*handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
+ if (rc == -ENOSPC)
+ rc = -EBUSY; /* to match efx_farch_filter_insert() */
return rc;
}
@@ -2326,10 +2364,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx,
EFX_EF10_FILTER_FLAG_BUSY)
break;
if (spec->priority < saved_spec->priority &&
- !(saved_spec->priority ==
- EFX_FILTER_PRI_REQUIRED &&
- saved_spec->flags &
- EFX_FILTER_FLAG_RX_STACK)) {
+ spec->priority != EFX_FILTER_PRI_AUTO) {
rc = -EPERM;
goto out_unlock;
}
@@ -2383,11 +2418,13 @@ found:
*/
saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
if (saved_spec) {
- if (spec->flags & EFX_FILTER_FLAG_RX_STACK) {
+ if (spec->priority == EFX_FILTER_PRI_AUTO &&
+ saved_spec->priority >= EFX_FILTER_PRI_AUTO) {
/* Just make sure it won't be removed */
- saved_spec->flags |= EFX_FILTER_FLAG_RX_STACK;
+ if (saved_spec->priority > EFX_FILTER_PRI_AUTO)
+ saved_spec->flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
table->entry[ins_index].spec &=
- ~EFX_EF10_FILTER_FLAG_STACK_OLD;
+ ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
rc = ins_index;
goto out_unlock;
}
@@ -2427,8 +2464,11 @@ found:
if (rc == 0) {
if (replacing) {
/* Update the fields that may differ */
+ if (saved_spec->priority == EFX_FILTER_PRI_AUTO)
+ saved_spec->flags |=
+ EFX_FILTER_FLAG_RX_OVER_AUTO;
saved_spec->priority = spec->priority;
- saved_spec->flags &= EFX_FILTER_FLAG_RX_STACK;
+ saved_spec->flags &= EFX_FILTER_FLAG_RX_OVER_AUTO;
saved_spec->flags |= spec->flags;
saved_spec->rss_context = spec->rss_context;
saved_spec->dmaq_id = spec->dmaq_id;
@@ -2497,13 +2537,13 @@ static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
}
/* Remove a filter.
- * If !stack_requested, remove by ID
- * If stack_requested, remove by index
+ * If !by_index, remove by ID
+ * If by_index, remove by index
* Filter ID may come from userland and must be range-checked.
*/
static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 filter_id, bool stack_requested)
+ unsigned int priority_mask,
+ u32 filter_id, bool by_index)
{
unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
struct efx_ef10_filter_table *table = efx->filter_state;
@@ -2527,26 +2567,41 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
spin_unlock_bh(&efx->filter_lock);
schedule();
}
+
spec = efx_ef10_filter_entry_spec(table, filter_idx);
- if (!spec || spec->priority > priority ||
- (!stack_requested &&
+ if (!spec ||
+ (!by_index &&
efx_ef10_filter_rx_match_pri(table, spec->match_flags) !=
filter_id / HUNT_FILTER_TBL_ROWS)) {
rc = -ENOENT;
goto out_unlock;
}
+
+ if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO &&
+ priority_mask == (1U << EFX_FILTER_PRI_AUTO)) {
+ /* Just remove flags */
+ spec->flags &= ~EFX_FILTER_FLAG_RX_OVER_AUTO;
+ table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
+ rc = 0;
+ goto out_unlock;
+ }
+
+ if (!(priority_mask & (1U << spec->priority))) {
+ rc = -ENOENT;
+ goto out_unlock;
+ }
+
table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
spin_unlock_bh(&efx->filter_lock);
- if (spec->flags & EFX_FILTER_FLAG_RX_STACK && !stack_requested) {
- /* Reset steering of a stack-owned filter */
+ if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
+ /* Reset to an automatic filter */
struct efx_filter_spec new_spec = *spec;
- new_spec.priority = EFX_FILTER_PRI_REQUIRED;
+ new_spec.priority = EFX_FILTER_PRI_AUTO;
new_spec.flags = (EFX_FILTER_FLAG_RX |
- EFX_FILTER_FLAG_RX_RSS |
- EFX_FILTER_FLAG_RX_STACK);
+ EFX_FILTER_FLAG_RX_RSS);
new_spec.dmaq_id = 0;
new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT;
rc = efx_ef10_filter_push(efx, &new_spec,
@@ -2574,6 +2629,7 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
}
}
+
table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
wake_up_all(&table->waitq);
out_unlock:
@@ -2586,7 +2642,8 @@ static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 filter_id)
{
- return efx_ef10_filter_remove_internal(efx, priority, filter_id, false);
+ return efx_ef10_filter_remove_internal(efx, 1U << priority,
+ filter_id, false);
}
static int efx_ef10_filter_get_safe(struct efx_nic *efx,
@@ -2612,10 +2669,24 @@ static int efx_ef10_filter_get_safe(struct efx_nic *efx,
return rc;
}
-static void efx_ef10_filter_clear_rx(struct efx_nic *efx,
+static int efx_ef10_filter_clear_rx(struct efx_nic *efx,
enum efx_filter_priority priority)
{
- /* TODO */
+ unsigned int priority_mask;
+ unsigned int i;
+ int rc;
+
+ priority_mask = (((1U << (priority + 1)) - 1) &
+ ~(1U << EFX_FILTER_PRI_AUTO));
+
+ for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
+ rc = efx_ef10_filter_remove_internal(efx, priority_mask,
+ i, true);
+ if (rc && rc != -ENOENT)
+ return rc;
+ }
+
+ return 0;
}
static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx,
@@ -2716,8 +2787,6 @@ static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx,
rc = -EBUSY;
goto fail_unlock;
}
- EFX_WARN_ON_PARANOID(saved_spec->flags &
- EFX_FILTER_FLAG_RX_STACK);
if (spec->priority < saved_spec->priority) {
rc = -EPERM;
goto fail_unlock;
@@ -3027,8 +3096,11 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx)
table->entry[filter_idx].handle);
rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
NULL, 0, NULL);
-
- WARN_ON(rc != 0);
+ if (rc)
+ netdev_WARN(efx->net_dev,
+ "filter_idx=%#x handle=%#llx\n",
+ filter_idx,
+ table->entry[filter_idx].handle);
kfree(spec);
}
@@ -3052,15 +3124,15 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
/* Mark old filters that may need to be removed */
spin_lock_bh(&efx->filter_lock);
- n = table->stack_uc_count < 0 ? 1 : table->stack_uc_count;
+ n = table->dev_uc_count < 0 ? 1 : table->dev_uc_count;
for (i = 0; i < n; i++) {
- filter_idx = table->stack_uc_list[i].id % HUNT_FILTER_TBL_ROWS;
- table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD;
+ filter_idx = table->dev_uc_list[i].id % HUNT_FILTER_TBL_ROWS;
+ table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
}
- n = table->stack_mc_count < 0 ? 1 : table->stack_mc_count;
+ n = table->dev_mc_count < 0 ? 1 : table->dev_mc_count;
for (i = 0; i < n; i++) {
- filter_idx = table->stack_mc_list[i].id % HUNT_FILTER_TBL_ROWS;
- table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD;
+ filter_idx = table->dev_mc_list[i].id % HUNT_FILTER_TBL_ROWS;
+ table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
}
spin_unlock_bh(&efx->filter_lock);
@@ -3069,28 +3141,28 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
*/
netif_addr_lock_bh(net_dev);
if (net_dev->flags & IFF_PROMISC ||
- netdev_uc_count(net_dev) >= EFX_EF10_FILTER_STACK_UC_MAX) {
- table->stack_uc_count = -1;
+ netdev_uc_count(net_dev) >= EFX_EF10_FILTER_DEV_UC_MAX) {
+ table->dev_uc_count = -1;
} else {
- table->stack_uc_count = 1 + netdev_uc_count(net_dev);
- memcpy(table->stack_uc_list[0].addr, net_dev->dev_addr,
+ table->dev_uc_count = 1 + netdev_uc_count(net_dev);
+ memcpy(table->dev_uc_list[0].addr, net_dev->dev_addr,
ETH_ALEN);
i = 1;
netdev_for_each_uc_addr(uc, net_dev) {
- memcpy(table->stack_uc_list[i].addr,
+ memcpy(table->dev_uc_list[i].addr,
uc->addr, ETH_ALEN);
i++;
}
}
if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
- netdev_mc_count(net_dev) >= EFX_EF10_FILTER_STACK_MC_MAX) {
- table->stack_mc_count = -1;
+ netdev_mc_count(net_dev) >= EFX_EF10_FILTER_DEV_MC_MAX) {
+ table->dev_mc_count = -1;
} else {
- table->stack_mc_count = 1 + netdev_mc_count(net_dev);
- eth_broadcast_addr(table->stack_mc_list[0].addr);
+ table->dev_mc_count = 1 + netdev_mc_count(net_dev);
+ eth_broadcast_addr(table->dev_mc_list[0].addr);
i = 1;
netdev_for_each_mc_addr(mc, net_dev) {
- memcpy(table->stack_mc_list[i].addr,
+ memcpy(table->dev_mc_list[i].addr,
mc->addr, ETH_ALEN);
i++;
}
@@ -3098,90 +3170,86 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
netif_addr_unlock_bh(net_dev);
/* Insert/renew unicast filters */
- if (table->stack_uc_count >= 0) {
- for (i = 0; i < table->stack_uc_count; i++) {
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
- EFX_FILTER_FLAG_RX_RSS |
- EFX_FILTER_FLAG_RX_STACK,
+ if (table->dev_uc_count >= 0) {
+ for (i = 0; i < table->dev_uc_count; i++) {
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ EFX_FILTER_FLAG_RX_RSS,
0);
efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
- table->stack_uc_list[i].addr);
+ table->dev_uc_list[i].addr);
rc = efx_ef10_filter_insert(efx, &spec, true);
if (rc < 0) {
/* Fall back to unicast-promisc */
while (i--)
efx_ef10_filter_remove_safe(
- efx, EFX_FILTER_PRI_REQUIRED,
- table->stack_uc_list[i].id);
- table->stack_uc_count = -1;
+ efx, EFX_FILTER_PRI_AUTO,
+ table->dev_uc_list[i].id);
+ table->dev_uc_count = -1;
break;
}
- table->stack_uc_list[i].id = rc;
+ table->dev_uc_list[i].id = rc;
}
}
- if (table->stack_uc_count < 0) {
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
- EFX_FILTER_FLAG_RX_RSS |
- EFX_FILTER_FLAG_RX_STACK,
+ if (table->dev_uc_count < 0) {
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ EFX_FILTER_FLAG_RX_RSS,
0);
efx_filter_set_uc_def(&spec);
rc = efx_ef10_filter_insert(efx, &spec, true);
if (rc < 0) {
WARN_ON(1);
- table->stack_uc_count = 0;
+ table->dev_uc_count = 0;
} else {
- table->stack_uc_list[0].id = rc;
+ table->dev_uc_list[0].id = rc;
}
}
/* Insert/renew multicast filters */
- if (table->stack_mc_count >= 0) {
- for (i = 0; i < table->stack_mc_count; i++) {
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
- EFX_FILTER_FLAG_RX_RSS |
- EFX_FILTER_FLAG_RX_STACK,
+ if (table->dev_mc_count >= 0) {
+ for (i = 0; i < table->dev_mc_count; i++) {
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ EFX_FILTER_FLAG_RX_RSS,
0);
efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
- table->stack_mc_list[i].addr);
+ table->dev_mc_list[i].addr);
rc = efx_ef10_filter_insert(efx, &spec, true);
if (rc < 0) {
/* Fall back to multicast-promisc */
while (i--)
efx_ef10_filter_remove_safe(
- efx, EFX_FILTER_PRI_REQUIRED,
- table->stack_mc_list[i].id);
- table->stack_mc_count = -1;
+ efx, EFX_FILTER_PRI_AUTO,
+ table->dev_mc_list[i].id);
+ table->dev_mc_count = -1;
break;
}
- table->stack_mc_list[i].id = rc;
+ table->dev_mc_list[i].id = rc;
}
}
- if (table->stack_mc_count < 0) {
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
- EFX_FILTER_FLAG_RX_RSS |
- EFX_FILTER_FLAG_RX_STACK,
+ if (table->dev_mc_count < 0) {
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ EFX_FILTER_FLAG_RX_RSS,
0);
efx_filter_set_mc_def(&spec);
rc = efx_ef10_filter_insert(efx, &spec, true);
if (rc < 0) {
WARN_ON(1);
- table->stack_mc_count = 0;
+ table->dev_mc_count = 0;
} else {
- table->stack_mc_list[0].id = rc;
+ table->dev_mc_list[0].id = rc;
}
}
/* Remove filters that weren't renewed. Since nothing else
- * changes the STACK_OLD flag or removes these filters, we
+ * changes the AUTO_OLD flag or removes these filters, we
* don't need to hold the filter_lock while scanning for
* these filters.
*/
for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
if (ACCESS_ONCE(table->entry[i].spec) &
- EFX_EF10_FILTER_FLAG_STACK_OLD) {
- if (efx_ef10_filter_remove_internal(efx,
- EFX_FILTER_PRI_REQUIRED,
- i, true) < 0)
+ EFX_EF10_FILTER_FLAG_AUTO_OLD) {
+ if (efx_ef10_filter_remove_internal(
+ efx, 1U << EFX_FILTER_PRI_AUTO,
+ i, true) < 0)
remove_failed = true;
}
}
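The STACK to AUTO rename keeps the same mark-and-sweep flow; here is the sweep step in isolation, as a sketch (the wrapper name is hypothetical, the rest mirrors the loop above):

	static void sweep_stale_auto_filters(struct efx_nic *efx)
	{
		struct efx_ef10_filter_table *table = efx->filter_state;
		unsigned int i;

		/* Entries still flagged AUTO_OLD were not renewed from
		 * the current dev_uc_list/dev_mc_list, so remove them at
		 * AUTO priority only.
		 */
		for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++)
			if (ACCESS_ONCE(table->entry[i].spec) &
			    EFX_EF10_FILTER_FLAG_AUTO_OLD)
				efx_ef10_filter_remove_internal(
					efx, 1U << EFX_FILTER_PRI_AUTO,
					i, true);
	}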
@@ -3195,6 +3263,87 @@ static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
return efx_mcdi_set_mac(efx);
}
+static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_type);
+ return efx_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+/* MC BISTs follow a different poll mechanism to phy BISTs.
+ * The BIST is done in the poll handler on the MC, and the MCDI command
+ * will block until the BIST is done.
+ */
+static int efx_ef10_poll_bist(struct efx_nic *efx)
+{
+ int rc;
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_LEN);
+ size_t outlen;
+ u32 result;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc != 0)
+ return rc;
+
+ if (outlen < MC_CMD_POLL_BIST_OUT_LEN)
+ return -EIO;
+
+ result = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT);
+ switch (result) {
+ case MC_CMD_POLL_BIST_PASSED:
+ netif_dbg(efx, hw, efx->net_dev, "BIST passed.\n");
+ return 0;
+ case MC_CMD_POLL_BIST_TIMEOUT:
+ netif_err(efx, hw, efx->net_dev, "BIST timed out\n");
+ return -EIO;
+ case MC_CMD_POLL_BIST_FAILED:
+ netif_err(efx, hw, efx->net_dev, "BIST failed.\n");
+ return -EIO;
+ default:
+ netif_err(efx, hw, efx->net_dev,
+ "BIST returned unknown result %u", result);
+ return -EIO;
+ }
+}
+
+static int efx_ef10_run_bist(struct efx_nic *efx, u32 bist_type)
+{
+ int rc;
+
+ netif_dbg(efx, drv, efx->net_dev, "starting BIST type %u\n", bist_type);
+
+ rc = efx_ef10_start_bist(efx, bist_type);
+ if (rc != 0)
+ return rc;
+
+ return efx_ef10_poll_bist(efx);
+}
+
+static int
+efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
+{
+ int rc, rc2;
+
+ efx_reset_down(efx, RESET_TYPE_WORLD);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_ENABLE_OFFLINE_BIST,
+ NULL, 0, NULL, 0, NULL);
+ if (rc != 0)
+ goto out;
+
+ tests->memory = efx_ef10_run_bist(efx, MC_CMD_MC_MEM_BIST) ? -1 : 1;
+ tests->registers = efx_ef10_run_bist(efx, MC_CMD_REG_BIST) ? -1 : 1;
+
+ rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD);
+
+out:
+ rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0);
+ return rc ? rc : rc2;
+}
+
#ifdef CONFIG_SFC_MTD
struct efx_ef10_nvram_type_info {
@@ -3213,6 +3362,7 @@ static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0, 1, "sfc_exp_rom_cfg" },
{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0, 2, "sfc_exp_rom_cfg" },
{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" },
+ { NVRAM_PARTITION_TYPE_LICENSE, 0, 0, "sfc_license" },
{ NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" },
};
@@ -3320,6 +3470,119 @@ static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
_efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD);
}
+static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel,
+ bool temp)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN);
+ int rc;
+
+ if (channel->sync_events_state == SYNC_EVENTS_REQUESTED ||
+ channel->sync_events_state == SYNC_EVENTS_VALID ||
+ (temp && channel->sync_events_state == SYNC_EVENTS_DISABLED))
+ return 0;
+ channel->sync_events_state = SYNC_EVENTS_REQUESTED;
+
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+ MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE,
+ channel->channel);
+
+ rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
+ inbuf, sizeof(inbuf), NULL, 0, NULL);
+
+ if (rc != 0)
+ channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
+ SYNC_EVENTS_DISABLED;
+
+ return rc;
+}
+
+static int efx_ef10_rx_disable_timestamping(struct efx_channel *channel,
+ bool temp)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN);
+ int rc;
+
+ if (channel->sync_events_state == SYNC_EVENTS_DISABLED ||
+ (temp && channel->sync_events_state == SYNC_EVENTS_QUIESCENT))
+ return 0;
+ if (channel->sync_events_state == SYNC_EVENTS_QUIESCENT) {
+ channel->sync_events_state = SYNC_EVENTS_DISABLED;
+ return 0;
+ }
+ channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
+ SYNC_EVENTS_DISABLED;
+
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+ MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL,
+ MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE);
+ MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE,
+ channel->channel);
+
+ rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
+ inbuf, sizeof(inbuf), NULL, 0, NULL);
+
+ return rc;
+}
+
+static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en,
+ bool temp)
+{
+ int (*set)(struct efx_channel *channel, bool temp);
+ struct efx_channel *channel;
+
+ set = en ?
+ efx_ef10_rx_enable_timestamping :
+ efx_ef10_rx_disable_timestamping;
+
+ efx_for_each_channel(channel, efx) {
+ int rc = set(channel, temp);
+ if (en && rc != 0) {
+ efx_ef10_ptp_set_ts_sync_events(efx, false, temp);
+ return rc;
+ }
+ }
+
+ return 0;
+}
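The temp flag gives each channel a small state machine; a summary of the transitions the two helpers above implement (state names as in the driver):

	/* enable(temp):
	 *   REQUESTED or VALID  -> no-op (already subscribed)
	 *   DISABLED && temp    -> no-op (stay off across a reset)
	 *   otherwise           -> REQUESTED + MCDI subscribe; on
	 *                          failure, QUIESCENT if temp else DISABLED
	 * disable(temp):
	 *   DISABLED            -> no-op
	 *   QUIESCENT && temp   -> no-op
	 *   QUIESCENT && !temp  -> DISABLED (no MCDI call needed)
	 *   otherwise           -> QUIESCENT if temp else DISABLED,
	 *                          then MCDI unsubscribe
	 */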
+
+static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
+ struct hwtstamp_config *init)
+{
+ int rc;
+
+ switch (init->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ efx_ef10_ptp_set_ts_sync_events(efx, false, false);
+ /* if TX timestamping is still requested then leave PTP on */
+ return efx_ptp_change_mode(efx,
+ init->tx_type != HWTSTAMP_TX_OFF, 0);
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ init->rx_filter = HWTSTAMP_FILTER_ALL;
+ rc = efx_ptp_change_mode(efx, true, 0);
+ if (!rc)
+ rc = efx_ef10_ptp_set_ts_sync_events(efx, true, false);
+ if (rc)
+ efx_ptp_change_mode(efx, false, 0);
+ return rc;
+ default:
+ return -ERANGE;
+ }
+}
+
const struct efx_nic_type efx_hunt_a0_nic_type = {
.mem_map_size = efx_ef10_mem_map_size,
.probe = efx_ef10_probe,
@@ -3329,13 +3592,14 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.fini = efx_port_dummy_op_void,
.map_reset_reason = efx_mcdi_map_reset_reason,
.map_reset_flags = efx_ef10_map_reset_flags,
- .reset = efx_mcdi_reset,
+ .reset = efx_ef10_reset,
.probe_port = efx_mcdi_port_probe,
.remove_port = efx_mcdi_port_remove,
.fini_dmaq = efx_ef10_fini_dmaq,
.describe_stats = efx_ef10_describe_stats,
.update_stats = efx_ef10_update_stats,
.start_stats = efx_mcdi_mac_start_stats,
+ .pull_stats = efx_mcdi_mac_pull_stats,
.stop_stats = efx_mcdi_mac_stop_stats,
.set_id_led = efx_mcdi_set_id_led,
.push_irq_moderation = efx_ef10_push_irq_moderation,
@@ -3345,7 +3609,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.get_wol = efx_ef10_get_wol,
.set_wol = efx_ef10_set_wol,
.resume_wol = efx_port_dummy_op_void,
- /* TODO: test_chip */
+ .test_chip = efx_ef10_test_chip,
.test_nvram = efx_mcdi_nvram_test_all,
.mcdi_request = efx_ef10_mcdi_request,
.mcdi_poll_response = efx_ef10_mcdi_poll_response,
@@ -3360,7 +3624,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.tx_init = efx_ef10_tx_init,
.tx_remove = efx_ef10_tx_remove,
.tx_write = efx_ef10_tx_write,
- .rx_push_indir_table = efx_ef10_rx_push_indir_table,
+ .rx_push_rss_config = efx_ef10_rx_push_rss_config,
.rx_probe = efx_ef10_rx_probe,
.rx_init = efx_ef10_rx_init,
.rx_remove = efx_ef10_rx_remove,
@@ -3397,11 +3661,14 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.mtd_sync = efx_mcdi_mtd_sync,
#endif
.ptp_write_host_time = efx_ef10_ptp_write_host_time,
+ .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events,
+ .ptp_set_ts_config = efx_ef10_ptp_set_ts_config,
.revision = EFX_REV_HUNT_A0,
.max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
.rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
.rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
+ .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
.can_rx_scatter = true,
.always_rx_scatter = true,
.max_interrupt_mode = EFX_INT_MODE_MSIX,
@@ -3410,4 +3677,6 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
NETIF_F_RXHASH | NETIF_F_NTUPLE),
.mcdi_max_ver = 2,
.max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
+ .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
+ 1 << HWTSTAMP_FILTER_ALL,
};
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index fd844b53e385..83d464347021 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -83,6 +83,7 @@ const char *const efx_reset_type_names[] = {
[RESET_TYPE_DMA_ERROR] = "DMA_ERROR",
[RESET_TYPE_TX_SKIP] = "TX_SKIP",
[RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
+ [RESET_TYPE_MC_BIST] = "MC_BIST",
};
/* Reset workqueue. If any NIC has a hardware failure then a reset will be
@@ -91,6 +92,12 @@ const char *const efx_reset_type_names[] = {
*/
static struct workqueue_struct *reset_workqueue;
+/* How often and how many times to poll for a reset while waiting for a
+ * BIST that another function started to complete.
+ */
+#define BIST_WAIT_DELAY_MS 100
+#define BIST_WAIT_DELAY_COUNT 100
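A quick sanity check on the polling budget these constants allow (used by efx_wait_for_bist_end() further down):

	/* BIST_WAIT_DELAY_COUNT * BIST_WAIT_DELAY_MS = 100 * 100 ms = 10 s
	 * of polling efx_mcdi_poll_reboot() before warning and clearing
	 * mc_bist_for_other_fn anyway.
	 */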
+
/**************************************************************************
*
* Configurable values
@@ -246,7 +253,7 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
efx_channel_get_rx_queue(channel);
efx_rx_flush_packet(channel);
- efx_fast_push_rx_descriptors(rx_queue);
+ efx_fast_push_rx_descriptors(rx_queue, true);
}
return spent;
@@ -639,7 +646,9 @@ static void efx_start_datapath(struct efx_nic *efx)
efx_for_each_channel_rx_queue(rx_queue, channel) {
efx_init_rx_queue(rx_queue);
atomic_inc(&efx->active_queues);
- efx_nic_generate_fill_event(rx_queue);
+ efx_stop_eventq(channel);
+ efx_fast_push_rx_descriptors(rx_queue, false);
+ efx_start_eventq(channel);
}
WARN_ON(channel->rx_pkt_n_frags);
@@ -1051,18 +1060,23 @@ static void efx_start_port(struct efx_nic *efx)
mutex_lock(&efx->mac_lock);
efx->port_enabled = true;
- /* efx_mac_work() might have been scheduled after efx_stop_port(),
- * and then cancelled by efx_flush_all() */
+ /* Ensure MAC ingress/egress is enabled */
efx->type->reconfigure_mac(efx);
mutex_unlock(&efx->mac_lock);
}
-/* Prevent efx_mac_work() and efx_monitor() from working */
+/* Cancel work for MAC reconfiguration, periodic hardware monitoring
+ * and the async self-test, wait for them to finish and prevent them
+ * being scheduled again. This doesn't cover online resets, which
+ * should only be cancelled when removing the device.
+ */
static void efx_stop_port(struct efx_nic *efx)
{
netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
mutex_lock(&efx->mac_lock);
efx->port_enabled = false;
mutex_unlock(&efx->mac_lock);
@@ -1070,6 +1084,10 @@ static void efx_stop_port(struct efx_nic *efx)
/* Serialise against efx_set_multicast_list() */
netif_addr_lock_bh(efx->net_dev);
netif_addr_unlock_bh(efx->net_dev);
+
+ cancel_delayed_work_sync(&efx->monitor_work);
+ efx_selftest_async_cancel(efx);
+ cancel_work_sync(&efx->mac_work);
}
static void efx_fini_port(struct efx_nic *efx)
@@ -1099,6 +1117,77 @@ static void efx_remove_port(struct efx_nic *efx)
*
**************************************************************************/
+static LIST_HEAD(efx_primary_list);
+static LIST_HEAD(efx_unassociated_list);
+
+static bool efx_same_controller(struct efx_nic *left, struct efx_nic *right)
+{
+ return left->type == right->type &&
+ left->vpd_sn && right->vpd_sn &&
+ !strcmp(left->vpd_sn, right->vpd_sn);
+}
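Note the NULL checks: vpd_sn stays unset when efx_probe_vpd_strings() cannot read or parse the VPD, and such a function must never match another, so it stays on the unassociated list. With hypothetical serial strings:

	/* left->vpd_sn = "00000001", right->vpd_sn = "00000001" -> match
	 * left->vpd_sn = NULL,       right->vpd_sn = "00000001" -> no match
	 */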
+
+static void efx_associate(struct efx_nic *efx)
+{
+ struct efx_nic *other, *next;
+
+ if (efx->primary == efx) {
+ /* Adding primary function; look for secondaries */
+
+ netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n");
+ list_add_tail(&efx->node, &efx_primary_list);
+
+ list_for_each_entry_safe(other, next, &efx_unassociated_list,
+ node) {
+ if (efx_same_controller(efx, other)) {
+ list_del(&other->node);
+ netif_dbg(other, probe, other->net_dev,
+ "moving to secondary list of %s %s\n",
+ pci_name(efx->pci_dev),
+ efx->net_dev->name);
+ list_add_tail(&other->node,
+ &efx->secondary_list);
+ other->primary = efx;
+ }
+ }
+ } else {
+ /* Adding secondary function; look for primary */
+
+ list_for_each_entry(other, &efx_primary_list, node) {
+ if (efx_same_controller(efx, other)) {
+ netif_dbg(efx, probe, efx->net_dev,
+ "adding to secondary list of %s %s\n",
+ pci_name(other->pci_dev),
+ other->net_dev->name);
+ list_add_tail(&efx->node,
+ &other->secondary_list);
+ efx->primary = other;
+ return;
+ }
+ }
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "adding to unassociated list\n");
+ list_add_tail(&efx->node, &efx_unassociated_list);
+ }
+}
+
+static void efx_dissociate(struct efx_nic *efx)
+{
+ struct efx_nic *other, *next;
+
+ list_del(&efx->node);
+ efx->primary = NULL;
+
+ list_for_each_entry_safe(other, next, &efx->secondary_list, node) {
+ list_del(&other->node);
+ netif_dbg(other, probe, other->net_dev,
+ "moving to unassociated list\n");
+ list_add_tail(&other->node, &efx_unassociated_list);
+ other->primary = NULL;
+ }
+}
+
/* This configures the PCI device to enable I/O and DMA. */
static int efx_init_io(struct efx_nic *efx)
{
@@ -1675,18 +1764,10 @@ static void efx_start_all(struct efx_nic *efx)
}
efx->type->start_stats(efx);
-}
-
-/* Flush all delayed work. Should only be called when no more delayed work
- * will be scheduled. This doesn't flush pending online resets (efx_reset),
- * since we're holding the rtnl_lock at this point. */
-static void efx_flush_all(struct efx_nic *efx)
-{
- /* Make sure the hardware monitor and event self-test are stopped */
- cancel_delayed_work_sync(&efx->monitor_work);
- efx_selftest_async_cancel(efx);
- /* Stop scheduled port reconfigurations */
- cancel_work_sync(&efx->mac_work);
+ efx->type->pull_stats(efx);
+ spin_lock_bh(&efx->stats_lock);
+ efx->type->update_stats(efx, NULL, NULL);
+ spin_unlock_bh(&efx->stats_lock);
}
/* Quiesce the hardware and software data path, and regular activity
@@ -1702,12 +1783,16 @@ static void efx_stop_all(struct efx_nic *efx)
if (!efx->port_enabled)
return;
+ /* update stats before we go down so we can accurately count
+ * rx_nodesc_drops
+ */
+ efx->type->pull_stats(efx);
+ spin_lock_bh(&efx->stats_lock);
+ efx->type->update_stats(efx, NULL, NULL);
+ spin_unlock_bh(&efx->stats_lock);
efx->type->stop_stats(efx);
efx_stop_port(efx);
- /* Flush efx_mac_work(), refill_workqueue, monitor_work */
- efx_flush_all(efx);
-
/* Stop the kernel transmit interface. This is only valid if
* the device is stopped or detached; otherwise the watchdog
* may fire immediately.
@@ -1851,7 +1936,9 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
struct mii_ioctl_data *data = if_mii(ifr);
if (cmd == SIOCSHWTSTAMP)
- return efx_ptp_ioctl(efx, ifr, cmd);
+ return efx_ptp_set_ts_config(efx, ifr);
+ if (cmd == SIOCGHWTSTAMP)
+ return efx_ptp_get_ts_config(efx, ifr);
/* Convert phy_id from older PRTAD/DEVAD format */
if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
@@ -2064,7 +2151,7 @@ static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
/* If disabling RX n-tuple filtering, clear existing filters */
if (net_dev->features & ~data & NETIF_F_NTUPLE)
- efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
+ return efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
return 0;
}
@@ -2198,6 +2285,8 @@ static int efx_register_netdev(struct efx_nic *efx)
efx_init_tx_queue_core_txq(tx_queue);
}
+ efx_associate(efx);
+
rtnl_unlock();
rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
@@ -2211,6 +2300,7 @@ static int efx_register_netdev(struct efx_nic *efx)
fail_registered:
rtnl_lock();
+ efx_dissociate(efx);
unregister_netdevice(net_dev);
fail_locked:
efx->state = STATE_UNINIT;
@@ -2387,6 +2477,24 @@ int efx_try_recovery(struct efx_nic *efx)
return 0;
}
+static void efx_wait_for_bist_end(struct efx_nic *efx)
+{
+ int i;
+
+ for (i = 0; i < BIST_WAIT_DELAY_COUNT; ++i) {
+ if (efx_mcdi_poll_reboot(efx))
+ goto out;
+ msleep(BIST_WAIT_DELAY_MS);
+ }
+
+ netif_err(efx, drv, efx->net_dev, "Warning: No MC reboot after BIST mode\n");
+out:
+ /* Either way unset the BIST flag. If we found no reboot we probably
+ * won't recover, but we should try.
+ */
+ efx->mc_bist_for_other_fn = false;
+}
+
/* The worker thread exists so that code that cannot sleep can
* schedule a reset for later.
*/
@@ -2399,6 +2507,9 @@ static void efx_reset_work(struct work_struct *data)
pending = ACCESS_ONCE(efx->reset_pending);
method = fls(pending) - 1;
+ if (method == RESET_TYPE_MC_BIST)
+ efx_wait_for_bist_end(efx);
+
if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
method == RESET_TYPE_RECOVER_OR_ALL) &&
efx_try_recovery(efx))
@@ -2437,6 +2548,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
case RESET_TYPE_WORLD:
case RESET_TYPE_DISABLE:
case RESET_TYPE_RECOVER_OR_DISABLE:
+ case RESET_TYPE_MC_BIST:
method = type;
netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
RESET_TYPE(method));
@@ -2530,6 +2642,8 @@ static int efx_init_struct(struct efx_nic *efx,
int i;
/* Initialise common structures */
+ INIT_LIST_HEAD(&efx->node);
+ INIT_LIST_HEAD(&efx->secondary_list);
spin_lock_init(&efx->biu_lock);
#ifdef CONFIG_SFC_MTD
INIT_LIST_HEAD(&efx->mtd_list);
@@ -2548,6 +2662,8 @@ static int efx_init_struct(struct efx_nic *efx,
NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
efx->rx_packet_hash_offset =
efx->type->rx_hash_offset - efx->type->rx_prefix_size;
+ efx->rx_packet_ts_offset =
+ efx->type->rx_ts_offset - efx->type->rx_prefix_size;
spin_lock_init(&efx->stats_lock);
mutex_init(&efx->mac_lock);
efx->phy_op = &efx_dummy_phy_operations;
@@ -2588,6 +2704,8 @@ static void efx_fini_struct(struct efx_nic *efx)
for (i = 0; i < EFX_MAX_CHANNELS; i++)
kfree(efx->channel[i]);
+ kfree(efx->vpd_sn);
+
if (efx->workqueue) {
destroy_workqueue(efx->workqueue);
efx->workqueue = NULL;
@@ -2632,6 +2750,7 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
/* Mark the NIC as fini, then stop the interface */
rtnl_lock();
+ efx_dissociate(efx);
dev_close(efx->net_dev);
efx_disable_interrupts(efx);
rtnl_unlock();
@@ -2647,7 +2766,6 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
efx_fini_struct(efx);
- pci_set_drvdata(pci_dev, NULL);
free_netdev(efx->net_dev);
pci_disable_pcie_error_reporting(pci_dev);
@@ -2659,12 +2777,12 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
* always appear within the first 512 bytes.
*/
#define SFC_VPD_LEN 512
-static void efx_print_product_vpd(struct efx_nic *efx)
+static void efx_probe_vpd_strings(struct efx_nic *efx)
{
struct pci_dev *dev = efx->pci_dev;
char vpd_data[SFC_VPD_LEN];
ssize_t vpd_size;
- int i, j;
+ int ro_start, ro_size, i, j;
/* Get the vpd data from the device */
vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
@@ -2674,14 +2792,15 @@ static void efx_print_product_vpd(struct efx_nic *efx)
}
/* Get the Read only section */
- i = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
- if (i < 0) {
+ ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
+ if (ro_start < 0) {
netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
return;
}
- j = pci_vpd_lrdt_size(&vpd_data[i]);
- i += PCI_VPD_LRDT_TAG_SIZE;
+ ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
+ j = ro_size;
+ i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
if (i + j > vpd_size)
j = vpd_size - i;
@@ -2701,6 +2820,27 @@ static void efx_print_product_vpd(struct efx_nic *efx)
netif_info(efx, drv, efx->net_dev,
"Part Number : %.*s\n", j, &vpd_data[i]);
+
+ i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
+ j = ro_size;
+ i = pci_vpd_find_info_keyword(vpd_data, i, j, "SN");
+ if (i < 0) {
+ netif_err(efx, drv, efx->net_dev, "Serial number not found\n");
+ return;
+ }
+
+ j = pci_vpd_info_field_size(&vpd_data[i]);
+ i += PCI_VPD_INFO_FLD_HDR_SIZE;
+ if (i + j > vpd_size) {
+ netif_err(efx, drv, efx->net_dev, "Incomplete serial number\n");
+ return;
+ }
+
+ efx->vpd_sn = kmalloc(j + 1, GFP_KERNEL);
+ if (!efx->vpd_sn)
+ return;
+
+ snprintf(efx->vpd_sn, j + 1, "%s", &vpd_data[i]);
}
@@ -2797,7 +2937,7 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
netif_info(efx, probe, efx->net_dev,
"Solarflare NIC detected\n");
- efx_print_product_vpd(efx);
+ efx_probe_vpd_strings(efx);
/* Set up basic I/O (BAR mappings etc) */
rc = efx_init_io(efx);
@@ -2841,7 +2981,6 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
fail2:
efx_fini_struct(efx);
fail1:
- pci_set_drvdata(pci_dev, NULL);
WARN_ON(rc > 0);
netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
free_netdev(net_dev);
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index b8235ee5d7d7..dbd7b78fe01c 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -37,7 +37,7 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
-void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic);
void efx_rx_slow_fill(unsigned long context);
void __efx_rx_packet(struct efx_channel *channel);
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
@@ -66,6 +66,9 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
#define EFX_RXQ_MIN_ENT 128U
#define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx))
+#define EFX_TXQ_MAX_ENT(efx) (EFX_WORKAROUND_35388(efx) ? \
+ EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE)
+
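Concretely, on NICs subject to workaround 35388 the usable TX ring is half the
hardware maximum; assuming EFX_MAX_DMAQ_SIZE is 4096 as in net_driver.h, this
caps tx_pending at 2048, which the ethtool ringparam changes later in this
patch enforce.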
/* Filters */
/**
@@ -134,17 +137,6 @@ efx_filter_get_filter_safe(struct efx_nic *efx,
return efx->type->filter_get_safe(efx, priority, filter_id, spec);
}
-/**
- * efx_farch_filter_clear_rx - remove RX filters by priority
- * @efx: NIC from which to remove the filters
- * @priority: Maximum priority to remove
- */
-static inline void efx_filter_clear_rx(struct efx_nic *efx,
- enum efx_filter_priority priority)
-{
- return efx->type->filter_clear_rx(efx, priority);
-}
-
static inline u32 efx_filter_count_rx_used(struct efx_nic *efx,
enum efx_filter_priority priority)
{
diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h
index 7fdfee019092..75ef7ef6450b 100644
--- a/drivers/net/ethernet/sfc/enum.h
+++ b/drivers/net/ethernet/sfc/enum.h
@@ -165,6 +165,7 @@ enum reset_type {
RESET_TYPE_DMA_ERROR,
RESET_TYPE_TX_SKIP,
RESET_TYPE_MC_FAILURE,
+ RESET_TYPE_MC_BIST,
RESET_TYPE_MAX,
};
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 1f529fa2edb1..229428915aa8 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -318,6 +318,8 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
"eventq.int", NULL);
}
+ efx_fill_test(n++, strings, data, &tests->memory,
+ "core", 0, "memory", NULL);
efx_fill_test(n++, strings, data, &tests->registers,
"core", 0, "registers", NULL);
@@ -357,7 +359,8 @@ static int efx_ethtool_get_sset_count(struct net_device *net_dev,
switch (string_set) {
case ETH_SS_STATS:
return efx->type->describe_stats(efx, NULL) +
- EFX_ETHTOOL_SW_STAT_COUNT;
+ EFX_ETHTOOL_SW_STAT_COUNT +
+ efx_ptp_describe_stats(efx, NULL);
case ETH_SS_TEST:
return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
default:
@@ -378,6 +381,8 @@ static void efx_ethtool_get_strings(struct net_device *net_dev,
for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++)
strlcpy(strings + i * ETH_GSTRING_LEN,
efx_sw_stat_desc[i].name, ETH_GSTRING_LEN);
+ strings += EFX_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
+ efx_ptp_describe_stats(efx, strings);
break;
case ETH_SS_TEST:
efx_ethtool_fill_self_tests(efx, NULL, strings, NULL);
@@ -427,8 +432,11 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
break;
}
}
+ data += EFX_ETHTOOL_SW_STAT_COUNT;
spin_unlock_bh(&efx->stats_lock);
+
+ efx_ptp_update_stats(efx, data);
}
static void efx_ethtool_self_test(struct net_device *net_dev,
@@ -583,7 +591,7 @@ static void efx_ethtool_get_ringparam(struct net_device *net_dev,
struct efx_nic *efx = netdev_priv(net_dev);
ring->rx_max_pending = EFX_MAX_DMAQ_SIZE;
- ring->tx_max_pending = EFX_MAX_DMAQ_SIZE;
+ ring->tx_max_pending = EFX_TXQ_MAX_ENT(efx);
ring->rx_pending = efx->rxq_entries;
ring->tx_pending = efx->txq_entries;
}
@@ -596,7 +604,7 @@ static int efx_ethtool_set_ringparam(struct net_device *net_dev,
if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
ring->rx_pending > EFX_MAX_DMAQ_SIZE ||
- ring->tx_pending > EFX_MAX_DMAQ_SIZE)
+ ring->tx_pending > EFX_TXQ_MAX_ENT(efx))
return -EINVAL;
if (ring->rx_pending < EFX_RXQ_MIN_ENT) {
@@ -1032,7 +1040,7 @@ static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev,
struct efx_nic *efx = netdev_priv(net_dev);
memcpy(efx->rx_indir_table, indir, sizeof(efx->rx_indir_table));
- efx_nic_push_rx_indir_table(efx);
+ efx->type->rx_push_rss_config(efx);
return 0;
}
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index ff5d322b9b49..18d6f761f4d0 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -469,6 +469,24 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
}
/**************************************************************************
*
+ * RSS
+ *
+ **************************************************************************
+ */
+
+static void falcon_b0_rx_push_rss_config(struct efx_nic *efx)
+{
+ efx_oword_t temp;
+
+ /* Set hash key for IPv4 */
+ memcpy(&temp, efx->rx_hash_key, sizeof(temp));
+ efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
+
+ efx_farch_rx_push_indir_table(efx);
+}
+
+/**************************************************************************
+ *
* EEPROM/flash
*
**************************************************************************
@@ -2247,6 +2265,8 @@ static int falcon_probe_nic(struct efx_nic *efx)
struct falcon_board *board;
int rc;
+ efx->primary = efx; /* only one usable function per controller */
+
/* Allocate storage for hardware specific data */
nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
if (!nic_data)
@@ -2482,9 +2502,7 @@ static int falcon_init_nic(struct efx_nic *efx)
falcon_init_rx_cfg(efx);
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
- /* Set hash key for IPv4 */
- memcpy(&temp, efx->rx_hash_key, sizeof(temp));
- efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
+ falcon_b0_rx_push_rss_config(efx);
/* Set destination of both TX and RX Flush events */
EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
@@ -2593,6 +2611,14 @@ void falcon_start_nic_stats(struct efx_nic *efx)
spin_unlock_bh(&efx->stats_lock);
}
+/* We don't actually pull stats on falcon. Wait 10ms so that
+ * they arrive when we call this just after start_stats
+ */
+static void falcon_pull_nic_stats(struct efx_nic *efx)
+{
+ msleep(10);
+}
+
void falcon_stop_nic_stats(struct efx_nic *efx)
{
struct falcon_nic_data *nic_data = efx->nic_data;
@@ -2672,6 +2698,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
.describe_stats = falcon_describe_nic_stats,
.update_stats = falcon_update_nic_stats,
.start_stats = falcon_start_nic_stats,
+ .pull_stats = falcon_pull_nic_stats,
.stop_stats = falcon_stop_nic_stats,
.set_id_led = falcon_set_id_led,
.push_irq_moderation = falcon_push_irq_moderation,
@@ -2692,7 +2719,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
.tx_init = efx_farch_tx_init,
.tx_remove = efx_farch_tx_remove,
.tx_write = efx_farch_tx_write,
- .rx_push_indir_table = efx_farch_rx_push_indir_table,
+ .rx_push_rss_config = efx_port_dummy_op_void,
.rx_probe = efx_farch_rx_probe,
.rx_init = efx_farch_rx_init,
.rx_remove = efx_farch_rx_remove,
@@ -2765,6 +2792,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
.describe_stats = falcon_describe_nic_stats,
.update_stats = falcon_update_nic_stats,
.start_stats = falcon_start_nic_stats,
+ .pull_stats = falcon_pull_nic_stats,
.stop_stats = falcon_stop_nic_stats,
.set_id_led = falcon_set_id_led,
.push_irq_moderation = falcon_push_irq_moderation,
@@ -2786,7 +2814,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
.tx_init = efx_farch_tx_init,
.tx_remove = efx_farch_tx_remove,
.tx_write = efx_farch_tx_write,
- .rx_push_indir_table = efx_farch_rx_push_indir_table,
+ .rx_push_rss_config = falcon_b0_rx_push_rss_config,
.rx_probe = efx_farch_rx_probe,
.rx_init = efx_farch_rx_init,
.rx_remove = efx_farch_rx_remove,
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index c0907d884d75..f72489a105ca 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -1147,7 +1147,7 @@ static void efx_farch_handle_generated_event(struct efx_channel *channel,
/* The queue must be empty, so we won't receive any rx
* events, and efx_process_channel() won't refill the
* queue. Refill it here */
- efx_fast_push_rx_descriptors(rx_queue);
+ efx_fast_push_rx_descriptors(rx_queue, true);
} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
efx_farch_handle_drain_event(channel);
} else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
@@ -1618,8 +1618,7 @@ void efx_farch_rx_push_indir_table(struct efx_nic *efx)
size_t i = 0;
efx_dword_t dword;
- if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
- return;
+ BUG_ON(efx_nic_rev(efx) < EFX_REV_FALCON_B0);
BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
FR_BZ_RX_INDIRECTION_TBL_ROWS);
@@ -1745,8 +1744,6 @@ void efx_farch_init_common(struct efx_nic *efx)
EFX_INVERT_OWORD(temp);
efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
- efx_farch_rx_push_indir_table(efx);
-
/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
* controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
*/
@@ -2187,14 +2184,14 @@ efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec,
}
static void
-efx_farch_filter_init_rx_for_stack(struct efx_nic *efx,
- struct efx_farch_filter_spec *spec)
+efx_farch_filter_init_rx_auto(struct efx_nic *efx,
+ struct efx_farch_filter_spec *spec)
{
/* If there's only one channel then disable RSS for non-VF
* traffic, thereby allowing VFs to use RSS when the PF can't.
*/
- spec->priority = EFX_FILTER_PRI_REQUIRED;
- spec->flags = (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_STACK |
+ spec->priority = EFX_FILTER_PRI_AUTO;
+ spec->flags = (EFX_FILTER_FLAG_RX |
(efx->n_rx_channels > 1 ? EFX_FILTER_FLAG_RX_RSS : 0) |
(efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0));
spec->dmaq_id = 0;
@@ -2459,20 +2456,13 @@ s32 efx_farch_filter_insert(struct efx_nic *efx,
rc = -EEXIST;
goto out;
}
- if (spec.priority < saved_spec->priority &&
- !(saved_spec->priority == EFX_FILTER_PRI_REQUIRED &&
- saved_spec->flags & EFX_FILTER_FLAG_RX_STACK)) {
+ if (spec.priority < saved_spec->priority) {
rc = -EPERM;
goto out;
}
- if (spec.flags & EFX_FILTER_FLAG_RX_STACK) {
- /* Just make sure it won't be removed */
- saved_spec->flags |= EFX_FILTER_FLAG_RX_STACK;
- rc = 0;
- goto out;
- }
- /* Retain the RX_STACK flag */
- spec.flags |= saved_spec->flags & EFX_FILTER_FLAG_RX_STACK;
+ if (saved_spec->priority == EFX_FILTER_PRI_AUTO ||
+ saved_spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO)
+ spec.flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
}
/* Insert the filter */
@@ -2553,11 +2543,11 @@ static int efx_farch_filter_remove(struct efx_nic *efx,
struct efx_farch_filter_spec *spec = &table->spec[filter_idx];
if (!test_bit(filter_idx, table->used_bitmap) ||
- spec->priority > priority)
+ spec->priority != priority)
return -ENOENT;
- if (spec->flags & EFX_FILTER_FLAG_RX_STACK) {
- efx_farch_filter_init_rx_for_stack(efx, spec);
+ if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
+ efx_farch_filter_init_rx_auto(efx, spec);
efx_farch_filter_push_rx_config(efx);
} else {
efx_farch_filter_table_clear_entry(efx, table, filter_idx);
@@ -2640,12 +2630,15 @@ efx_farch_filter_table_clear(struct efx_nic *efx,
unsigned int filter_idx;
spin_lock_bh(&efx->filter_lock);
- for (filter_idx = 0; filter_idx < table->size; ++filter_idx)
- efx_farch_filter_remove(efx, table, filter_idx, priority);
+ for (filter_idx = 0; filter_idx < table->size; ++filter_idx) {
+ if (table->spec[filter_idx].priority != EFX_FILTER_PRI_AUTO)
+ efx_farch_filter_remove(efx, table,
+ filter_idx, priority);
+ }
spin_unlock_bh(&efx->filter_lock);
}
-void efx_farch_filter_clear_rx(struct efx_nic *efx,
+int efx_farch_filter_clear_rx(struct efx_nic *efx,
enum efx_filter_priority priority)
{
efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_IP,
@@ -2654,6 +2647,7 @@ void efx_farch_filter_clear_rx(struct efx_nic *efx,
priority);
efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_DEF,
priority);
+ return 0;
}
u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
@@ -2822,7 +2816,7 @@ int efx_farch_filter_table_probe(struct efx_nic *efx)
for (i = 0; i < EFX_FARCH_FILTER_SIZE_RX_DEF; i++) {
spec = &table->spec[i];
spec->type = EFX_FARCH_FILTER_UC_DEF + i;
- efx_farch_filter_init_rx_for_stack(efx, spec);
+ efx_farch_filter_init_rx_auto(efx, spec);
__set_bit(i, table->used_bitmap);
}
}
diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h
index 63c77a557178..3ef298d3c47e 100644
--- a/drivers/net/ethernet/sfc/filter.h
+++ b/drivers/net/ethernet/sfc/filter.h
@@ -59,12 +59,16 @@ enum efx_filter_match_flags {
/**
* enum efx_filter_priority - priority of a hardware filter specification
* @EFX_FILTER_PRI_HINT: Performance hint
+ * @EFX_FILTER_PRI_AUTO: Automatic filter based on device address list
+ * or hardware requirements. This may only be used by the filter
+ * implementation for each NIC type.
* @EFX_FILTER_PRI_MANUAL: Manually configured filter
* @EFX_FILTER_PRI_REQUIRED: Required for correct behaviour (user-level
* networking and SR-IOV)
*/
enum efx_filter_priority {
EFX_FILTER_PRI_HINT = 0,
+ EFX_FILTER_PRI_AUTO,
EFX_FILTER_PRI_MANUAL,
EFX_FILTER_PRI_REQUIRED,
};
@@ -78,19 +82,18 @@ enum efx_filter_priority {
* according to the indirection table.
* @EFX_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving
* queue.
- * @EFX_FILTER_FLAG_RX_STACK: Indicates a filter inserted for the
- * network stack. The filter must have a priority of
- * %EFX_FILTER_PRI_REQUIRED. It can be steered by a replacement
- * request with priority %EFX_FILTER_PRI_MANUAL, and a removal
- * request with priority %EFX_FILTER_PRI_MANUAL will reset the
- * steering (but not remove the filter).
+ * @EFX_FILTER_FLAG_RX_OVER_AUTO: Indicates a filter that is
+ * overriding an automatic filter (priority
+ * %EFX_FILTER_PRI_AUTO). This may only be set by the filter
+ * implementation for each type. A removal request will restore
+ * the automatic filter in its place.
* @EFX_FILTER_FLAG_RX: Filter is for RX
* @EFX_FILTER_FLAG_TX: Filter is for TX
*/
enum efx_filter_flags {
EFX_FILTER_FLAG_RX_RSS = 0x01,
EFX_FILTER_FLAG_RX_SCATTER = 0x02,
- EFX_FILTER_FLAG_RX_STACK = 0x04,
+ EFX_FILTER_FLAG_RX_OVER_AUTO = 0x04,
EFX_FILTER_FLAG_RX = 0x08,
EFX_FILTER_FLAG_TX = 0x10,
};
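
Taken together with the farch.c changes above, the override lifecycle is:
inserting a higher-priority filter that collides with an AUTO entry gets
RX_OVER_AUTO set by the NIC-type implementation, and removing it restores the
AUTO entry instead of leaving a hole. A hedged sketch using the driver's
generic filter helpers (the unknown-unicast match and queue choice here are
illustrative only):

static int example_override_auto(struct efx_nic *efx, unsigned int rxq)
{
	struct efx_filter_spec spec;
	s32 id;

	/* Steer unknown-unicast traffic, normally covered by an
	 * EFX_FILTER_PRI_AUTO default filter, to a chosen queue.
	 */
	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, rxq);
	efx_filter_set_uc_def(&spec);
	id = efx_filter_insert_filter(efx, &spec, true);
	if (id < 0)
		return id;

	/* Removal restores the automatic filter in its place */
	return efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_MANUAL, id);
}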
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 4b0bd8a1514d..eb59abb57e85 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -42,6 +42,7 @@ struct efx_mcdi_async_param {
unsigned int cmd;
size_t inlen;
size_t outlen;
+ bool quiet;
efx_mcdi_async_completer *complete;
unsigned long cookie;
/* followed by request/response buffer */
@@ -101,6 +102,10 @@ int efx_mcdi_init(struct efx_nic *efx)
netif_err(efx, probe, efx->net_dev,
"Host already registered with MCPU\n");
+ if (efx->mcdi->fn_flags &
+ (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
+ efx->primary = efx;
+
return 0;
}
@@ -191,6 +196,8 @@ static int efx_mcdi_errno(unsigned int mcdi_err)
TRANSLATE_ERROR(EALREADY);
TRANSLATE_ERROR(ENOSPC);
#undef TRANSLATE_ERROR
+ case MC_CMD_ERR_ENOTSUP:
+ return -EOPNOTSUPP;
case MC_CMD_ERR_ALLOC_FAIL:
return -ENOBUFS;
case MC_CMD_ERR_MAC_EXIST:
@@ -402,8 +409,9 @@ static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
{
struct efx_nic *efx = mcdi->efx;
struct efx_mcdi_async_param *async;
- size_t hdr_len, data_len;
+ size_t hdr_len, data_len, err_len;
efx_dword_t *outbuf;
+ MCDI_DECLARE_BUF_OUT_OR_ERR(errbuf, 0);
int rc;
if (cmpxchg(&mcdi->state,
@@ -444,6 +452,13 @@ static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
outbuf = (efx_dword_t *)(async + 1);
efx->type->mcdi_read_response(efx, outbuf, hdr_len,
min(async->outlen, data_len));
+ if (!timeout && rc && !async->quiet) {
+ err_len = min(sizeof(errbuf), data_len);
+ efx->type->mcdi_read_response(efx, errbuf, hdr_len,
+ sizeof(errbuf));
+ efx_mcdi_display_error(efx, async->cmd, async->inlen, errbuf,
+ err_len, rc);
+ }
async->complete(efx, async->cookie, rc, outbuf, data_len);
kfree(async);
@@ -519,18 +534,129 @@ efx_mcdi_check_supported(struct efx_nic *efx, unsigned int cmd, size_t inlen)
return 0;
}
+static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual, bool quiet)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ MCDI_DECLARE_BUF_OUT_OR_ERR(errbuf, 0);
+ int rc;
+
+ if (mcdi->mode == MCDI_MODE_POLL)
+ rc = efx_mcdi_poll(efx);
+ else
+ rc = efx_mcdi_await_completion(efx);
+
+ if (rc != 0) {
+ netif_err(efx, hw, efx->net_dev,
+ "MC command 0x%x inlen %d mode %d timed out\n",
+ cmd, (int)inlen, mcdi->mode);
+
+ if (mcdi->mode == MCDI_MODE_EVENTS && efx_mcdi_poll_once(efx)) {
+ netif_err(efx, hw, efx->net_dev,
+ "MCDI request was completed without an event\n");
+ rc = 0;
+ }
+
+ /* Close the race with efx_mcdi_ev_cpl() executing just too late
+ * and completing a request we've just cancelled, by ensuring
+ * that the seqno check therein fails.
+ */
+ spin_lock_bh(&mcdi->iface_lock);
+ ++mcdi->seqno;
+ ++mcdi->credits;
+ spin_unlock_bh(&mcdi->iface_lock);
+ }
+
+ if (rc != 0) {
+ if (outlen_actual)
+ *outlen_actual = 0;
+ } else {
+ size_t hdr_len, data_len, err_len;
+
+ /* At the very least we need a memory barrier here to ensure
+ * we pick up changes from efx_mcdi_ev_cpl(). Protect against
+ * a spurious efx_mcdi_ev_cpl() running concurrently by
+ * acquiring the iface_lock. */
+ spin_lock_bh(&mcdi->iface_lock);
+ rc = mcdi->resprc;
+ hdr_len = mcdi->resp_hdr_len;
+ data_len = mcdi->resp_data_len;
+ err_len = min(sizeof(errbuf), data_len);
+ spin_unlock_bh(&mcdi->iface_lock);
+
+ BUG_ON(rc > 0);
+
+ efx->type->mcdi_read_response(efx, outbuf, hdr_len,
+ min(outlen, data_len));
+ if (outlen_actual)
+ *outlen_actual = data_len;
+
+ efx->type->mcdi_read_response(efx, errbuf, hdr_len, err_len);
+
+ if (cmd == MC_CMD_REBOOT && rc == -EIO) {
+ /* Don't reset if MC_CMD_REBOOT returns EIO */
+ } else if (rc == -EIO || rc == -EINTR) {
+ netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n",
+ -rc);
+ efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
+ } else if (rc && !quiet) {
+ efx_mcdi_display_error(efx, cmd, inlen, errbuf, err_len,
+ rc);
+ }
+
+ if (rc == -EIO || rc == -EINTR) {
+ msleep(MCDI_STATUS_SLEEP_MS);
+ efx_mcdi_poll_reboot(efx);
+ mcdi->new_epoch = true;
+ }
+ }
+
+ efx_mcdi_release(mcdi);
+ return rc;
+}
+
+static int _efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual, bool quiet)
+{
+ int rc;
+
+ rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
+ if (rc) {
+ if (outlen_actual)
+ *outlen_actual = 0;
+ return rc;
+ }
+ return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
+ outlen_actual, quiet);
+}
+
int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
const efx_dword_t *inbuf, size_t inlen,
efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual)
{
- int rc;
+ return _efx_mcdi_rpc(efx, cmd, inbuf, inlen, outbuf, outlen,
+ outlen_actual, false);
+}
- rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
- if (rc)
- return rc;
- return efx_mcdi_rpc_finish(efx, cmd, inlen,
- outbuf, outlen, outlen_actual);
+/* Normally, on receiving an error code in the MCDI response,
+ * efx_mcdi_rpc will log an error message containing (among other
+ * things) the raw error code, by means of efx_mcdi_display_error.
+ * This _quiet version suppresses that; if the caller wishes to log
+ * the error conditionally on the return code, it should call this
+ * function and is then responsible for calling efx_mcdi_display_error
+ * as needed.
+ */
+int efx_mcdi_rpc_quiet(struct efx_nic *efx, unsigned cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual)
+{
+ return _efx_mcdi_rpc(efx, cmd, inbuf, inlen, outbuf, outlen,
+ outlen_actual, true);
}
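
A caller of the quiet variant typically reports only the failures it does not
expect, as the GET_ASSERTS conversion later in this patch does. A minimal
sketch (the command choice and the tolerated errno are illustrative):

static int example_quiet_rpc(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0,
				outbuf, sizeof(outbuf), &outlen);
	/* Tolerate an expected error silently; log anything else */
	if (rc && rc != -ENOENT)
		efx_mcdi_display_error(efx, MC_CMD_WOL_FILTER_RESET, 0,
				       outbuf, outlen, rc);
	return rc;
}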
int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
@@ -543,35 +669,19 @@ int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
if (rc)
return rc;
+ if (efx->mc_bist_for_other_fn)
+ return -ENETDOWN;
+
efx_mcdi_acquire_sync(mcdi);
efx_mcdi_send_request(efx, cmd, inbuf, inlen);
return 0;
}
-/**
- * efx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
- * @efx: NIC through which to issue the command
- * @cmd: Command type number
- * @inbuf: Command parameters
- * @inlen: Length of command parameters, in bytes
- * @outlen: Length to allocate for response buffer, in bytes
- * @complete: Function to be called on completion or cancellation.
- * @cookie: Arbitrary value to be passed to @complete.
- *
- * This function does not sleep and therefore may be called in atomic
- * context. It will fail if event queues are disabled or if MCDI
- * event completions have been disabled due to an error.
- *
- * If it succeeds, the @complete function will be called exactly once
- * in atomic context, when one of the following occurs:
- * (a) the completion event is received (in NAPI context)
- * (b) event queues are disabled (in the process that disables them)
- * (c) the request times-out (in timer context)
- */
-int
-efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
- const efx_dword_t *inbuf, size_t inlen, size_t outlen,
- efx_mcdi_async_completer *complete, unsigned long cookie)
+static int _efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ size_t outlen,
+ efx_mcdi_async_completer *complete,
+ unsigned long cookie, bool quiet)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
struct efx_mcdi_async_param *async;
@@ -581,6 +691,9 @@ efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
if (rc)
return rc;
+ if (efx->mc_bist_for_other_fn)
+ return -ENETDOWN;
+
async = kmalloc(sizeof(*async) + ALIGN(max(inlen, outlen), 4),
GFP_ATOMIC);
if (!async)
@@ -589,6 +702,7 @@ efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
async->cmd = cmd;
async->inlen = inlen;
async->outlen = outlen;
+ async->quiet = quiet;
async->complete = complete;
async->cookie = cookie;
memcpy(async + 1, inbuf, inlen);
@@ -617,79 +731,73 @@ efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
return rc;
}
+/**
+ * efx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
+ * @efx: NIC through which to issue the command
+ * @cmd: Command type number
+ * @inbuf: Command parameters
+ * @inlen: Length of command parameters, in bytes
+ * @outlen: Length to allocate for response buffer, in bytes
+ * @complete: Function to be called on completion or cancellation.
+ * @cookie: Arbitrary value to be passed to @complete.
+ *
+ * This function does not sleep and therefore may be called in atomic
+ * context. It will fail if event queues are disabled or if MCDI
+ * event completions have been disabled due to an error.
+ *
+ * If it succeeds, the @complete function will be called exactly once
+ * in atomic context, when one of the following occurs:
+ * (a) the completion event is received (in NAPI context)
+ * (b) event queues are disabled (in the process that disables them)
+ * (c) the request times-out (in timer context)
+ */
+int
+efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen, size_t outlen,
+ efx_mcdi_async_completer *complete, unsigned long cookie)
+{
+ return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete,
+ cookie, false);
+}
+
+int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ size_t outlen, efx_mcdi_async_completer *complete,
+ unsigned long cookie)
+{
+ return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete,
+ cookie, true);
+}
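
The async variants hand the result to a completer that runs exactly once in
atomic context, per the kernel-doc above. A hedged sketch of a fire-and-forget
submission; the completer parameters follow the efx_mcdi_async_completer
typedef in mcdi.h, and the command and callback body are illustrative:

static void example_async_done(struct efx_nic *efx, unsigned long cookie,
			       int rc, efx_dword_t *outbuf,
			       size_t outlen_actual)
{
	if (rc)
		netif_dbg(efx, hw, efx->net_dev,
			  "async MCDI (cookie %lu) failed: rc=%d\n",
			  cookie, rc);
}

static int example_async_submit(struct efx_nic *efx)
{
	/* Does not sleep; fails if event queues are disabled */
	return efx_mcdi_rpc_async(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, 0,
				  example_async_done, 0);
}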
+
int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual)
{
- struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
- int rc;
-
- if (mcdi->mode == MCDI_MODE_POLL)
- rc = efx_mcdi_poll(efx);
- else
- rc = efx_mcdi_await_completion(efx);
-
- if (rc != 0) {
- netif_err(efx, hw, efx->net_dev,
- "MC command 0x%x inlen %d mode %d timed out\n",
- cmd, (int)inlen, mcdi->mode);
-
- if (mcdi->mode == MCDI_MODE_EVENTS && efx_mcdi_poll_once(efx)) {
- netif_err(efx, hw, efx->net_dev,
- "MCDI request was completed without an event\n");
- rc = 0;
- }
-
- /* Close the race with efx_mcdi_ev_cpl() executing just too late
- * and completing a request we've just cancelled, by ensuring
- * that the seqno check therein fails.
- */
- spin_lock_bh(&mcdi->iface_lock);
- ++mcdi->seqno;
- ++mcdi->credits;
- spin_unlock_bh(&mcdi->iface_lock);
- }
-
- if (rc == 0) {
- size_t hdr_len, data_len;
-
- /* At the very least we need a memory barrier here to ensure
- * we pick up changes from efx_mcdi_ev_cpl(). Protect against
- * a spurious efx_mcdi_ev_cpl() running concurrently by
- * acquiring the iface_lock. */
- spin_lock_bh(&mcdi->iface_lock);
- rc = mcdi->resprc;
- hdr_len = mcdi->resp_hdr_len;
- data_len = mcdi->resp_data_len;
- spin_unlock_bh(&mcdi->iface_lock);
-
- BUG_ON(rc > 0);
+ return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
+ outlen_actual, false);
+}
- if (rc == 0) {
- efx->type->mcdi_read_response(efx, outbuf, hdr_len,
- min(outlen, data_len));
- if (outlen_actual != NULL)
- *outlen_actual = data_len;
- } else if (cmd == MC_CMD_REBOOT && rc == -EIO)
- ; /* Don't reset if MC_CMD_REBOOT returns EIO */
- else if (rc == -EIO || rc == -EINTR) {
- netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n",
- -rc);
- efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
- } else
- netif_dbg(efx, hw, efx->net_dev,
- "MC command 0x%x inlen %d failed rc=%d\n",
- cmd, (int)inlen, -rc);
+int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual)
+{
+ return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
+ outlen_actual, true);
+}
- if (rc == -EIO || rc == -EINTR) {
- msleep(MCDI_STATUS_SLEEP_MS);
- efx_mcdi_poll_reboot(efx);
- mcdi->new_epoch = true;
- }
- }
+void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd,
+ size_t inlen, efx_dword_t *outbuf,
+ size_t outlen, int rc)
+{
+ int code = 0, err_arg = 0;
- efx_mcdi_release(mcdi);
- return rc;
+ if (outlen >= MC_CMD_ERR_CODE_OFST + 4)
+ code = MCDI_DWORD(outbuf, ERR_CODE);
+ if (outlen >= MC_CMD_ERR_ARG_OFST + 4)
+ err_arg = MCDI_DWORD(outbuf, ERR_ARG);
+ netif_err(efx, hw, efx->net_dev,
+ "MC command 0x%x inlen %d failed rc=%d (raw=%d) arg=%d\n",
+ cmd, (int)inlen, rc, code, err_arg);
}
/* Switch to polled MCDI completions. This can be called in various
@@ -834,6 +942,30 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
spin_unlock(&mcdi->iface_lock);
}
+/* The MC is going down into BIST mode. Set the BIST flag to block
+ * new MCDI, cancel any outstanding MCDI and schedule a BIST-type reset
+ * (which doesn't actually execute a reset, it waits for the controlling
+ * function to reset it).
+ */
+static void efx_mcdi_ev_bist(struct efx_nic *efx)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+ spin_lock(&mcdi->iface_lock);
+ efx->mc_bist_for_other_fn = true;
+ if (efx_mcdi_complete_sync(mcdi)) {
+ if (mcdi->mode == MCDI_MODE_EVENTS) {
+ mcdi->resprc = -EIO;
+ mcdi->resp_hdr_len = 0;
+ mcdi->resp_data_len = 0;
+ ++mcdi->credits;
+ }
+ }
+ mcdi->new_epoch = true;
+ efx_schedule_reset(efx, RESET_TYPE_MC_BIST);
+ spin_unlock(&mcdi->iface_lock);
+}
+
/* Called from falcon_process_eventq for MCDI events */
void efx_mcdi_process_event(struct efx_channel *channel,
efx_qword_t *event)
@@ -867,14 +999,18 @@ void efx_mcdi_process_event(struct efx_channel *channel,
efx_mcdi_sensor_event(efx, event);
break;
case MCDI_EVENT_CODE_SCHEDERR:
- netif_info(efx, hw, efx->net_dev,
- "MC Scheduler error address=0x%x\n", data);
+ netif_dbg(efx, hw, efx->net_dev,
+ "MC Scheduler alert (0x%x)\n", data);
break;
case MCDI_EVENT_CODE_REBOOT:
case MCDI_EVENT_CODE_MC_REBOOT:
netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
efx_mcdi_ev_death(efx, -EIO);
break;
+ case MCDI_EVENT_CODE_MC_BIST:
+ netif_info(efx, hw, efx->net_dev, "MC entered BIST mode\n");
+ efx_mcdi_ev_bist(efx);
+ break;
case MCDI_EVENT_CODE_MAC_STATS_DMA:
/* MAC stats are gathered lazily. We can ignore this. */
break;
@@ -886,6 +1022,9 @@ void efx_mcdi_process_event(struct efx_channel *channel,
case MCDI_EVENT_CODE_PTP_PPS:
efx_ptp_event(efx, event);
break;
+ case MCDI_EVENT_CODE_PTP_TIME:
+ efx_time_sync_event(channel, event);
+ break;
case MCDI_EVENT_CODE_TX_FLUSH:
case MCDI_EVENT_CODE_RX_FLUSH:
/* Two flush events will be sent: one to the same event
@@ -1000,13 +1139,27 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
goto fail;
}
+ if (driver_operating) {
+ if (outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN) {
+ efx->mcdi->fn_flags =
+ MCDI_DWORD(outbuf,
+ DRV_ATTACH_EXT_OUT_FUNC_FLAGS);
+ } else {
+ /* Synthesise flags for Siena */
+ efx->mcdi->fn_flags =
+ 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
+ 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED |
+ (efx_port_num(efx) == 0) <<
+ MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY;
+ }
+ }
+
/* We currently assume we have control of the external link
* and are completely trusted by firmware. Abort probing
* if that's not true for this function.
*/
if (driver_operating &&
- outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN &&
- (MCDI_DWORD(outbuf, DRV_ATTACH_EXT_OUT_FUNC_FLAGS) &
+ (efx->mcdi->fn_flags &
(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) !=
(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
@@ -1097,13 +1250,6 @@ int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf),
NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1220,7 +1366,7 @@ fail1:
static int efx_mcdi_read_assertion(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
+ MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
unsigned int flags, index;
const char *reason;
size_t outlen;
@@ -1235,13 +1381,17 @@ static int efx_mcdi_read_assertion(struct efx_nic *efx)
retry = 2;
do {
MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1);
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS,
- inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
- outbuf, sizeof(outbuf), &outlen);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_ASSERTS,
+ inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
+ outbuf, sizeof(outbuf), &outlen);
} while ((rc == -EINTR || rc == -EIO) && retry-- > 0);
- if (rc)
+ if (rc) {
+ efx_mcdi_display_error(efx, MC_CMD_GET_ASSERTS,
+ MC_CMD_GET_ASSERTS_IN_LEN, outbuf,
+ outlen, rc);
return rc;
+ }
if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
return -EIO;
@@ -1319,17 +1469,18 @@ void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf),
NULL, 0, NULL);
- if (rc)
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
- __func__, rc);
}
-static int efx_mcdi_reset_port(struct efx_nic *efx)
+static int efx_mcdi_reset_func(struct efx_nic *efx)
{
- int rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, NULL, 0, NULL, 0, NULL);
- if (rc)
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
- __func__, rc);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_ENTITY_RESET_IN_LEN);
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_ENTITY_RESET_OUT_LEN != 0);
+ MCDI_POPULATE_DWORD_1(inbuf, ENTITY_RESET_IN_FLAG,
+ ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1);
+ rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
return rc;
}
@@ -1347,7 +1498,6 @@ static int efx_mcdi_reset_mc(struct efx_nic *efx)
return 0;
if (rc == 0)
rc = -EIO;
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1368,7 +1518,7 @@ int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method)
if (method == RESET_TYPE_WORLD)
return efx_mcdi_reset_mc(efx);
else
- return efx_mcdi_reset_port(efx);
+ return efx_mcdi_reset_func(efx);
}
static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
@@ -1449,13 +1599,6 @@ int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf),
NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1496,13 +1639,6 @@ int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
int rc;
rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1532,13 +1668,6 @@ static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1558,14 +1687,10 @@ static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc)
- goto fail;
+ return rc;
memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
}
static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
@@ -1585,13 +1710,6 @@ static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1609,13 +1727,6 @@ static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1630,13 +1741,6 @@ static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 15816cacb548..52931aebf3c3 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -94,12 +94,14 @@ struct efx_mcdi_mtd_partition {
* struct efx_mcdi_data - extra state for NICs that implement MCDI
* @iface: Interface/protocol state
* @hwmon: Hardware monitor state
+ * @fn_flags: Flags for this function, as returned by %MC_CMD_DRV_ATTACH.
*/
struct efx_mcdi_data {
struct efx_mcdi_iface iface;
#ifdef CONFIG_SFC_MCDI_MON
struct efx_mcdi_mon hwmon;
#endif
+ u32 fn_flags;
};
#ifdef CONFIG_SFC_MCDI_MON
@@ -116,12 +118,19 @@ void efx_mcdi_fini(struct efx_nic *efx);
int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf,
size_t inlen, efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual);
+int efx_mcdi_rpc_quiet(struct efx_nic *efx, unsigned cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual);
int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
const efx_dword_t *inbuf, size_t inlen);
int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual);
+int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd,
+ size_t inlen, efx_dword_t *outbuf,
+ size_t outlen, size_t *outlen_actual);
typedef void efx_mcdi_async_completer(struct efx_nic *efx,
unsigned long cookie, int rc,
@@ -131,6 +140,15 @@ int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
const efx_dword_t *inbuf, size_t inlen, size_t outlen,
efx_mcdi_async_completer *complete,
unsigned long cookie);
+int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ size_t outlen,
+ efx_mcdi_async_completer *complete,
+ unsigned long cookie);
+
+void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd,
+ size_t inlen, efx_dword_t *outbuf,
+ size_t outlen, int rc);
int efx_mcdi_poll_reboot(struct efx_nic *efx);
void efx_mcdi_mode_poll(struct efx_nic *efx);
@@ -147,6 +165,8 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
*/
#define MCDI_DECLARE_BUF(_name, _len) \
efx_dword_t _name[DIV_ROUND_UP(_len, 4)]
+#define MCDI_DECLARE_BUF_OUT_OR_ERR(_name, _len) \
+ MCDI_DECLARE_BUF(_name, max_t(size_t, _len, 8))
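
The max_t(size_t, _len, 8) above guarantees room for the 8-byte MCDI error
structure (ERR_CODE then ERR_ARG), even for a nominally empty response, which
is what lets the error paths in mcdi.c read back both fields. For example:

MCDI_DECLARE_BUF_OUT_OR_ERR(errbuf, 0);	/* still reserves efx_dword_t errbuf[2] */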
#define _MCDI_PTR(_buf, _offset) \
((u8 *)(_buf) + (_offset))
#define MCDI_PTR(_buf, _field) \
@@ -301,6 +321,7 @@ int efx_mcdi_set_mac(struct efx_nic *efx);
#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
void efx_mcdi_mac_start_stats(struct efx_nic *efx);
void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
+void efx_mcdi_mac_pull_stats(struct efx_nic *efx);
bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
diff --git a/drivers/net/ethernet/sfc/mcdi_mon.c b/drivers/net/ethernet/sfc/mcdi_mon.c
index d72ad4fc3617..bc27d5b580f5 100644
--- a/drivers/net/ethernet/sfc/mcdi_mon.c
+++ b/drivers/net/ethernet/sfc/mcdi_mon.c
@@ -24,6 +24,15 @@ enum efx_hwmon_type {
EFX_HWMON_IN, /* voltage */
EFX_HWMON_CURR, /* current */
EFX_HWMON_POWER, /* power */
+ EFX_HWMON_TYPES_COUNT
+};
+
+static const char *const efx_hwmon_unit[EFX_HWMON_TYPES_COUNT] = {
+ [EFX_HWMON_TEMP] = " degC",
+ [EFX_HWMON_COOL] = " rpm", /* though nonsense for a heatsink */
+ [EFX_HWMON_IN] = " mV",
+ [EFX_HWMON_CURR] = " mA",
+ [EFX_HWMON_POWER] = " W",
};
static const struct {
@@ -33,13 +42,13 @@ static const struct {
} efx_mcdi_sensor_type[] = {
#define SENSOR(name, label, hwmon_type, port) \
[MC_CMD_SENSOR_##name] = { label, EFX_HWMON_ ## hwmon_type, port }
- SENSOR(CONTROLLER_TEMP, "Controller ext. temp.", TEMP, -1),
+ SENSOR(CONTROLLER_TEMP, "Controller board temp.", TEMP, -1),
SENSOR(PHY_COMMON_TEMP, "PHY temp.", TEMP, -1),
- SENSOR(CONTROLLER_COOLING, "Controller cooling", COOL, -1),
+ SENSOR(CONTROLLER_COOLING, "Controller heat sink", COOL, -1),
SENSOR(PHY0_TEMP, "PHY temp.", TEMP, 0),
- SENSOR(PHY0_COOLING, "PHY cooling", COOL, 0),
+ SENSOR(PHY0_COOLING, "PHY heat sink", COOL, 0),
SENSOR(PHY1_TEMP, "PHY temp.", TEMP, 1),
- SENSOR(PHY1_COOLING, "PHY cooling", COOL, 1),
+ SENSOR(PHY1_COOLING, "PHY heat sink", COOL, 1),
SENSOR(IN_1V0, "1.0V supply", IN, -1),
SENSOR(IN_1V2, "1.2V supply", IN, -1),
SENSOR(IN_1V8, "1.8V supply", IN, -1),
@@ -47,36 +56,42 @@ static const struct {
SENSOR(IN_3V3, "3.3V supply", IN, -1),
SENSOR(IN_12V0, "12.0V supply", IN, -1),
SENSOR(IN_1V2A, "1.2V analogue supply", IN, -1),
- SENSOR(IN_VREF, "ref. voltage", IN, -1),
- SENSOR(OUT_VAOE, "AOE power supply", IN, -1),
- SENSOR(AOE_TEMP, "AOE temp.", TEMP, -1),
- SENSOR(PSU_AOE_TEMP, "AOE PSU temp.", TEMP, -1),
- SENSOR(PSU_TEMP, "Controller PSU temp.", TEMP, -1),
- SENSOR(FAN_0, NULL, COOL, -1),
- SENSOR(FAN_1, NULL, COOL, -1),
- SENSOR(FAN_2, NULL, COOL, -1),
- SENSOR(FAN_3, NULL, COOL, -1),
- SENSOR(FAN_4, NULL, COOL, -1),
+ SENSOR(IN_VREF, "Ref. voltage", IN, -1),
+ SENSOR(OUT_VAOE, "AOE FPGA supply", IN, -1),
+ SENSOR(AOE_TEMP, "AOE FPGA temp.", TEMP, -1),
+ SENSOR(PSU_AOE_TEMP, "AOE regulator temp.", TEMP, -1),
+ SENSOR(PSU_TEMP, "Controller regulator temp.",
+ TEMP, -1),
+ SENSOR(FAN_0, "Fan 0", COOL, -1),
+ SENSOR(FAN_1, "Fan 1", COOL, -1),
+ SENSOR(FAN_2, "Fan 2", COOL, -1),
+ SENSOR(FAN_3, "Fan 3", COOL, -1),
+ SENSOR(FAN_4, "Fan 4", COOL, -1),
SENSOR(IN_VAOE, "AOE input supply", IN, -1),
SENSOR(OUT_IAOE, "AOE output current", CURR, -1),
SENSOR(IN_IAOE, "AOE input current", CURR, -1),
SENSOR(NIC_POWER, "Board power use", POWER, -1),
SENSOR(IN_0V9, "0.9V supply", IN, -1),
- SENSOR(IN_I0V9, "0.9V input current", CURR, -1),
- SENSOR(IN_I1V2, "1.2V input current", CURR, -1),
- SENSOR(IN_0V9_ADC, "0.9V supply (at ADC)", IN, -1),
- SENSOR(CONTROLLER_2_TEMP, "Controller ext. temp. 2", TEMP, -1),
- SENSOR(VREG_INTERNAL_TEMP, "Voltage regulator temp.", TEMP, -1),
+ SENSOR(IN_I0V9, "0.9V supply current", CURR, -1),
+ SENSOR(IN_I1V2, "1.2V supply current", CURR, -1),
+ SENSOR(IN_0V9_ADC, "0.9V supply (ext. ADC)", IN, -1),
+ SENSOR(CONTROLLER_2_TEMP, "Controller board temp. 2", TEMP, -1),
+ SENSOR(VREG_INTERNAL_TEMP, "Regulator die temp.", TEMP, -1),
SENSOR(VREG_0V9_TEMP, "0.9V regulator temp.", TEMP, -1),
SENSOR(VREG_1V2_TEMP, "1.2V regulator temp.", TEMP, -1),
- SENSOR(CONTROLLER_VPTAT, "Controller int. temp. raw", IN, -1),
- SENSOR(CONTROLLER_INTERNAL_TEMP, "Controller int. temp.", TEMP, -1),
+ SENSOR(CONTROLLER_VPTAT,
+ "Controller PTAT voltage (int. ADC)", IN, -1),
+ SENSOR(CONTROLLER_INTERNAL_TEMP,
+ "Controller die temp. (int. ADC)", TEMP, -1),
SENSOR(CONTROLLER_VPTAT_EXTADC,
- "Controller int. temp. raw (at ADC)", IN, -1),
+ "Controller PTAT voltage (ext. ADC)", IN, -1),
SENSOR(CONTROLLER_INTERNAL_TEMP_EXTADC,
- "Controller int. temp. (via ADC)", TEMP, -1),
+ "Controller die temp. (ext. ADC)", TEMP, -1),
SENSOR(AMBIENT_TEMP, "Ambient temp.", TEMP, -1),
SENSOR(AIRFLOW, "Air flow raw", IN, -1),
+ SENSOR(VDD08D_VSS08D_CSR, "0.9V die (int. ADC)", IN, -1),
+ SENSOR(VDD08D_VSS08D_CSR_EXTADC, "0.9V die (ext. ADC)", IN, -1),
+ SENSOR(HOTPOINT_TEMP, "Controller board temp. (hotpoint)", TEMP, -1),
#undef SENSOR
};
@@ -91,7 +106,8 @@ static const char *const sensor_status_names[] = {
void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
{
unsigned int type, state, value;
- const char *name = NULL, *state_txt;
+ enum efx_hwmon_type hwmon_type = EFX_HWMON_UNKNOWN;
+ const char *name = NULL, *state_txt, *unit;
type = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_MONITOR);
state = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_STATE);
@@ -99,16 +115,22 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
/* Deal gracefully with the board having more sensors than we
* know about, but do not expect new sensor states. */
- if (type < ARRAY_SIZE(efx_mcdi_sensor_type))
+ if (type < ARRAY_SIZE(efx_mcdi_sensor_type)) {
name = efx_mcdi_sensor_type[type].label;
+ hwmon_type = efx_mcdi_sensor_type[type].hwmon_type;
+ }
if (!name)
name = "No sensor name available";
EFX_BUG_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names));
state_txt = sensor_status_names[state];
+ EFX_BUG_ON_PARANOID(hwmon_type >= EFX_HWMON_TYPES_COUNT);
+ unit = efx_hwmon_unit[hwmon_type];
+ if (!unit)
+ unit = "";
netif_err(efx, hw, efx->net_dev,
- "Sensor %d (%s) reports condition '%s' for raw value %d\n",
- type, name, state_txt, value);
+ "Sensor %d (%s) reports condition '%s' for value %d%s\n",
+ type, name, state_txt, value, unit);
}
#ifdef CONFIG_SFC_MCDI_MON
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index e0a63ddb7a6c..a707fb5ef14c 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -224,6 +224,8 @@
#define MC_CMD_ERR_MAC_EXIST 0x1009
/* Slave core not present */
#define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a
+/* The datapath is disabled. */
+#define MC_CMD_ERR_DATAPATH_DISABLED 0x100b
#define MC_CMD_ERR_CODE_OFST 0
@@ -390,6 +392,8 @@
* AOE_ERR_DATA)
*/
#define MCDI_EVENT_AOE_BYTEBLASTER 0x9
+/* enum: DDR ECC status update */
+#define MCDI_EVENT_AOE_DDR_ECC_STATUS 0xa
#define MCDI_EVENT_AOE_ERR_DATA_LBN 8
#define MCDI_EVENT_AOE_ERR_DATA_WIDTH 8
#define MCDI_EVENT_RX_ERR_RXQ_LBN 0
@@ -462,6 +466,10 @@
#define MCDI_EVENT_CODE_ECC_CORR_ERR 0x17
/* enum: the MC has detected an uncorrectable error */
#define MCDI_EVENT_CODE_ECC_FATAL_ERR 0x18
+/* enum: The MC has entered offline BIST mode */
+#define MCDI_EVENT_CODE_MC_BIST 0x19
+/* enum: PTP tick event providing current NIC time */
+#define MCDI_EVENT_CODE_PTP_TIME 0x1a
/* enum: Artificial event generated by host and posted via MC for test
* purposes.
*/
@@ -481,15 +489,32 @@
#define MCDI_EVENT_TX_ERR_DATA_OFST 0
#define MCDI_EVENT_TX_ERR_DATA_LBN 0
#define MCDI_EVENT_TX_ERR_DATA_WIDTH 32
-/* Seconds field of timestamp */
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the seconds field of
+ * timestamp
+ */
#define MCDI_EVENT_PTP_SECONDS_OFST 0
#define MCDI_EVENT_PTP_SECONDS_LBN 0
#define MCDI_EVENT_PTP_SECONDS_WIDTH 32
-/* Nanoseconds field of timestamp */
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the major field of
+ * timestamp
+ */
+#define MCDI_EVENT_PTP_MAJOR_OFST 0
+#define MCDI_EVENT_PTP_MAJOR_LBN 0
+#define MCDI_EVENT_PTP_MAJOR_WIDTH 32
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the nanoseconds field
+ * of timestamp
+ */
#define MCDI_EVENT_PTP_NANOSECONDS_OFST 0
#define MCDI_EVENT_PTP_NANOSECONDS_LBN 0
#define MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32
-/* Lowest four bytes of sourceUUID from PTP packet */
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the minor field of
+ * timestamp
+ */
+#define MCDI_EVENT_PTP_MINOR_OFST 0
+#define MCDI_EVENT_PTP_MINOR_LBN 0
+#define MCDI_EVENT_PTP_MINOR_WIDTH 32
+/* For CODE_PTP_RX events, the lowest four bytes of sourceUUID from PTP packet
+ */
#define MCDI_EVENT_PTP_UUID_OFST 0
#define MCDI_EVENT_PTP_UUID_LBN 0
#define MCDI_EVENT_PTP_UUID_WIDTH 32
@@ -505,6 +530,13 @@
#define MCDI_EVENT_ECC_FATAL_ERR_DATA_OFST 0
#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LBN 0
#define MCDI_EVENT_ECC_FATAL_ERR_DATA_WIDTH 32
+/* For CODE_PTP_TIME events, the major value of the PTP clock */
+#define MCDI_EVENT_PTP_TIME_MAJOR_OFST 0
+#define MCDI_EVENT_PTP_TIME_MAJOR_LBN 0
+#define MCDI_EVENT_PTP_TIME_MAJOR_WIDTH 32
+/* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */
+#define MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36
+#define MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8
/* FCDI_EVENT structuredef */
#define FCDI_EVENT_LEN 8
@@ -545,8 +577,10 @@
#define FCDI_EVENT_CODE_TIMED_READ 0x5
/* enum: One or more PPS IN events */
#define FCDI_EVENT_CODE_PPS_IN 0x6
-/* enum: One or more PPS OUT events */
-#define FCDI_EVENT_CODE_PPS_OUT 0x7
+/* enum: Tick event from PTP clock */
+#define FCDI_EVENT_CODE_PTP_TICK 0x7
+/* enum: ECC error counters */
+#define FCDI_EVENT_CODE_DDR_ECC_STATUS 0x8
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
@@ -560,14 +594,21 @@
#define FCDI_EVENT_LINK_STATE_DATA_OFST 0
#define FCDI_EVENT_LINK_STATE_DATA_LBN 0
#define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
-#define FCDI_EVENT_PPS_COUNT_OFST 0
-#define FCDI_EVENT_PPS_COUNT_LBN 0
-#define FCDI_EVENT_PPS_COUNT_WIDTH 32
-
-/* FCDI_EXTENDED_EVENT structuredef */
-#define FCDI_EXTENDED_EVENT_LENMIN 16
-#define FCDI_EXTENDED_EVENT_LENMAX 248
-#define FCDI_EXTENDED_EVENT_LEN(num) (8+8*(num))
+#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36
+#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32
+
+/* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events
+ * to the MC. Note that this structure is overlaid over a normal FCDI event
+ * such that bits 32-63 containing event code, level, source etc. remain the
+ * same. In this case the data field of the header is defined to be the
+ * number of timestamps
+ */
+#define FCDI_EXTENDED_EVENT_PPS_LENMIN 16
+#define FCDI_EXTENDED_EVENT_PPS_LENMAX 248
+#define FCDI_EXTENDED_EVENT_PPS_LEN(num) (8+8*(num))
/* Number of timestamps following */
#define FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0
#define FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0
@@ -581,14 +622,14 @@
#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96
#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32
/* Timestamp records comprising the event */
-#define FCDI_EXTENDED_EVENT_PPS_TIME_OFST 8
-#define FCDI_EXTENDED_EVENT_PPS_TIME_LEN 8
-#define FCDI_EXTENDED_EVENT_PPS_TIME_LO_OFST 8
-#define FCDI_EXTENDED_EVENT_PPS_TIME_HI_OFST 12
-#define FCDI_EXTENDED_EVENT_PPS_TIME_MINNUM 1
-#define FCDI_EXTENDED_EVENT_PPS_TIME_MAXNUM 30
-#define FCDI_EXTENDED_EVENT_PPS_TIME_LBN 64
-#define FCDI_EXTENDED_EVENT_PPS_TIME_WIDTH 64
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LEN 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_OFST 12
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MINNUM 1
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM 30
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LBN 64
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_WIDTH 64
/***********************************/
@@ -642,6 +683,10 @@
#define MC_CMD_COPYCODE_IN_LEN 16
/* Source address */
#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
+/* enum: The main image should be entered via a copy of a single word from and
+ * to this address when none of the other magic behaviours are required.
+ */
+#define MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000
/* enum: Entering the main image via a copy of a single word from and to this
* address indicates that it should not attempt to start the datapath CPUs.
* This is useful for certain soft rebooting scenarios. (Huntington only)
@@ -872,8 +917,28 @@
#define MC_CMD_PTP_OP_RST_CLK 0x14
/* enum: Enable the forwarding of PPS events to the host */
#define MC_CMD_PTP_OP_PPS_ENABLE 0x15
+/* enum: Get the time format used by this NIC for PTP operations */
+#define MC_CMD_PTP_OP_GET_TIME_FORMAT 0x16
+/* enum: Get the clock attributes. NOTE- extended version of
+ * MC_CMD_PTP_OP_GET_TIME_FORMAT
+ */
+#define MC_CMD_PTP_OP_GET_ATTRIBUTES 0x16
+/* enum: Get corrections that should be applied to the various different
+ * timestamps
+ */
+#define MC_CMD_PTP_OP_GET_TIMESTAMP_CORRECTIONS 0x17
+/* enum: Subscribe to receive periodic time events indicating the current NIC
+ * time
+ */
+#define MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE 0x18
+/* enum: Unsubscribe to stop receiving time events */
+#define MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE 0x19
+/* enum: PPS-based manufacturing tests. Requires PPS output to be looped to PPS
+ * input on the same NIC.
+ */
+#define MC_CMD_PTP_OP_MANFTEST_PPS 0x1a
/* enum: Above this for future use. */
-#define MC_CMD_PTP_OP_MAX 0x16
+#define MC_CMD_PTP_OP_MAX 0x1b
/* MC_CMD_PTP_IN_ENABLE msgrequest */
#define MC_CMD_PTP_IN_ENABLE_LEN 16
@@ -938,8 +1003,12 @@
#define MC_CMD_PTP_IN_ADJUST_BITS 0x28
/* Time adjustment in seconds */
#define MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16
+/* Time adjustment major value */
+#define MC_CMD_PTP_IN_ADJUST_MAJOR_OFST 16
/* Time adjustment in nanoseconds */
#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20
+/* Time adjustment minor value */
+#define MC_CMD_PTP_IN_ADJUST_MINOR_OFST 20
/* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */
#define MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20
@@ -1005,8 +1074,12 @@
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
/* Time adjustment in seconds */
#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_OFST 8
+/* Time adjustment major value */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_OFST 8
/* Time adjustment in nanoseconds */
#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_OFST 12
+/* Time adjustment minor value */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_OFST 12
/* MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST msgrequest */
#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_LEN 16
@@ -1078,9 +1151,51 @@
#define MC_CMD_PTP_ENABLE_PPS 0x0
/* enum: Disable */
#define MC_CMD_PTP_DISABLE_PPS 0x1
-/* Queueid to send events back */
+/* Queue id to send events back */
#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_OFST 8
+/* MC_CMD_PTP_IN_GET_TIME_FORMAT msgrequest */
+#define MC_CMD_PTP_IN_GET_TIME_FORMAT_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_GET_ATTRIBUTES msgrequest */
+#define MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS msgrequest */
+#define MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE msgrequest */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Event queue to send PTP time events to */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8
+
+/* MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE msgrequest */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Unsubscribe options */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_OFST 8
+/* enum: Unsubscribe a single queue */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE 0x0
+/* enum: Unsubscribe all queues */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_ALL 0x1
+/* Event queue ID */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_OFST 12
+
+/* MC_CMD_PTP_IN_MANFTEST_PPS msgrequest */
+#define MC_CMD_PTP_IN_MANFTEST_PPS_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* 1 to enable PPS test mode, 0 to disable and return result. */
+#define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_OFST 8
+
/* MC_CMD_PTP_OUT msgresponse */
#define MC_CMD_PTP_OUT_LEN 0
@@ -1088,15 +1203,29 @@
#define MC_CMD_PTP_OUT_TRANSMIT_LEN 8
/* Value of seconds timestamp */
#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_TRANSMIT_MAJOR_OFST 0
/* Value of nanoseconds timestamp */
#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_TRANSMIT_MINOR_OFST 4
+
+/* MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE msgresponse */
+#define MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE_LEN 0
+
+/* MC_CMD_PTP_OUT_TIME_EVENT_UNSUBSCRIBE msgresponse */
+#define MC_CMD_PTP_OUT_TIME_EVENT_UNSUBSCRIBE_LEN 0
/* MC_CMD_PTP_OUT_READ_NIC_TIME msgresponse */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8
/* Value of seconds timestamp */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_OFST 0
/* Value of nanoseconds timestamp */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_OFST 4
/* MC_CMD_PTP_OUT_STATUS msgresponse */
#define MC_CMD_PTP_OUT_STATUS_LEN 64
@@ -1116,21 +1245,21 @@
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24
/* Number of PPS bad periods */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28
-/* Minimum period of PPS pulse */
+/* Minimum period of PPS pulse in nanoseconds */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32
-/* Maximum period of PPS pulse */
+/* Maximum period of PPS pulse in nanoseconds */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36
-/* Last period of PPS pulse */
+/* Last period of PPS pulse in nanoseconds */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40
-/* Mean period of PPS pulse */
+/* Mean period of PPS pulse in nanoseconds */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44
-/* Minimum offset of PPS pulse (signed) */
+/* Minimum offset of PPS pulse in nanoseconds (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48
-/* Maximum offset of PPS pulse (signed) */
+/* Maximum offset of PPS pulse in nanoseconds (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52
-/* Last offset of PPS pulse (signed) */
+/* Last offset of PPS pulse in nanoseconds (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56
-/* Mean offset of PPS pulse (signed) */
+/* Mean offset of PPS pulse in nanoseconds (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60
/* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */
@@ -1146,8 +1275,12 @@
#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0
/* Value of seconds timestamp */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_OFST 4
/* Value of nanoseconds timestamp */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_OFST 8
/* Host time immediately after NIC's hardware clock read */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12
/* Number of nanoseconds waited after reading NIC's hardware clock */
@@ -1177,6 +1310,16 @@
#define MC_CMD_PTP_MANF_PACKET_ENOUGH 0x8
/* enum: Timestamp trigger GPIO not working */
#define MC_CMD_PTP_MANF_GPIO_TRIGGER 0x9
+/* enum: Insufficient PPS events to perform checks */
+#define MC_CMD_PTP_MANF_PPS_ENOUGH 0xa
+/* enum: PPS time event period not sufficiently close to 1s. */
+#define MC_CMD_PTP_MANF_PPS_PERIOD 0xb
+/* enum: PPS time event ns reading not sufficiently close to zero. */
+#define MC_CMD_PTP_MANF_PPS_NS 0xc
+/* enum: PTP peripheral registers incorrect */
+#define MC_CMD_PTP_MANF_REGISTERS 0xd
+/* enum: Failed to read time from PTP peripheral */
+#define MC_CMD_PTP_MANF_CLOCK_READ 0xe
/* Presence of external oscillator */
#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4
@@ -1198,6 +1341,62 @@
#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MINNUM 1
#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MAXNUM 252
+/* MC_CMD_PTP_OUT_GET_TIME_FORMAT msgresponse */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_LEN 4
+/* Time format required/used by this NIC. Applies to all PTP MCDI
+ * operations that pass times between the host and firmware. If this operation
+ * is not supported (older firmware) a format of seconds and nanoseconds should
+ * be assumed.
+ */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_OFST 0
+/* enum: Times are in seconds and nanoseconds */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_NANOSECONDS 0x0
+/* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_16SECONDS_8NANOSECONDS 0x1
+/* enum: Major register has units of seconds, minor 2^-27s per tick */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_27FRACTION 0x2
+
+/* MC_CMD_PTP_OUT_GET_ATTRIBUTES msgresponse */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN 8
+/* Time format required/used by this NIC. Applies to all PTP MCDI
+ * operations that pass times between the host and firmware. If this operation
+ * is not supported (older firmware) a format of seconds and nanoseconds should
+ * be assumed.
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_OFST 0
+/* enum: Times are in seconds and nanoseconds */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS 0x0
+/* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_16SECONDS_8NANOSECONDS 0x1
+/* enum: Major register has units of seconds, minor 2^-27s per tick */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION 0x2
+/* Minimum acceptable value for a corrected synchronization timeset. When
+ * comparing host and NIC clock times, the MC returns a set of samples that
+ * contain the host start and end time, the MC time when the host start was
+ * detected and the time the MC waited between reading the time and detecting
+ * the host end. The corrected sync window is the difference between the host
+ * end and start times minus the time that the MC waited for host end.
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_OFST 4
+
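A worked sketch of the corrected-window arithmetic described above (hypothetical helper; assumes all quantities are already in nanoseconds):

static bool example_sync_window_ok(u32 host_start, u32 host_end,
				   u32 mc_wait, u32 window_min)
{
	/* The corrected window is the raw host window minus the time the
	 * MC spent waiting for the host end marker; samples narrower than
	 * the advertised minimum should be discarded as implausible.
	 */
	u32 window = (host_end - host_start) - mc_wait;

	return window >= window_min;
}
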
+/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16
+/* Uncorrected error on transmit timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_OFST 0
+/* Uncorrected error on receive timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_OFST 4
+/* Uncorrected error on PPS output in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_OFST 8
+/* Uncorrected error on PPS input in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_OFST 12
+
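These corrections are applied by the driver when converting raw NIC timestamps; a minimal sketch using the efx_ptp_data conversion hooks set up in ptp.c later in this patch:

static ktime_t example_corrected_rx_time(struct efx_ptp_data *ptp,
					 u32 nic_major, u32 nic_minor)
{
	/* nic_to_kernel_time and ts_corrections are populated from the
	 * GET_ATTRIBUTES and GET_TIMESTAMP_CORRECTIONS responses.
	 */
	return ptp->nic_to_kernel_time(nic_major, nic_minor,
				       ptp->ts_corrections.rx);
}
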
+/* MC_CMD_PTP_OUT_MANFTEST_PPS msgresponse */
+#define MC_CMD_PTP_OUT_MANFTEST_PPS_LEN 4
+/* Results of testing */
+#define MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_PTP_OUT_MANFTEST_BASIC/TEST_RESULT */
+
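For illustration, a rough sketch of driving the two-phase PPS test from the host (the MC_CMD_PTP_OP_MANFTEST_PPS opcode and the _IN_ length constant are assumed from the unshown part of this header; error handling trimmed):

static int example_pps_manftest(struct efx_nic *efx, u32 enable, u32 *result)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_MANFTEST_PPS_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_MANFTEST_PPS_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_MANFTEST_PPS);
	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
	/* 1 starts the test; 0 stops it and returns the result */
	MCDI_SET_DWORD(inbuf, PTP_IN_MANFTEST_PPS_TEST_ENABLE, enable);
	rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), NULL);
	if (rc == 0 && !enable)
		*result = MCDI_DWORD(outbuf, PTP_OUT_MANFTEST_PPS_TEST_RESULT);
	return rc;
}
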
/***********************************/
/* MC_CMD_CSR_READ32
@@ -1923,6 +2122,8 @@
#define MC_CMD_MEDIA_SFP_PLUS 0x5
/* enum: 10GBaseT. */
#define MC_CMD_MEDIA_BASE_T 0x6
+/* enum: QSFP+. */
+#define MC_CMD_MEDIA_QSFP_PLUS 0x7
#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
/* enum: Native clause 22 */
#define MC_CMD_MMD_CLAUSE22 0x0
@@ -2223,6 +2424,8 @@
#define MC_CMD_LOOPBACK_SD_FEP_WS 0x21
/* enum: KR Serdes Serial Wireside. */
#define MC_CMD_LOOPBACK_SD_FES_WS 0x22
+/* enum: Near side of AOE Siena side port */
+#define MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23
/* Supported loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8
@@ -2286,6 +2489,10 @@
#define MC_CMD_GET_LINK_OUT_BPX_LINK_WIDTH 1
#define MC_CMD_GET_LINK_OUT_PHY_LINK_LBN 3
#define MC_CMD_GET_LINK_OUT_PHY_LINK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_LBN 6
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_LBN 7
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1
/* This returns the negotiated flow control value. */
#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
/* enum: Flow control is off. */
@@ -3175,7 +3382,7 @@
#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_OFST 0
/* MC_CMD_SENSOR_INFO_OUT msgresponse */
-#define MC_CMD_SENSOR_INFO_OUT_LENMIN 12
+#define MC_CMD_SENSOR_INFO_OUT_LENMIN 4
#define MC_CMD_SENSOR_INFO_OUT_LENMAX 252
#define MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num))
#define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0
@@ -3269,16 +3476,18 @@
#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR 0x2b
/* enum: voltage between VSS08D and VSS08D at CSR (external ADC): mV */
#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC 0x2c
+/* enum: Hotpoint temperature: degC */
+#define MC_CMD_SENSOR_HOTPOINT_TEMP 0x2d
/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
#define MC_CMD_SENSOR_ENTRY_OFST 4
#define MC_CMD_SENSOR_ENTRY_LEN 8
#define MC_CMD_SENSOR_ENTRY_LO_OFST 4
#define MC_CMD_SENSOR_ENTRY_HI_OFST 8
-#define MC_CMD_SENSOR_ENTRY_MINNUM 1
+#define MC_CMD_SENSOR_ENTRY_MINNUM 0
#define MC_CMD_SENSOR_ENTRY_MAXNUM 31
/* MC_CMD_SENSOR_INFO_EXT_OUT msgresponse */
-#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMIN 12
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMIN 4
#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX 252
#define MC_CMD_SENSOR_INFO_EXT_OUT_LEN(num) (4+8*(num))
#define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST 0
@@ -3291,7 +3500,7 @@
/* MC_CMD_SENSOR_ENTRY_LEN 8 */
/* MC_CMD_SENSOR_ENTRY_LO_OFST 4 */
/* MC_CMD_SENSOR_ENTRY_HI_OFST 8 */
-/* MC_CMD_SENSOR_ENTRY_MINNUM 1 */
+/* MC_CMD_SENSOR_ENTRY_MINNUM 0 */
/* MC_CMD_SENSOR_ENTRY_MAXNUM 31 */
/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF structuredef */
@@ -3864,6 +4073,18 @@
#define NVRAM_PARTITION_TYPE_ID_LBN 0
#define NVRAM_PARTITION_TYPE_ID_WIDTH 16
+/* LICENSED_APP_ID structuredef */
+#define LICENSED_APP_ID_LEN 4
+#define LICENSED_APP_ID_ID_OFST 0
+/* enum: OpenOnload */
+#define LICENSED_APP_ID_ONLOAD 0x1
+/* enum: PTP timestamping */
+#define LICENSED_APP_ID_PTP 0x2
+/* enum: SolarCapture Pro */
+#define LICENSED_APP_ID_SOLARCAPTURE_PRO 0x4
+#define LICENSED_APP_ID_ID_LBN 0
+#define LICENSED_APP_ID_ID_WIDTH 32
+
/***********************************/
/* MC_CMD_READ_REGS
@@ -4021,6 +4242,8 @@
#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_WIDTH 1
#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_LBN 8
#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20
/* The port ID associated with the v-adaptor which should contain this DMAQ. */
@@ -4179,6 +4402,9 @@
#define MC_CMD_PROXY_CMD_IN_TARGET_VF_WIDTH 16
#define MC_CMD_PROXY_CMD_IN_VF_NULL 0xffff /* enum */
+/* MC_CMD_PROXY_CMD_OUT msgresponse */
+#define MC_CMD_PROXY_CMD_OUT_LEN 0
+
/***********************************/
/* MC_CMD_ALLOC_BUFTBL_CHUNK
@@ -4213,7 +4439,7 @@
/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 252
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num))
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0
/* ID */
@@ -4226,7 +4452,7 @@
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_OFST 12
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_OFST 16
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MINNUM 1
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 30
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 32
/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT msgresponse */
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT_LEN 0
@@ -6800,6 +7026,30 @@
/***********************************/
+/* MC_CMD_CAP_BLK_READ
+ * Read multiple 64bit words from capture block memory
+ */
+#define MC_CMD_CAP_BLK_READ 0xe7
+
+/* MC_CMD_CAP_BLK_READ_IN msgrequest */
+#define MC_CMD_CAP_BLK_READ_IN_LEN 12
+#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_OFST 0
+#define MC_CMD_CAP_BLK_READ_IN_ADDR_OFST 4
+#define MC_CMD_CAP_BLK_READ_IN_COUNT_OFST 8
+
+/* MC_CMD_CAP_BLK_READ_OUT msgresponse */
+#define MC_CMD_CAP_BLK_READ_OUT_LENMIN 8
+#define MC_CMD_CAP_BLK_READ_OUT_LENMAX 248
+#define MC_CMD_CAP_BLK_READ_OUT_LEN(num) (0+8*(num))
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_OFST 0
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LEN 8
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_OFST 0
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_OFST 4
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MINNUM 1
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM 31
+
+
+/***********************************/
/* MC_CMD_DUMP_DO
* Take a dump of the DUT state
*/
@@ -6826,6 +7076,10 @@
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_MAX_DEPTH 0x2 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+/* enum: The uart port this command was received over (if using a uart
+ * transport)
+ */
+#define MC_CMD_DUMP_DO_IN_UART_PORT_SRC 0xff
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_OFST 28
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM 0x0 /* enum */
@@ -6942,39 +7196,68 @@
/***********************************/
-/* MC_CMD_START_KR_EYE_PLOT
- * Start KR Serdes Eye diagram plot on a given lane. Lane must have valid
- * signal.
- */
-#define MC_CMD_START_KR_EYE_PLOT 0xee
-
-/* MC_CMD_START_KR_EYE_PLOT_IN msgrequest */
-#define MC_CMD_START_KR_EYE_PLOT_IN_LEN 4
-#define MC_CMD_START_KR_EYE_PLOT_IN_LANE_OFST 0
-
-/* MC_CMD_START_KR_EYE_PLOT_OUT msgresponse */
-#define MC_CMD_START_KR_EYE_PLOT_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_POLL_KR_EYE_PLOT
- * Poll KR Serdes Eye diagram plot. Returns one row of BER data. The caller
- * should call this command repeatedly after starting eye plot, until no more
- * data is returned.
- */
-#define MC_CMD_POLL_KR_EYE_PLOT 0xef
-
-/* MC_CMD_POLL_KR_EYE_PLOT_IN msgrequest */
-#define MC_CMD_POLL_KR_EYE_PLOT_IN_LEN 0
-
-/* MC_CMD_POLL_KR_EYE_PLOT_OUT msgresponse */
-#define MC_CMD_POLL_KR_EYE_PLOT_OUT_LENMIN 0
-#define MC_CMD_POLL_KR_EYE_PLOT_OUT_LENMAX 252
-#define MC_CMD_POLL_KR_EYE_PLOT_OUT_LEN(num) (0+2*(num))
-#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_OFST 0
-#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_LEN 2
-#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_MINNUM 0
-#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
+/* MC_CMD_UART_SEND_DATA
+ * Send a checksummed block of data over the uart. The response is a
+ * placeholder in case we wish to make this reliable; currently requests are
+ * fire-and-forget.
+ */
+#define MC_CMD_UART_SEND_DATA 0xee
+
+/* MC_CMD_UART_SEND_DATA_OUT msgrequest */
+#define MC_CMD_UART_SEND_DATA_OUT_LENMIN 16
+#define MC_CMD_UART_SEND_DATA_OUT_LENMAX 252
+#define MC_CMD_UART_SEND_DATA_OUT_LEN(num) (16+1*(num))
+/* CRC32 over OFFSET, LENGTH, RESERVED, DATA */
+#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_OFST 0
+/* Offset at which to write the data */
+#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST 4
+/* Length of data */
+#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_OFST 8
+/* Reserved for future use */
+#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_OFST 12
+#define MC_CMD_UART_SEND_DATA_OUT_DATA_OFST 16
+#define MC_CMD_UART_SEND_DATA_OUT_DATA_LEN 1
+#define MC_CMD_UART_SEND_DATA_OUT_DATA_MINNUM 0
+#define MC_CMD_UART_SEND_DATA_OUT_DATA_MAXNUM 236
+
+/* MC_CMD_UART_SEND_DATA_IN msgresponse */
+#define MC_CMD_UART_SEND_DATA_IN_LEN 0
+
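A sketch of filling in the CHECKSUM field: the CRC covers everything after the checksum word itself. The seed and bit order expected by the firmware are assumptions here, not taken from this header:

#include <linux/crc32.h>

static void example_uart_send_csum(u8 *buf, size_t total_len)
{
	/* CRC32 over the OFFSET, LENGTH, RESERVED and DATA fields */
	u32 csum = crc32_le(~0, buf + MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST,
			    total_len - MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST);

	*(__le32 *)(buf + MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_OFST) =
		cpu_to_le32(csum);
}
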
+
+/***********************************/
+/* MC_CMD_UART_RECV_DATA
+ * Request a checksummed block of data over the uart. Only a placeholder,
+ * subject to change and not currently implemented.
+ */
+#define MC_CMD_UART_RECV_DATA 0xef
+
+/* MC_CMD_UART_RECV_DATA_OUT msgrequest */
+#define MC_CMD_UART_RECV_DATA_OUT_LEN 16
+/* CRC32 over OFFSET, LENGTH, RESERVED */
+#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_OFST 0
+/* Offset from which to read the data */
+#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_OFST 4
+/* Length of data */
+#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_OFST 8
+/* Reserved for future use */
+#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_OFST 12
+
+/* MC_CMD_UART_RECV_DATA_IN msgresponse */
+#define MC_CMD_UART_RECV_DATA_IN_LENMIN 16
+#define MC_CMD_UART_RECV_DATA_IN_LENMAX 252
+#define MC_CMD_UART_RECV_DATA_IN_LEN(num) (16+1*(num))
+/* CRC32 over RESERVED1, RESERVED2, RESERVED3, DATA */
+#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_OFST 0
+/* Reserved for future use */
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_OFST 4
+/* Reserved for future use */
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_OFST 8
+/* Reserved for future use */
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_OFST 12
+#define MC_CMD_UART_RECV_DATA_IN_DATA_OFST 16
+#define MC_CMD_UART_RECV_DATA_IN_DATA_LEN 1
+#define MC_CMD_UART_RECV_DATA_IN_DATA_MINNUM 0
+#define MC_CMD_UART_RECV_DATA_IN_DATA_MAXNUM 236
/***********************************/
@@ -7026,6 +7309,15 @@
#define MC_CMD_KR_TUNE_IN_TXEQ_SET 0x3
/* enum: Force KR Serdes reset / recalibration */
#define MC_CMD_KR_TUNE_IN_RECAL 0x4
+/* enum: Start KR Serdes Eye diagram plot on a given lane. Lane must have valid
+ * signal.
+ */
+#define MC_CMD_KR_TUNE_IN_START_EYE_PLOT 0x5
+/* enum: Poll KR Serdes Eye diagram plot. Returns one row of BER data. The
+ * caller should call this command repeatedly after starting eye plot, until no
+ * more data is returned.
+ */
+#define MC_CMD_KR_TUNE_IN_POLL_EYE_PLOT 0x6
/* Align the arguments to 32 bits */
#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_OFST 1
#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_LEN 3
@@ -7123,6 +7415,91 @@
/* MC_CMD_KR_TUNE_RXEQ_SET_OUT msgresponse */
#define MC_CMD_KR_TUNE_RXEQ_SET_OUT_LEN 0
+/* MC_CMD_KR_TUNE_TXEQ_GET_IN msgrequest */
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_TXEQ_GET_OUT msgresponse */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LENMIN 4
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LENMAX 252
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num))
+/* TXEQ Parameter */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_OFST 0
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LEN 4
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: TX Amplitude */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV 0x0
+/* enum: De-Emphasis Tap1 Magnitude (0-7) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_MODE 0x1
+/* enum: De-Emphasis Tap1 Fine */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_DTLEV 0x2
+/* enum: De-Emphasis Tap2 Magnitude (0-6) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2 0x3
+/* enum: De-Emphasis Tap2 Fine */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2TLEV 0x4
+/* enum: Pre-Emphasis Magnitude */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_E 0x5
+/* enum: Pre-Emphasis Fine */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_ETLEV 0x6
+/* enum: TX Slew Rate Coarse control */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY 0x7
+/* enum: TX Slew Rate Fine control */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET 0x8
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0 0x0 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_1 0x1 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_2 0x2 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_3 0x3 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_ALL 0x4 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_LBN 11
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 5
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_LBN 16
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED2_LBN 24
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED2_WIDTH 8
+
+/* MC_CMD_KR_TUNE_TXEQ_SET_IN msgrequest */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LENMIN 8
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LENMAX 252
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_RSVD_LEN 3
+/* TXEQ Parameter */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_OFST 4
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LEN 4
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_MINNUM 1
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_MAXNUM 62
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_ID_LBN 0
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_ID_WIDTH 8
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_TXEQ_GET_OUT/PARAM_ID */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LANE_LBN 8
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LANE_WIDTH 3
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_TXEQ_GET_OUT/PARAM_LANE */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED_LBN 11
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED_WIDTH 5
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_INITIAL_LBN 16
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED2_LBN 24
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED2_WIDTH 8
+
+/* MC_CMD_KR_TUNE_TXEQ_SET_OUT msgresponse */
+#define MC_CMD_KR_TUNE_TXEQ_SET_OUT_LEN 0
+
/* MC_CMD_KR_TUNE_RECAL_IN msgrequest */
#define MC_CMD_KR_TUNE_RECAL_IN_LEN 4
/* Requested operation */
@@ -7135,6 +7512,37 @@
/* MC_CMD_KR_TUNE_RECAL_OUT msgresponse */
#define MC_CMD_KR_TUNE_RECAL_OUT_LEN 0
+/* MC_CMD_KR_TUNE_START_EYE_PLOT_IN msgrequest */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LEN 8
+/* Requested operation */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+
+/* MC_CMD_KR_TUNE_START_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN msgrequest */
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LEN(num) (0+2*(num))
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
+
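A minimal sketch of the start-then-poll protocol for these sub-operations (MC_CMD_KR_TUNE itself is defined in the unshown part of this header; lane setup and sample processing omitted):

static int example_kr_poll_eye_plot(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMAX);
	size_t outlen;
	int rc;

	memset(inbuf, 0, sizeof(inbuf));
	/* the requested operation is a single byte; bytes 1-3 are padding */
	*(u8 *)_MCDI_PTR(inbuf, MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_OP_OFST) =
		MC_CMD_KR_TUNE_IN_POLL_EYE_PLOT;

	/* after START_EYE_PLOT, poll until the MC returns no more samples */
	do {
		rc = efx_mcdi_rpc(efx, MC_CMD_KR_TUNE, inbuf, sizeof(inbuf),
				  outbuf, sizeof(outbuf), &outlen);
	} while (rc == 0 && outlen > 0);

	return rc;
}
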
/***********************************/
/* MC_CMD_PCIE_TUNE
@@ -7157,6 +7565,13 @@
#define MC_CMD_PCIE_TUNE_IN_TXEQ_GET 0x2
/* enum: Override TX Driver settings */
#define MC_CMD_PCIE_TUNE_IN_TXEQ_SET 0x3
+/* enum: Start PCIe Serdes Eye diagram plot on a given lane. */
+#define MC_CMD_PCIE_TUNE_IN_START_EYE_PLOT 0x5
+/* enum: Poll PCIe Serdes Eye diagram plot. Returns one row of BER data. The
+ * caller should call this command repeatedly after starting eye plot, until no
+ * more data is returned.
+ */
+#define MC_CMD_PCIE_TUNE_IN_POLL_EYE_PLOT 0x6
/* Align the arguments to 32 bits */
#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_OFST 1
#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_LEN 3
@@ -7258,6 +7673,37 @@
#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_LBN 24
#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
+/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LEN 8
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+
+/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT_LEN 0
+
+/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
+
+/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LEN(num) (0+2*(num))
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
+
/***********************************/
/* MC_CMD_LICENSING
@@ -7310,5 +7756,152 @@
*/
#define MC_CMD_MC2MC_PROXY 0xf4
+/* MC_CMD_MC2MC_PROXY_IN msgrequest */
+#define MC_CMD_MC2MC_PROXY_IN_LEN 0
+
+/* MC_CMD_MC2MC_PROXY_OUT msgresponse */
+#define MC_CMD_MC2MC_PROXY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_LICENSED_APP_STATE
+ * Query the state of an individual licensed application. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING OP_UPDATE_LICENSE operation
+ * or a reboot of the MC.)
+ */
+#define MC_CMD_GET_LICENSED_APP_STATE 0xf5
+
+/* MC_CMD_GET_LICENSED_APP_STATE_IN msgrequest */
+#define MC_CMD_GET_LICENSED_APP_STATE_IN_LEN 4
+/* application ID to query (LICENSED_APP_ID_xxx) */
+#define MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_OFST 0
+
+/* MC_CMD_GET_LICENSED_APP_STATE_OUT msgresponse */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN 4
+/* state of this application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_OFST 0
+/* enum: no (or invalid) license is present for the application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED 0x0
+/* enum: a valid license is present for the application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LICENSED 0x1
+
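A minimal sketch of querying an application's licence state, assuming the generic efx_mcdi_rpc() helper used elsewhere in this driver:

static int example_get_app_state(struct efx_nic *efx, u32 app_id, u32 *state)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_LICENSED_APP_STATE_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, GET_LICENSED_APP_STATE_IN_APP_ID, app_id);
	rc = efx_mcdi_rpc(efx, MC_CMD_GET_LICENSED_APP_STATE,
			  inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), NULL);
	if (rc)
		return rc;
	*state = MCDI_DWORD(outbuf, GET_LICENSED_APP_STATE_OUT_STATE);
	return 0;
}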
+
+/***********************************/
+/* MC_CMD_LICENSED_APP_OP
+ * Perform an action for an individual licensed application.
+ */
+#define MC_CMD_LICENSED_APP_OP 0xf6
+
+/* MC_CMD_LICENSED_APP_OP_IN msgrequest */
+#define MC_CMD_LICENSED_APP_OP_IN_LENMIN 8
+#define MC_CMD_LICENSED_APP_OP_IN_LENMAX 252
+#define MC_CMD_LICENSED_APP_OP_IN_LEN(num) (8+4*(num))
+/* application ID */
+#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_OFST 0
+/* the type of operation requested */
+#define MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4
+/* enum: validate application */
+#define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0
+/* arguments specific to this particular operation */
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST 8
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MINNUM 0
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM 61
+
+/* MC_CMD_LICENSED_APP_OP_OUT msgresponse */
+#define MC_CMD_LICENSED_APP_OP_OUT_LENMIN 0
+#define MC_CMD_LICENSED_APP_OP_OUT_LENMAX 252
+#define MC_CMD_LICENSED_APP_OP_OUT_LEN(num) (0+4*(num))
+/* result specific to this particular operation */
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_OFST 0
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_LEN 4
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MINNUM 0
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM 63
+
+/* MC_CMD_LICENSED_APP_OP_VALIDATE_IN msgrequest */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_LEN 72
+/* application ID */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_OFST 0
+/* the type of operation requested */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_OFST 4
+/* validation challenge */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_OFST 8
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_LEN 64
+
+/* MC_CMD_LICENSED_APP_OP_VALIDATE_OUT msgresponse */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_LEN 68
+/* feature expiry (time_t) */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_OFST 0
+/* validation response */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64
+
+
+/***********************************/
+/* MC_CMD_SET_PORT_SNIFF_CONFIG
+ * Configure port sniffing for the physical port associated with the calling
+ * function. Only a privileged function may change the port sniffing
+ * configuration. A copy of all traffic delivered to the host (non-promiscuous
+ * mode) or all traffic arriving at the port (promiscuous mode) may be
+ * delivered to a specific queue, or a set of queues with RSS.
+ */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG 0xf7
+
+/* MC_CMD_SET_PORT_SNIFF_CONFIG_IN msgrequest */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN 16
+/* configuration flags */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN 1
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_WIDTH 1
+/* receive queue handle (for RSS mode, this is the base queue) */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+/* receive mode */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+/* enum: receive to just the specified queue */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
+/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note
+ * that these handles should be considered opaque to the host, although a value
+ * of 0xFFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+
+/* MC_CMD_SET_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_OUT_LEN 0
+
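As a rough usage sketch (assumes the MCDI_POPULATE_DWORD_2 helper variant; a real caller would supply a valid RSS context when using RX_MODE_RSS):

static int example_enable_port_sniff(struct efx_nic *efx, bool promiscuous,
				     u32 rx_queue)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN);

	MCDI_POPULATE_DWORD_2(inbuf, SET_PORT_SNIFF_CONFIG_IN_FLAGS,
			      SET_PORT_SNIFF_CONFIG_IN_ENABLE, 1,
			      SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS,
			      promiscuous);
	MCDI_SET_DWORD(inbuf, SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE, rx_queue);
	MCDI_SET_DWORD(inbuf, SET_PORT_SNIFF_CONFIG_IN_RX_MODE,
		       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE);
	/* 0xffffffff is guaranteed never to be a valid RSS context handle */
	MCDI_SET_DWORD(inbuf, SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT, 0xffffffff);
	return efx_mcdi_rpc(efx, MC_CMD_SET_PORT_SNIFF_CONFIG,
			    inbuf, sizeof(inbuf), NULL, 0, NULL);
}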
+
+/***********************************/
+/* MC_CMD_GET_PORT_SNIFF_CONFIG
+ * Obtain the current port sniffing configuration for the physical port
+ * associated with the calling function. Only a privileged function may read
+ * the configuration.
+ */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG 0xf8
+
+/* MC_CMD_GET_PORT_SNIFF_CONFIG_IN msgrequest */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_IN_LEN 0
+
+/* MC_CMD_GET_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_LEN 16
+/* configuration flags */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_LBN 1
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_WIDTH 1
+/* receiving queue handle (for RSS mode, this is the base queue) */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+/* receive mode */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+/* enum: receiving to just the specified queue */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
+/* enum: receiving to multiple queues using RSS context */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
+/* RSS context (for RX_MODE_RSS) */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+
#endif /* MCDI_PCOL_H */
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 7b6be61d549f..91d23252f8fa 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -90,13 +90,6 @@ static int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
rc = efx_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf),
NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -143,17 +136,13 @@ static int efx_mcdi_mdio_read(struct net_device *net_dev,
rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_READ, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc)
- goto fail;
+ return rc;
if (MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS) !=
MC_CMD_MDIO_STATUS_GOOD)
return -EIO;
return (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE);
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
}
static int efx_mcdi_mdio_write(struct net_device *net_dev,
@@ -174,17 +163,13 @@ static int efx_mcdi_mdio_write(struct net_device *net_dev,
rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_WRITE, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc)
- goto fail;
+ return rc;
if (MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS) !=
MC_CMD_MDIO_STATUS_GOOD)
return -EIO;
return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
}
static u32 mcdi_to_ethtool_cap(u32 media, u32 cap)
@@ -487,17 +472,14 @@ static bool efx_mcdi_phy_poll(struct efx_nic *efx)
rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
outbuf, sizeof(outbuf), NULL);
- if (rc) {
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
- __func__, rc);
+ if (rc)
efx->link_state.up = false;
- } else {
+ else
efx_mcdi_phy_decode_link(
efx, &efx->link_state,
MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED),
MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS),
MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL));
- }
return !efx_link_state_equal(&efx->link_state, &old_state);
}
@@ -531,11 +513,8 @@ static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *e
BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
outbuf, sizeof(outbuf), NULL);
- if (rc) {
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
- __func__, rc);
+ if (rc)
return;
- }
ecmd->lp_advertising =
mcdi_to_ethtool_cap(phy_cfg->media,
MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP));
@@ -918,21 +897,29 @@ bool efx_mcdi_mac_check_fault(struct efx_nic *efx)
rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
outbuf, sizeof(outbuf), &outlength);
- if (rc) {
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
- __func__, rc);
+ if (rc)
return true;
- }
return MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT) != 0;
}
-static int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
- u32 dma_len, int enable, int clear)
+enum efx_stats_action {
+ EFX_STATS_ENABLE,
+ EFX_STATS_DISABLE,
+ EFX_STATS_PULL,
+};
+
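+/* Argument mapping per action:
+ *   EFX_STATS_ENABLE:  change=1, enable=1, period=1000ms, DMA the stats buffer
+ *   EFX_STATS_DISABLE: change=1, enable=0, period=0, DMA length of zero
+ *   EFX_STATS_PULL:    change=0, enable=0, period=0, DMA the stats buffer
+ */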
+static int efx_mcdi_mac_stats(struct efx_nic *efx,
+ enum efx_stats_action action, int clear)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
int rc;
- int period = enable ? 1000 : 0;
+ int change = action == EFX_STATS_PULL ? 0 : 1;
+ int enable = action == EFX_STATS_ENABLE ? 1 : 0;
+ int period = action == EFX_STATS_ENABLE ? 1000 : 0;
+ dma_addr_t dma_addr = efx->stats_buffer.dma_addr;
+ u32 dma_len = action != EFX_STATS_DISABLE ?
+ MC_CMD_MAC_NSTATS * sizeof(u64) : 0;
BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0);
@@ -940,8 +927,8 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
MCDI_POPULATE_DWORD_7(inbuf, MAC_STATS_IN_CMD,
MAC_STATS_IN_DMA, !!enable,
MAC_STATS_IN_CLEAR, clear,
- MAC_STATS_IN_PERIODIC_CHANGE, 1,
- MAC_STATS_IN_PERIODIC_ENABLE, !!enable,
+ MAC_STATS_IN_PERIODIC_CHANGE, change,
+ MAC_STATS_IN_PERIODIC_ENABLE, enable,
MAC_STATS_IN_PERIODIC_CLEAR, 0,
MAC_STATS_IN_PERIODIC_NOEVENT, 1,
MAC_STATS_IN_PERIOD_MS, period);
@@ -949,14 +936,6 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: %s failed rc=%d\n",
- __func__, enable ? "enable" : "disable", rc);
return rc;
}
@@ -966,13 +945,29 @@ void efx_mcdi_mac_start_stats(struct efx_nic *efx)
dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
- efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr,
- MC_CMD_MAC_NSTATS * sizeof(u64), 1, 0);
+ efx_mcdi_mac_stats(efx, EFX_STATS_ENABLE, 0);
}
void efx_mcdi_mac_stop_stats(struct efx_nic *efx)
{
- efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0);
+ efx_mcdi_mac_stats(efx, EFX_STATS_DISABLE, 0);
+}
+
+#define EFX_MAC_STATS_WAIT_US 100
+#define EFX_MAC_STATS_WAIT_ATTEMPTS 10
+
+void efx_mcdi_mac_pull_stats(struct efx_nic *efx)
+{
+ __le64 *dma_stats = efx->stats_buffer.addr;
+ int attempts = EFX_MAC_STATS_WAIT_ATTEMPTS;
+
+ dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
+ efx_mcdi_mac_stats(efx, EFX_STATS_PULL, 0);
+
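+	/* The MC rewrites the generation count once a consistent snapshot
+	 * has been DMAed, so wait (bounded) for it to leave the invalid
+	 * marker written above.
+	 */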
+ while (dma_stats[MC_CMD_MAC_GENERATION_END] ==
+ EFX_MC_STATS_GENERATION_INVALID &&
+ attempts-- != 0)
+ udelay(EFX_MAC_STATS_WAIT_US);
}
int efx_mcdi_port_probe(struct efx_nic *efx)
@@ -1003,7 +998,7 @@ int efx_mcdi_port_probe(struct efx_nic *efx)
efx->stats_buffer.addr,
(u64)virt_to_phys(efx->stats_buffer.addr));
- efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1);
+ efx_mcdi_mac_stats(efx, EFX_STATS_DISABLE, 1);
return 0;
}
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 542a0d252ae0..af2b8c59a903 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -91,6 +91,7 @@
/* Forward declare Precision Time Protocol (PTP) support structure. */
struct efx_ptp_data;
+struct hwtstamp_config;
struct efx_self_tests;
@@ -287,12 +288,9 @@ struct efx_rx_buffer {
* Used to facilitate sharing dma mappings between recycled rx buffers
* and those passed up to the kernel.
*
- * @refcnt: Number of struct efx_rx_buffer's referencing this page.
- * When refcnt falls to zero, the page is unmapped for dma
* @dma_addr: The dma address of this page.
*/
struct efx_rx_page_state {
- unsigned refcnt;
dma_addr_t dma_addr;
unsigned int __pad[0] ____cacheline_aligned;
@@ -362,10 +360,11 @@ struct efx_rx_queue {
unsigned int slow_fill_count;
};
-enum efx_rx_alloc_method {
- RX_ALLOC_METHOD_AUTO = 0,
- RX_ALLOC_METHOD_SKB = 1,
- RX_ALLOC_METHOD_PAGE = 2,
+enum efx_sync_events_state {
+ SYNC_EVENTS_DISABLED = 0,
+ SYNC_EVENTS_QUIESCENT,
+ SYNC_EVENTS_REQUESTED,
+ SYNC_EVENTS_VALID,
};
/**
@@ -407,6 +406,9 @@ enum efx_rx_alloc_method {
* by __efx_rx_packet(), if @rx_pkt_n_frags != 0
* @rx_queue: RX queue for this channel
* @tx_queue: TX queues for this channel
+ * @sync_events_state: Current state of sync events on this channel
+ * @sync_timestamp_major: Major part of the last ptp sync event
+ * @sync_timestamp_minor: Minor part of the last ptp sync event
*/
struct efx_channel {
struct efx_nic *efx;
@@ -445,6 +447,10 @@ struct efx_channel {
struct efx_rx_queue rx_queue;
struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
+
+ enum efx_sync_events_state sync_events_state;
+ u32 sync_timestamp_major;
+ u32 sync_timestamp_minor;
};
/**
@@ -520,15 +526,6 @@ enum nic_state {
STATE_RECOVERY = 3, /* device recovering from PCI error */
};
-/*
- * Alignment of the skb->head which wraps a page-allocated RX buffer
- *
- * The skb allocated to wrap an rx_buffer can have this alignment. Since
- * the data is memcpy'd from the rx_buf, it does not need to be equal to
- * NET_IP_ALIGN.
- */
-#define EFX_PAGE_SKB_ALIGN 2
-
/* Forward declaration */
struct efx_nic;
@@ -651,6 +648,13 @@ struct vfdi_status;
* struct efx_nic - an Efx NIC
* @name: Device name (net device name or bus id before net device registered)
* @pci_dev: The PCI device
+ * @node: List node for maintaining primary/secondary function lists
+ * @primary: &struct efx_nic instance for the primary function of this
+ * controller. May be the same structure, and may be %NULL if no
+ * primary function is bound. Serialised by rtnl_lock.
+ * @secondary_list: List of &struct efx_nic instances for the secondary PCI
+ * functions of the controller, if this is for the primary function.
+ * Serialised by rtnl_lock.
* @type: Controller type attributes
* @legacy_irq: IRQ number
* @workqueue: Workqueue for port reconfigures and the HW monitor.
@@ -694,6 +698,8 @@ struct vfdi_status;
* (valid only if @rx_prefix_size != 0; always negative)
* @rx_packet_len_offset: Offset of RX packet length from start of packet data
* (valid only for NICs that set %EFX_RX_PKT_PREFIX_LEN; always negative)
+ * @rx_packet_ts_offset: Offset of timestamp from start of packet data
+ * (valid only if channel->sync_timestamps_enabled; always negative)
* @rx_hash_key: Toeplitz hash key for RSS
* @rx_indir_table: Indirection table for RSS
* @rx_scatter: Scatter mode enabled for receives
@@ -763,6 +769,7 @@ struct vfdi_status;
* @local_lock: Mutex protecting %local_addr_list and %local_page_list.
* @peer_work: Work item to broadcast peer addresses to VMs.
* @ptp_data: PTP state data
+ * @vpd_sn: Serial number read from VPD
* @monitor_work: Hardware monitor workitem
* @biu_lock: BIU (bus interface unit) lock
* @last_irq_cpu: Last CPU to handle a possible test interrupt. This
@@ -777,6 +784,9 @@ struct efx_nic {
/* The following fields should be written very rarely */
char name[IFNAMSIZ];
+ struct list_head node;
+ struct efx_nic *primary;
+ struct list_head secondary_list;
struct pci_dev *pci_dev;
unsigned int port_num;
const struct efx_nic_type *type;
@@ -828,6 +838,7 @@ struct efx_nic {
unsigned int rx_prefix_size;
int rx_packet_hash_offset;
int rx_packet_len_offset;
+ int rx_packet_ts_offset;
u8 rx_hash_key[40];
u32 rx_indir_table[128];
bool rx_scatter;
@@ -852,10 +863,14 @@ struct efx_nic {
struct work_struct mac_work;
bool port_enabled;
+ bool mc_bist_for_other_fn;
bool port_initialized;
struct net_device *net_dev;
struct efx_buffer stats_buffer;
+ u64 rx_nodesc_drops_total;
+ u64 rx_nodesc_drops_while_down;
+ bool rx_nodesc_drops_prev_state;
unsigned int phy_type;
const struct efx_phy_operations *phy_op;
@@ -907,6 +922,8 @@ struct efx_nic {
struct efx_ptp_data *ptp_data;
+ char *vpd_sn;
+
/* The following fields may be written more often */
struct delayed_work monitor_work ____cacheline_aligned_in_smp;
@@ -959,6 +976,7 @@ struct efx_mtd_partition {
* @update_stats: Update statistics not provided by event handling.
* Either argument may be %NULL.
* @start_stats: Start the regular fetching of statistics
+ * @pull_stats: Pull stats from the NIC and wait until they arrive.
* @stop_stats: Stop the regular fetching of statistics
* @set_id_led: Set state of identifying LED or revert to automatic function
* @push_irq_moderation: Apply interrupt moderation value
@@ -997,7 +1015,7 @@ struct efx_mtd_partition {
* @tx_init: Initialise TX queue on the NIC
* @tx_remove: Free resources for TX queue
* @tx_write: Write TX descriptors and doorbell
- * @rx_push_indir_table: Write RSS indirection table to the NIC
+ * @rx_push_rss_config: Write RSS hash key and indirection table to the NIC
* @rx_probe: Allocate resources for RX queue
* @rx_init: Initialise RX queue on the NIC
* @rx_remove: Free resources for RX queue
@@ -1017,7 +1035,8 @@ struct efx_mtd_partition {
* @filter_insert: add or replace a filter
* @filter_remove_safe: remove a filter by ID, carefully
* @filter_get_safe: retrieve a filter by ID, carefully
- * @filter_clear_rx: remove RX filters by priority
+ * @filter_clear_rx: Remove all RX filters whose priority is less than or
+ * equal to the given priority and is not %EFX_FILTER_PRI_AUTO
* @filter_count_rx_used: Get the number of filters in use at a given priority
* @filter_get_rx_id_limit: Get maximum value of a filter id, plus 1
* @filter_get_rx_ids: Get list of RX filters at a given priority
@@ -1037,6 +1056,12 @@ struct efx_mtd_partition {
* @mtd_sync: Wait for write-back to complete on MTD partition. This
* also notifies the driver that a writer has finished using this
* partition.
+ * @ptp_write_host_time: Send host time to MC as part of sync protocol
+ * @ptp_set_ts_sync_events: Enable or disable sync events for inline RX
+ * timestamping, possibly only temporarily for the purposes of a reset.
+ * @ptp_set_ts_config: Set hardware timestamp configuration. The flags
+ * and tx_type will already have been validated but this operation
+ * must validate and update rx_filter.
* @revision: Hardware architecture revision
* @txd_ptr_tbl_base: TX descriptor ring base address
* @rxd_ptr_tbl_base: RX descriptor ring base address
@@ -1046,6 +1071,7 @@ struct efx_mtd_partition {
* @max_dma_mask: Maximum possible DMA mask
* @rx_prefix_size: Size of RX prefix before packet data
* @rx_hash_offset: Offset of RX flow hash within prefix
+ * @rx_ts_offset: Offset of timestamp within prefix
* @rx_buffer_padding: Size of padding at end of RX packet
* @can_rx_scatter: NIC is able to scatter packets to multiple buffers
* @always_rx_scatter: NIC will always scatter packets to multiple buffers
@@ -1055,6 +1081,7 @@ struct efx_mtd_partition {
* @offload_features: net_device feature flags for protocol offload
* features implemented in hardware
* @mcdi_max_ver: Maximum MCDI version supported
+ * @hwtstamp_filters: Mask of hardware timestamp filter types supported
*/
struct efx_nic_type {
unsigned int (*mem_map_size)(struct efx_nic *efx);
@@ -1077,6 +1104,7 @@ struct efx_nic_type {
size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats,
struct rtnl_link_stats64 *core_stats);
void (*start_stats)(struct efx_nic *efx);
+ void (*pull_stats)(struct efx_nic *efx);
void (*stop_stats)(struct efx_nic *efx);
void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode);
void (*push_irq_moderation)(struct efx_channel *channel);
@@ -1105,7 +1133,7 @@ struct efx_nic_type {
void (*tx_init)(struct efx_tx_queue *tx_queue);
void (*tx_remove)(struct efx_tx_queue *tx_queue);
void (*tx_write)(struct efx_tx_queue *tx_queue);
- void (*rx_push_indir_table)(struct efx_nic *efx);
+ void (*rx_push_rss_config)(struct efx_nic *efx);
int (*rx_probe)(struct efx_rx_queue *rx_queue);
void (*rx_init)(struct efx_rx_queue *rx_queue);
void (*rx_remove)(struct efx_rx_queue *rx_queue);
@@ -1130,8 +1158,8 @@ struct efx_nic_type {
int (*filter_get_safe)(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 filter_id, struct efx_filter_spec *);
- void (*filter_clear_rx)(struct efx_nic *efx,
- enum efx_filter_priority priority);
+ int (*filter_clear_rx)(struct efx_nic *efx,
+ enum efx_filter_priority priority);
u32 (*filter_count_rx_used)(struct efx_nic *efx,
enum efx_filter_priority priority);
u32 (*filter_get_rx_id_limit)(struct efx_nic *efx);
@@ -1155,6 +1183,9 @@ struct efx_nic_type {
int (*mtd_sync)(struct mtd_info *mtd);
#endif
void (*ptp_write_host_time)(struct efx_nic *efx, u32 host_time);
+ int (*ptp_set_ts_sync_events)(struct efx_nic *efx, bool en, bool temp);
+ int (*ptp_set_ts_config)(struct efx_nic *efx,
+ struct hwtstamp_config *init);
int revision;
unsigned int txd_ptr_tbl_base;
@@ -1165,6 +1196,7 @@ struct efx_nic_type {
u64 max_dma_mask;
unsigned int rx_prefix_size;
unsigned int rx_hash_offset;
+ unsigned int rx_ts_offset;
unsigned int rx_buffer_padding;
bool can_rx_scatter;
bool always_rx_scatter;
@@ -1173,6 +1205,7 @@ struct efx_nic_type {
netdev_features_t offload_features;
int mcdi_max_ver;
unsigned int max_rx_ip_filters;
+ u32 hwtstamp_filters;
};
/**************************************************************************
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 9c90bf56090f..79226b19e3c4 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -519,3 +519,15 @@ void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
}
}
}
+
+void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *rx_nodesc_drops)
+{
+ /* if down, or this is the first update after coming up */
+ if (!(efx->net_dev->flags & IFF_UP) || !efx->rx_nodesc_drops_prev_state)
+ efx->rx_nodesc_drops_while_down +=
+ *rx_nodesc_drops - efx->rx_nodesc_drops_total;
+ efx->rx_nodesc_drops_total = *rx_nodesc_drops;
+ efx->rx_nodesc_drops_prev_state = !!(efx->net_dev->flags & IFF_UP);
+ *rx_nodesc_drops -= efx->rx_nodesc_drops_while_down;
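+	/* report only the drops that occurred while the interface was up */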
+}
+
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 91c63ec79c5f..a001fae1a8d7 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -412,8 +412,8 @@ enum {
EF10_STAT_rx_dp_q_disabled_packets,
EF10_STAT_rx_dp_di_dropped_packets,
EF10_STAT_rx_dp_streaming_packets,
- EF10_STAT_rx_dp_emerg_fetch,
- EF10_STAT_rx_dp_emerg_wait,
+ EF10_STAT_rx_dp_hlb_fetch,
+ EF10_STAT_rx_dp_hlb_wait,
EF10_STAT_COUNT
};
@@ -554,12 +554,29 @@ int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
bool spoofchk);
struct ethtool_ts_info;
-void efx_ptp_probe(struct efx_nic *efx);
-int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd);
+int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel);
+void efx_ptp_defer_probe_with_channel(struct efx_nic *efx);
+void efx_ptp_remove(struct efx_nic *efx);
+int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr);
+int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr);
void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info);
bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
+int efx_ptp_get_mode(struct efx_nic *efx);
+int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
+ unsigned int new_mode);
int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
+size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 *strings);
+size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats);
+void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev);
+void __efx_rx_skb_attach_timestamp(struct efx_channel *channel,
+ struct sk_buff *skb);
+static inline void efx_rx_skb_attach_timestamp(struct efx_channel *channel,
+ struct sk_buff *skb)
+{
+ if (channel->sync_events_state == SYNC_EVENTS_VALID)
+ __efx_rx_skb_attach_timestamp(channel, skb);
+}
void efx_ptp_start_datapath(struct efx_nic *efx);
void efx_ptp_stop_datapath(struct efx_nic *efx);
@@ -678,8 +695,8 @@ int efx_farch_filter_remove_safe(struct efx_nic *efx,
int efx_farch_filter_get_safe(struct efx_nic *efx,
enum efx_filter_priority priority, u32 filter_id,
struct efx_filter_spec *);
-void efx_farch_filter_clear_rx(struct efx_nic *efx,
- enum efx_filter_priority priority);
+int efx_farch_filter_clear_rx(struct efx_nic *efx,
+ enum efx_filter_priority priority);
u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
enum efx_filter_priority priority);
u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx);
@@ -747,10 +764,6 @@ int falcon_reset_xaui(struct efx_nic *efx);
void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
void efx_farch_init_common(struct efx_nic *efx);
void efx_ef10_handle_drain_event(struct efx_nic *efx);
-static inline void efx_nic_push_rx_indir_table(struct efx_nic *efx)
-{
- efx->type->rx_push_indir_table(efx);
-}
void efx_farch_rx_push_indir_table(struct efx_nic *efx);
int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
@@ -774,6 +787,7 @@ size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
const unsigned long *mask, u64 *stats,
const void *dma_buf, bool accumulate);
+void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *stat);
#define EFX_MAX_FLUSH_TIME 5000
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 3dd39dcfe36b..eb75fbd11a01 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -62,7 +62,7 @@
#define SYNCHRONISATION_GRANULARITY_NS 200
/* Minimum permitted length of a (corrected) synchronisation time */
-#define MIN_SYNCHRONISATION_NS 120
+#define DEFAULT_MIN_SYNCHRONISATION_NS 120
/* Maximum permitted length of a (corrected) synchronisation time */
#define MAX_SYNCHRONISATION_NS 1000
@@ -195,26 +195,29 @@ struct efx_ptp_event_rx {
/**
* struct efx_ptp_timeset - Synchronisation between host and MC
* @host_start: Host time immediately before hardware timestamp taken
- * @seconds: Hardware timestamp, seconds
- * @nanoseconds: Hardware timestamp, nanoseconds
+ * @major: Hardware timestamp, major
+ * @minor: Hardware timestamp, minor
* @host_end: Host time immediately after hardware timestamp taken
- * @waitns: Number of nanoseconds between hardware timestamp being read and
+ * @wait: Number of NIC clock ticks between hardware timestamp being read and
* host end time being seen
* @window: Difference of host_end and host_start
* @valid: Whether this timeset is valid
*/
struct efx_ptp_timeset {
u32 host_start;
- u32 seconds;
- u32 nanoseconds;
+ u32 major;
+ u32 minor;
u32 host_end;
- u32 waitns;
+ u32 wait;
u32 window; /* Derived: end - start, allowing for wrap */
};
/**
* struct efx_ptp_data - Precision Time Protocol (PTP) state
- * @channel: The PTP channel
+ * @efx: The NIC context
+ * @channel: The PTP channel (Siena only)
+ * @rx_ts_inline: Flag for whether RX timestamps are inline (else they are
+ * separate events)
* @rxq: Receive queue (awaiting timestamps)
* @txq: Transmit queue
* @evt_list: List of MC receive events awaiting packets
@@ -231,41 +234,42 @@ struct efx_ptp_timeset {
* @config: Current timestamp configuration
* @enabled: PTP operation enabled
* @mode: Mode in which PTP operating (PTP version)
+ * @time_format: Time format supported by this NIC
+ * @ns_to_nic_time: Function to convert from scalar nanoseconds to NIC time
+ * @nic_to_kernel_time: Function to convert from NIC to kernel time
+ * @min_synchronisation_ns: Minimum acceptable corrected sync window
+ * @ts_corrections.tx: Required driver correction of transmit timestamps
+ * @ts_corrections.rx: Required driver correction of receive timestamps
+ * @ts_corrections.pps_out: PPS output error (information only)
+ * @ts_corrections.pps_in: Required driver correction of PPS input timestamps
* @evt_frags: Partly assembled PTP events
* @evt_frag_idx: Current fragment number
* @evt_code: Last event code
* @start: Address at which MC indicates ready for synchronisation
* @host_time_pps: Host time at last PPS
- * @last_sync_ns: Last number of nanoseconds between readings when synchronising
- * @base_sync_ns: Number of nanoseconds for last synchronisation.
- * @base_sync_valid: Whether base_sync_time is valid.
* @current_adjfreq: Current ppb adjustment.
- * @phc_clock: Pointer to registered phc device
+ * @phc_clock: Pointer to registered phc device (if primary function)
* @phc_clock_info: Registration structure for phc device
* @pps_work: pps work task for handling pps events
* @pps_workwq: pps work queue
* @nic_ts_enabled: Flag indicating if NIC generated TS events are handled
* @txbuf: Buffer for use when transmitting (PTP) packets to MC (avoids
* allocations in main data path).
- * @debug_ptp_dir: PTP debugfs directory
- * @missed_rx_sync: Number of packets received without syncrhonisation.
* @good_syncs: Number of successful synchronisations.
- * @no_time_syncs: Number of synchronisations with no good times.
- * @bad_sync_durations: Number of synchronisations with bad durations.
+ * @fast_syncs: Number of synchronisations requiring short delay
* @bad_syncs: Number of failed synchronisations.
- * @last_sync_time: Number of nanoseconds for last synchronisation.
* @sync_timeouts: Number of synchronisation timeouts
- * @fast_syncs: Number of synchronisations requiring short delay
- * @min_sync_delta: Minimum time between event and synchronisation
- * @max_sync_delta: Maximum time between event and synchronisation
- * @average_sync_delta: Average time between event and synchronisation.
- * Modified moving average.
- * @last_sync_delta: Last time between event and synchronisation
- * @mc_stats: Context value for MC statistics
+ * @no_time_syncs: Number of synchronisations with no good times.
+ * @invalid_sync_windows: Number of sync windows with bad durations.
+ * @undersize_sync_windows: Number of corrected sync windows that are too small
+ * @oversize_sync_windows: Number of corrected sync windows that are too large
+ * @rx_no_timestamp: Number of packets received without a timestamp.
* @timeset: Last set of synchronisation statistics.
*/
struct efx_ptp_data {
+ struct efx_nic *efx;
struct efx_channel *channel;
+ bool rx_ts_inline;
struct sk_buff_head rxq;
struct sk_buff_head txq;
struct list_head evt_list;
@@ -282,14 +286,22 @@ struct efx_ptp_data {
struct hwtstamp_config config;
bool enabled;
unsigned int mode;
+ unsigned int time_format;
+ void (*ns_to_nic_time)(s64 ns, u32 *nic_major, u32 *nic_minor);
+ ktime_t (*nic_to_kernel_time)(u32 nic_major, u32 nic_minor,
+ s32 correction);
+ unsigned int min_synchronisation_ns;
+ struct {
+ s32 tx;
+ s32 rx;
+ s32 pps_out;
+ s32 pps_in;
+ } ts_corrections;
efx_qword_t evt_frags[MAX_EVENT_FRAGS];
int evt_frag_idx;
int evt_code;
struct efx_buffer start;
struct pps_event_time host_time_pps;
- unsigned last_sync_ns;
- unsigned base_sync_ns;
- bool base_sync_valid;
s64 current_adjfreq;
struct ptp_clock *phc_clock;
struct ptp_clock_info phc_clock_info;
@@ -297,6 +309,16 @@ struct efx_ptp_data {
struct workqueue_struct *pps_workwq;
bool nic_ts_enabled;
MCDI_DECLARE_BUF(txbuf, MC_CMD_PTP_IN_TRANSMIT_LENMAX);
+
+ unsigned int good_syncs;
+ unsigned int fast_syncs;
+ unsigned int bad_syncs;
+ unsigned int sync_timeouts;
+ unsigned int no_time_syncs;
+ unsigned int invalid_sync_windows;
+ unsigned int undersize_sync_windows;
+ unsigned int oversize_sync_windows;
+ unsigned int rx_no_timestamp;
struct efx_ptp_timeset
timeset[MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM];
};
@@ -309,19 +331,263 @@ static int efx_phc_settime(struct ptp_clock_info *ptp,
static int efx_phc_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *request, int on);
+#define PTP_SW_STAT(ext_name, field_name) \
+ { #ext_name, 0, offsetof(struct efx_ptp_data, field_name) }
+#define PTP_MC_STAT(ext_name, mcdi_name) \
+ { #ext_name, 32, MC_CMD_PTP_OUT_STATUS_STATS_ ## mcdi_name ## _OFST }
+static const struct efx_hw_stat_desc efx_ptp_stat_desc[] = {
+ PTP_SW_STAT(ptp_good_syncs, good_syncs),
+ PTP_SW_STAT(ptp_fast_syncs, fast_syncs),
+ PTP_SW_STAT(ptp_bad_syncs, bad_syncs),
+ PTP_SW_STAT(ptp_sync_timeouts, sync_timeouts),
+ PTP_SW_STAT(ptp_no_time_syncs, no_time_syncs),
+ PTP_SW_STAT(ptp_invalid_sync_windows, invalid_sync_windows),
+ PTP_SW_STAT(ptp_undersize_sync_windows, undersize_sync_windows),
+ PTP_SW_STAT(ptp_oversize_sync_windows, oversize_sync_windows),
+ PTP_SW_STAT(ptp_rx_no_timestamp, rx_no_timestamp),
+ PTP_MC_STAT(ptp_tx_timestamp_packets, TX),
+ PTP_MC_STAT(ptp_rx_timestamp_packets, RX),
+ PTP_MC_STAT(ptp_timestamp_packets, TS),
+ PTP_MC_STAT(ptp_filter_matches, FM),
+ PTP_MC_STAT(ptp_non_filter_matches, NFM),
+};
+#define PTP_STAT_COUNT ARRAY_SIZE(efx_ptp_stat_desc)
+static const unsigned long efx_ptp_stat_mask[] = {
+ [0 ... BITS_TO_LONGS(PTP_STAT_COUNT) - 1] = ~0UL,
+};
+
+size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 *strings)
+{
+ if (!efx->ptp_data)
+ return 0;
+
+ return efx_nic_describe_stats(efx_ptp_stat_desc, PTP_STAT_COUNT,
+ efx_ptp_stat_mask, strings);
+}
+
+size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_STATUS_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_STATUS_LEN);
+ size_t i;
+ int rc;
+
+ if (!efx->ptp_data)
+ return 0;
+
+ /* Copy software statistics */
+ for (i = 0; i < PTP_STAT_COUNT; i++) {
+ if (efx_ptp_stat_desc[i].dma_width)
+ continue;
+ stats[i] = *(unsigned int *)((char *)efx->ptp_data +
+ efx_ptp_stat_desc[i].offset);
+ }
+
+ /* Fetch MC statistics. We *must* fill in all statistics or
+ * risk leaking kernel memory to userland, so if the MCDI
+ * request fails we pretend we got zeroes.
+ */
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_STATUS);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
+ if (rc) {
+ netif_err(efx, hw, efx->net_dev,
+ "MC_CMD_PTP_OP_STATUS failed (%d)\n", rc);
+ memset(outbuf, 0, sizeof(outbuf));
+ }
+ efx_nic_update_stats(efx_ptp_stat_desc, PTP_STAT_COUNT,
+ efx_ptp_stat_mask,
+ stats, _MCDI_PTR(outbuf, 0), false);
+
+ return PTP_STAT_COUNT;
+}
+
+/* For Siena platforms NIC time is in seconds and nanoseconds */
+static void efx_ptp_ns_to_s_ns(s64 ns, u32 *nic_major, u32 *nic_minor)
+{
+ struct timespec ts = ns_to_timespec(ns);
+ *nic_major = ts.tv_sec;
+ *nic_minor = ts.tv_nsec;
+}
+
+static ktime_t efx_ptp_s_ns_to_ktime_correction(u32 nic_major, u32 nic_minor,
+ s32 correction)
+{
+ ktime_t kt = ktime_set(nic_major, nic_minor);
+ if (correction >= 0)
+ kt = ktime_add_ns(kt, (u64)correction);
+ else
+ kt = ktime_sub_ns(kt, (u64)-correction);
+ return kt;
+}
+
+/* To convert from s27 format to ns we multiply by NSEC_PER_SEC and then
+ * divide by a power of 2. The inverse conversion from ns to s27 is likewise
+ * expressed as a multiply and shift.
+ */
+#define S27_TO_NS_SHIFT (27)
+#define NS_TO_S27_MULT (((1ULL << 63) + NSEC_PER_SEC / 2) / NSEC_PER_SEC)
+#define NS_TO_S27_SHIFT (63 - S27_TO_NS_SHIFT)
+#define S27_MINOR_MAX (1 << S27_TO_NS_SHIFT)
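
A worked example of the fixed-point conversion, using only the macros above (values computed by hand; illustrative, not part of the patch):

/* Worked example: 0.5 s = 500000000 ns.
 * ns -> s27: (500000000 * NS_TO_S27_MULT + (1ULL << 35)) >> 36
 *            = 0x4000000, exactly half of S27_MINOR_MAX.
 * s27 -> ns: (0x4000000 * NSEC_PER_SEC + (1ULL << 26)) >> 27
 *            = 500000000 ns, so this round trip is exact; in general the
 *            resolution is one tick of 1 s / 2^27, roughly 7.45 ns.
 */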
+
+/* For Huntington platforms NIC time is in seconds and fractions of a second
+ * where the minor register only uses 27 bits in units of 2^-27s.
+ */
+static void efx_ptp_ns_to_s27(s64 ns, u32 *nic_major, u32 *nic_minor)
+{
+ struct timespec ts = ns_to_timespec(ns);
+ u32 maj = ts.tv_sec;
+ u32 min = (u32)(((u64)ts.tv_nsec * NS_TO_S27_MULT +
+ (1ULL << (NS_TO_S27_SHIFT - 1))) >> NS_TO_S27_SHIFT);
+
+ /* The conversion can result in the minor value exceeding the maximum.
+ * In this case, round up to the next second.
+ */
+ if (min >= S27_MINOR_MAX) {
+ min -= S27_MINOR_MAX;
+ maj++;
+ }
+
+ *nic_major = maj;
+ *nic_minor = min;
+}
+
+static inline ktime_t efx_ptp_s27_to_ktime(u32 nic_major, u32 nic_minor)
+{
+ u32 ns = (u32)(((u64)nic_minor * NSEC_PER_SEC +
+ (1ULL << (S27_TO_NS_SHIFT - 1))) >> S27_TO_NS_SHIFT);
+ return ktime_set(nic_major, ns);
+}
+
+static ktime_t efx_ptp_s27_to_ktime_correction(u32 nic_major, u32 nic_minor,
+ s32 correction)
+{
+ /* Apply the correction and deal with carry */
+ nic_minor += correction;
+ if ((s32)nic_minor < 0) {
+ nic_minor += S27_MINOR_MAX;
+ nic_major--;
+ } else if (nic_minor >= S27_MINOR_MAX) {
+ nic_minor -= S27_MINOR_MAX;
+ nic_major++;
+ }
+
+ return efx_ptp_s27_to_ktime(nic_major, nic_minor);
+}
+
+/* Get PTP attributes and set up time conversions */
+static int efx_ptp_get_attributes(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN);
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ int rc;
+ u32 fmt;
+ size_t out_len;
+
+ /* Get the PTP attributes. If the NIC doesn't support the operation we
+ * use the default format for compatibility with older NICs i.e.
+ * seconds and nanoseconds.
+ */
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_GET_ATTRIBUTES);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &out_len);
+ if (rc == 0)
+ fmt = MCDI_DWORD(outbuf, PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT);
+ else if (rc == -EINVAL)
+ fmt = MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS;
+ else
+ return rc;
+
+ if (fmt == MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION) {
+ ptp->ns_to_nic_time = efx_ptp_ns_to_s27;
+ ptp->nic_to_kernel_time = efx_ptp_s27_to_ktime_correction;
+ } else if (fmt == MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS) {
+ ptp->ns_to_nic_time = efx_ptp_ns_to_s_ns;
+ ptp->nic_to_kernel_time = efx_ptp_s_ns_to_ktime_correction;
+ } else {
+ return -ERANGE;
+ }
+
+ ptp->time_format = fmt;
+
+ /* MC_CMD_PTP_OP_GET_ATTRIBUTES is an extended version of an older
+ * operation MC_CMD_PTP_OP_GET_TIME_FORMAT that also returns a value
+ * to use for the minimum acceptable corrected synchronization window.
+ * If we have the extra information, store it. For older firmware that
+ * does not implement the extended command use the default value.
+ */
+ if (rc == 0 && out_len >= MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN)
+ ptp->min_synchronisation_ns =
+ MCDI_DWORD(outbuf,
+ PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN);
+ else
+ ptp->min_synchronisation_ns = DEFAULT_MIN_SYNCHRONISATION_NS;
+
+ return 0;
+}
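
To show why the conversions are stored as function pointers, here is a minimal sketch of a caller, assuming a round trip through both hooks (the function name is hypothetical; efx_phc_adjtime() and ptp_event_rx() in later hunks follow this pattern):

/* Sketch: nothing outside this file inspects time_format; all time
 * handling goes through the two hooks chosen in efx_ptp_get_attributes(),
 * so Siena (s + ns) and Huntington (s27) differ in exactly one place.
 */
static ktime_t example_ns_round_trip(struct efx_ptp_data *ptp, s64 ns)
{
	u32 nic_major, nic_minor;

	ptp->ns_to_nic_time(ns, &nic_major, &nic_minor);
	return ptp->nic_to_kernel_time(nic_major, nic_minor,
				       ptp->ts_corrections.rx);
}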
+
+/* Get PTP timestamp corrections */
+static int efx_ptp_get_timestamp_corrections(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN);
+ int rc;
+
+ /* Get the timestamp corrections from the NIC. If this operation is
+ * not supported (older NICs) then no correction is required.
+ */
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP,
+ MC_CMD_PTP_OP_GET_TIMESTAMP_CORRECTIONS);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
+ if (rc == 0) {
+ efx->ptp_data->ts_corrections.tx = MCDI_DWORD(outbuf,
+ PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT);
+ efx->ptp_data->ts_corrections.rx = MCDI_DWORD(outbuf,
+ PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE);
+ efx->ptp_data->ts_corrections.pps_out = MCDI_DWORD(outbuf,
+ PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT);
+ efx->ptp_data->ts_corrections.pps_in = MCDI_DWORD(outbuf,
+ PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN);
+ } else if (rc == -EINVAL) {
+ efx->ptp_data->ts_corrections.tx = 0;
+ efx->ptp_data->ts_corrections.rx = 0;
+ efx->ptp_data->ts_corrections.pps_out = 0;
+ efx->ptp_data->ts_corrections.pps_in = 0;
+ } else {
+ return rc;
+ }
+
+ return 0;
+}
+
/* Enable MCDI PTP support. */
static int efx_ptp_enable(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ENABLE_LEN);
+ MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
+ int rc;
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ENABLE);
MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_QUEUE,
- efx->ptp_data->channel->channel);
+ efx->ptp_data->channel ?
+ efx->ptp_data->channel->channel : 0);
MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_MODE, efx->ptp_data->mode);
- return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
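+ /* The MC returns -EALREADY if PTP was already in the requested state;
+ * treat that as success and only log genuine failures below.
+ */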
+ rc = (rc == -EALREADY) ? 0 : rc;
+ if (rc)
+ efx_mcdi_display_error(efx, MC_CMD_PTP,
+ MC_CMD_PTP_IN_ENABLE_LEN,
+ outbuf, sizeof(outbuf), rc);
+ return rc;
}
/* Disable MCDI PTP support.
@@ -332,11 +598,19 @@ static int efx_ptp_enable(struct efx_nic *efx)
static int efx_ptp_disable(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_DISABLE_LEN);
+ MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
+ int rc;
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_DISABLE);
MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
- return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
+ rc = (rc == -EALREADY) ? 0 : rc;
+ if (rc)
+ efx_mcdi_display_error(efx, MC_CMD_PTP,
+ MC_CMD_PTP_IN_DISABLE_LEN,
+ outbuf, sizeof(outbuf), rc);
+ return rc;
}
static void efx_ptp_deliver_rx_queue(struct sk_buff_head *q)
@@ -404,11 +678,10 @@ static void efx_ptp_read_timeset(MCDI_DECLARE_STRUCT_PTR(data),
unsigned start_ns, end_ns;
timeset->host_start = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTSTART);
- timeset->seconds = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_SECONDS);
- timeset->nanoseconds = MCDI_DWORD(data,
- PTP_OUT_SYNCHRONIZE_NANOSECONDS);
+ timeset->major = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_MAJOR);
+ timeset->minor = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_MINOR);
timeset->host_end = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTEND);
- timeset->waitns = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_WAITNS);
+ timeset->wait = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_WAITNS);
/* Ignore seconds */
start_ns = timeset->host_start & MC_NANOSECOND_MASK;
@@ -437,62 +710,73 @@ efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf),
MCDI_VAR_ARRAY_LEN(response_length,
PTP_OUT_SYNCHRONIZE_TIMESET);
unsigned i;
- unsigned total;
unsigned ngood = 0;
unsigned last_good = 0;
struct efx_ptp_data *ptp = efx->ptp_data;
u32 last_sec;
u32 start_sec;
struct timespec delta;
+ ktime_t mc_time;
if (number_readings == 0)
return -EAGAIN;
- /* Read the set of results and increment stats for any results that
- * appera to be erroneous.
+ /* Read the set of results and find the last good host-MC
+ * synchronization result. The MC takes its timestamp when it finishes
+ * reading the host time, so the corrected window time should be fairly
+ * constant for a given platform. Increment stats for any results that
+ * appear to be erroneous.
*/
for (i = 0; i < number_readings; i++) {
+ s32 window, corrected;
+ struct timespec wait;
+
efx_ptp_read_timeset(
MCDI_ARRAY_STRUCT_PTR(synch_buf,
PTP_OUT_SYNCHRONIZE_TIMESET, i),
&ptp->timeset[i]);
- }
- /* Find the last good host-MC synchronization result. The MC times
- * when it finishes reading the host time so the corrected window time
- * should be fairly constant for a given platform.
- */
- total = 0;
- for (i = 0; i < number_readings; i++)
- if (ptp->timeset[i].window > ptp->timeset[i].waitns) {
- unsigned win;
-
- win = ptp->timeset[i].window - ptp->timeset[i].waitns;
- if (win >= MIN_SYNCHRONISATION_NS &&
- win < MAX_SYNCHRONISATION_NS) {
- total += ptp->timeset[i].window;
- ngood++;
- last_good = i;
- }
+ wait = ktime_to_timespec(
+ ptp->nic_to_kernel_time(0, ptp->timeset[i].wait, 0));
+ window = ptp->timeset[i].window;
+ corrected = window - wait.tv_nsec;
+
+ /* We expect the uncorrected synchronization window to be at
+ * least as large as the interval between host start and end
+ * times. If it is smaller than this then it is most likely
+ * a consequence of the host's time being adjusted.
+ * Check that the corrected sync window is in a reasonable
+ * range. If it is out of range it is likely to be because an
+ * interrupt or other delay occurred between reading the system
+ * time and writing it to MC memory.
+ */
+ if (window < SYNCHRONISATION_GRANULARITY_NS) {
+ ++ptp->invalid_sync_windows;
+ } else if (corrected >= MAX_SYNCHRONISATION_NS) {
+ ++ptp->oversize_sync_windows;
+ } else if (corrected < ptp->min_synchronisation_ns) {
+ ++ptp->undersize_sync_windows;
+ } else {
+ ngood++;
+ last_good = i;
}
+ }
if (ngood == 0) {
netif_warn(efx, drv, efx->net_dev,
- "PTP no suitable synchronisations %dns\n",
- ptp->base_sync_ns);
+ "PTP no suitable synchronisations\n");
return -EAGAIN;
}
- /* Average minimum this synchronisation */
- ptp->last_sync_ns = DIV_ROUND_UP(total, ngood);
- if (!ptp->base_sync_valid || (ptp->last_sync_ns < ptp->base_sync_ns)) {
- ptp->base_sync_valid = true;
- ptp->base_sync_ns = ptp->last_sync_ns;
- }
+ /* Convert the NIC time into kernel time. No correction is required;
+ * this time is the output of a firmware process.
+ */
+ mc_time = ptp->nic_to_kernel_time(ptp->timeset[last_good].major,
+ ptp->timeset[last_good].minor, 0);
/* Calculate delay from actual PPS to last_time */
- delta.tv_nsec =
- ptp->timeset[last_good].nanoseconds +
+ delta = ktime_to_timespec(mc_time);
+ delta.tv_nsec +=
last_time->ts_real.tv_nsec -
(ptp->timeset[last_good].host_start & MC_NANOSECOND_MASK);
@@ -553,6 +837,11 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
loops++;
}
+ if (loops <= 1)
+ ++ptp->fast_syncs;
+ if (!time_before(jiffies, timeout))
+ ++ptp->sync_timeouts;
+
if (ACCESS_ONCE(*start))
efx_ptp_send_times(efx, &last_time);
@@ -561,9 +850,20 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
MC_CMD_PTP_IN_SYNCHRONIZE_LEN,
synch_buf, sizeof(synch_buf),
&response_length);
- if (rc == 0)
+ if (rc == 0) {
rc = efx_ptp_process_times(efx, synch_buf, response_length,
&last_time);
+ if (rc == 0)
+ ++ptp->good_syncs;
+ else
+ ++ptp->no_time_syncs;
+ }
+
+ /* Increment the bad syncs counter if synchronization fails, whatever
+ * the reason.
+ */
+ if (rc != 0)
+ ++ptp->bad_syncs;
return rc;
}
@@ -602,9 +902,10 @@ static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb)
goto fail;
memset(&timestamps, 0, sizeof(timestamps));
- timestamps.hwtstamp = ktime_set(
- MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_SECONDS),
- MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_NANOSECONDS));
+ timestamps.hwtstamp = ptp_data->nic_to_kernel_time(
+ MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_MAJOR),
+ MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_MINOR),
+ ptp_data->ts_corrections.tx);
skb_tstamp_tx(skb, &timestamps);
@@ -622,6 +923,9 @@ static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
struct list_head *cursor;
struct list_head *next;
+ if (ptp->rx_ts_inline)
+ return;
+
/* Drop time-expired events */
spin_lock_bh(&ptp->evt_lock);
if (!list_empty(&ptp->evt_list)) {
@@ -655,6 +959,8 @@ static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
struct efx_ptp_match *match;
enum ptp_packet_state rc = PTP_PACKET_STATE_UNMATCHED;
+ WARN_ON_ONCE(ptp->rx_ts_inline);
+
spin_lock_bh(&ptp->evt_lock);
evts_waiting = !list_empty(&ptp->evt_list);
spin_unlock_bh(&ptp->evt_lock);
@@ -696,13 +1002,10 @@ static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
/* Process any queued receive events and corresponding packets
*
* q is returned with all the packets that are ready for delivery.
- * true is returned if at least one of those packets requires
- * synchronisation.
*/
-static bool efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
+static void efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
{
struct efx_ptp_data *ptp = efx->ptp_data;
- bool rc = false;
struct sk_buff *skb;
while ((skb = skb_dequeue(&ptp->rxq))) {
@@ -713,13 +1016,10 @@ static bool efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
__skb_queue_tail(q, skb);
} else if (efx_ptp_match_rx(efx, skb) ==
PTP_PACKET_STATE_MATCHED) {
- rc = true;
__skb_queue_tail(q, skb);
} else if (time_after(jiffies, match->expiry)) {
match->state = PTP_PACKET_STATE_TIMED_OUT;
- if (net_ratelimit())
- netif_warn(efx, rx_err, efx->net_dev,
- "PTP packet - no timestamp seen\n");
+ ++ptp->rx_no_timestamp;
__skb_queue_tail(q, skb);
} else {
/* Replace unprocessed entry and stop */
@@ -727,8 +1027,6 @@ static bool efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
break;
}
}
-
- return rc;
}
/* Complete processing of a received packet */
@@ -739,13 +1037,27 @@ static inline void efx_ptp_process_rx(struct efx_nic *efx, struct sk_buff *skb)
local_bh_enable();
}
-static int efx_ptp_start(struct efx_nic *efx)
+static void efx_ptp_remove_multicast_filters(struct efx_nic *efx)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
+
+ if (ptp->rxfilter_installed) {
+ efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
+ ptp->rxfilter_general);
+ efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
+ ptp->rxfilter_event);
+ ptp->rxfilter_installed = false;
+ }
+}
+
+static int efx_ptp_insert_multicast_filters(struct efx_nic *efx)
{
struct efx_ptp_data *ptp = efx->ptp_data;
struct efx_filter_spec rxfilter;
int rc;
- ptp->reset_required = false;
+ if (!ptp->channel || ptp->rxfilter_installed)
+ return 0;
/* Must filter on both event and general ports to ensure
* that there is no packet re-ordering.
@@ -778,23 +1090,37 @@ static int efx_ptp_start(struct efx_nic *efx)
goto fail;
ptp->rxfilter_general = rc;
+ ptp->rxfilter_installed = true;
+ return 0;
+
+fail:
+ efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
+ ptp->rxfilter_event);
+ return rc;
+}
+
+static int efx_ptp_start(struct efx_nic *efx)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ int rc;
+
+ ptp->reset_required = false;
+
+ rc = efx_ptp_insert_multicast_filters(efx);
+ if (rc)
+ return rc;
+
rc = efx_ptp_enable(efx);
if (rc != 0)
- goto fail2;
+ goto fail;
ptp->evt_frag_idx = 0;
ptp->current_adjfreq = 0;
- ptp->rxfilter_installed = true;
return 0;
-fail2:
- efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
- ptp->rxfilter_general);
fail:
- efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
- ptp->rxfilter_event);
-
+ efx_ptp_remove_multicast_filters(efx);
return rc;
}
@@ -810,13 +1136,7 @@ static int efx_ptp_stop(struct efx_nic *efx)
rc = efx_ptp_disable(efx);
- if (ptp->rxfilter_installed) {
- efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
- ptp->rxfilter_general);
- efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
- ptp->rxfilter_event);
- ptp->rxfilter_installed = false;
- }
+ efx_ptp_remove_multicast_filters(efx);
/* Make sure RX packets are really delivered */
efx_ptp_deliver_rx_queue(&efx->ptp_data->rxq);
@@ -844,7 +1164,7 @@ static void efx_ptp_pps_worker(struct work_struct *work)
{
struct efx_ptp_data *ptp =
container_of(work, struct efx_ptp_data, pps_work);
- struct efx_nic *efx = ptp->channel->efx;
+ struct efx_nic *efx = ptp->efx;
struct ptp_clock_event ptp_evt;
if (efx_ptp_synchronize(efx, PTP_SYNC_ATTEMPTS))
@@ -855,13 +1175,11 @@ static void efx_ptp_pps_worker(struct work_struct *work)
ptp_clock_event(ptp->phc_clock, &ptp_evt);
}
-/* Process any pending transmissions and timestamp any received packets.
- */
static void efx_ptp_worker(struct work_struct *work)
{
struct efx_ptp_data *ptp_data =
container_of(work, struct efx_ptp_data, work);
- struct efx_nic *efx = ptp_data->channel->efx;
+ struct efx_nic *efx = ptp_data->efx;
struct sk_buff *skb;
struct sk_buff_head tempq;
@@ -874,42 +1192,50 @@ static void efx_ptp_worker(struct work_struct *work)
efx_ptp_drop_time_expired_events(efx);
__skb_queue_head_init(&tempq);
- if (efx_ptp_process_events(efx, &tempq) ||
- !skb_queue_empty(&ptp_data->txq)) {
+ efx_ptp_process_events(efx, &tempq);
- while ((skb = skb_dequeue(&ptp_data->txq)))
- efx_ptp_xmit_skb(efx, skb);
- }
+ while ((skb = skb_dequeue(&ptp_data->txq)))
+ efx_ptp_xmit_skb(efx, skb);
while ((skb = __skb_dequeue(&tempq)))
efx_ptp_process_rx(efx, skb);
}
-/* Initialise PTP channel and state.
- *
- * Setting core_index to zero causes the queue to be initialised and doesn't
- * overlap with 'rxq0' because ptp.c doesn't use skb_record_rx_queue.
- */
-static int efx_ptp_probe_channel(struct efx_channel *channel)
+static const struct ptp_clock_info efx_phc_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "sfc",
+ .max_adj = MAX_PPB,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
+ .pps = 1,
+ .adjfreq = efx_phc_adjfreq,
+ .adjtime = efx_phc_adjtime,
+ .gettime = efx_phc_gettime,
+ .settime = efx_phc_settime,
+ .enable = efx_phc_enable,
+};
+
+/* Initialise PTP state. */
+int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel)
{
- struct efx_nic *efx = channel->efx;
struct efx_ptp_data *ptp;
int rc = 0;
unsigned int pos;
- channel->irq_moderation = 0;
- channel->rx_queue.core_index = 0;
-
ptp = kzalloc(sizeof(struct efx_ptp_data), GFP_KERNEL);
efx->ptp_data = ptp;
if (!efx->ptp_data)
return -ENOMEM;
+ ptp->efx = efx;
+ ptp->channel = channel;
+ ptp->rx_ts_inline = efx_nic_rev(efx) >= EFX_REV_HUNT_A0;
+
rc = efx_nic_alloc_buffer(efx, &ptp->start, sizeof(int), GFP_KERNEL);
if (rc != 0)
goto fail1;
- ptp->channel = channel;
skb_queue_head_init(&ptp->rxq);
skb_queue_head_init(&ptp->txq);
ptp->workwq = create_singlethread_workqueue("sfc_ptp");
@@ -929,33 +1255,32 @@ static int efx_ptp_probe_channel(struct efx_channel *channel)
list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list);
ptp->evt_overflow = false;
- ptp->phc_clock_info.owner = THIS_MODULE;
- snprintf(ptp->phc_clock_info.name,
- sizeof(ptp->phc_clock_info.name),
- "%pm", efx->net_dev->perm_addr);
- ptp->phc_clock_info.max_adj = MAX_PPB;
- ptp->phc_clock_info.n_alarm = 0;
- ptp->phc_clock_info.n_ext_ts = 0;
- ptp->phc_clock_info.n_per_out = 0;
- ptp->phc_clock_info.pps = 1;
- ptp->phc_clock_info.adjfreq = efx_phc_adjfreq;
- ptp->phc_clock_info.adjtime = efx_phc_adjtime;
- ptp->phc_clock_info.gettime = efx_phc_gettime;
- ptp->phc_clock_info.settime = efx_phc_settime;
- ptp->phc_clock_info.enable = efx_phc_enable;
-
- ptp->phc_clock = ptp_clock_register(&ptp->phc_clock_info,
- &efx->pci_dev->dev);
- if (IS_ERR(ptp->phc_clock)) {
- rc = PTR_ERR(ptp->phc_clock);
+ /* Get the NIC PTP attributes and set up time conversions */
+ rc = efx_ptp_get_attributes(efx);
+ if (rc < 0)
goto fail3;
- }
- INIT_WORK(&ptp->pps_work, efx_ptp_pps_worker);
- ptp->pps_workwq = create_singlethread_workqueue("sfc_pps");
- if (!ptp->pps_workwq) {
- rc = -ENOMEM;
- goto fail4;
+ /* Get the timestamp corrections */
+ rc = efx_ptp_get_timestamp_corrections(efx);
+ if (rc < 0)
+ goto fail3;
+
+ if (efx->mcdi->fn_flags &
+ (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY)) {
+ ptp->phc_clock_info = efx_phc_clock_info;
+ ptp->phc_clock = ptp_clock_register(&ptp->phc_clock_info,
+ &efx->pci_dev->dev);
+ if (IS_ERR(ptp->phc_clock)) {
+ rc = PTR_ERR(ptp->phc_clock);
+ goto fail3;
+ }
+
+ INIT_WORK(&ptp->pps_work, efx_ptp_pps_worker);
+ ptp->pps_workwq = create_singlethread_workqueue("sfc_pps");
+ if (!ptp->pps_workwq) {
+ rc = -ENOMEM;
+ goto fail4;
+ }
}
ptp->nic_ts_enabled = false;
@@ -976,14 +1301,27 @@ fail1:
return rc;
}
-static void efx_ptp_remove_channel(struct efx_channel *channel)
+/* Initialise PTP channel.
+ *
+ * Setting core_index to zero causes the queue to be initialised and doesn't
+ * overlap with 'rxq0' because ptp.c doesn't use skb_record_rx_queue.
+ */
+static int efx_ptp_probe_channel(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
+ channel->irq_moderation = 0;
+ channel->rx_queue.core_index = 0;
+
+ return efx_ptp_probe(efx, channel);
+}
+
+void efx_ptp_remove(struct efx_nic *efx)
+{
if (!efx->ptp_data)
return;
- (void)efx_ptp_disable(channel->efx);
+ (void)efx_ptp_disable(efx);
cancel_work_sync(&efx->ptp_data->work);
cancel_work_sync(&efx->ptp_data->pps_work);
@@ -991,15 +1329,22 @@ static void efx_ptp_remove_channel(struct efx_channel *channel)
skb_queue_purge(&efx->ptp_data->rxq);
skb_queue_purge(&efx->ptp_data->txq);
- ptp_clock_unregister(efx->ptp_data->phc_clock);
+ if (efx->ptp_data->phc_clock) {
+ destroy_workqueue(efx->ptp_data->pps_workwq);
+ ptp_clock_unregister(efx->ptp_data->phc_clock);
+ }
destroy_workqueue(efx->ptp_data->workwq);
- destroy_workqueue(efx->ptp_data->pps_workwq);
efx_nic_free_buffer(efx, &efx->ptp_data->start);
kfree(efx->ptp_data);
}
+static void efx_ptp_remove_channel(struct efx_channel *channel)
+{
+ efx_ptp_remove(channel->efx);
+}
+
static void efx_ptp_get_channel_name(struct efx_channel *channel,
char *buf, size_t len)
{
@@ -1080,14 +1425,8 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
/* Does this packet require timestamping? */
if (ntohs(*(__be16 *)&skb->data[PTP_DPORT_OFFSET]) == PTP_EVENT_PORT) {
- struct skb_shared_hwtstamps *timestamps;
-
match->state = PTP_PACKET_STATE_UNMATCHED;
- /* Clear all timestamps held: filled in later */
- timestamps = skb_hwtstamps(skb);
- memset(timestamps, 0, sizeof(*timestamps));
-
/* We expect the sequence number to be in the same position in
* the packet for PTP V1 and V2
*/
@@ -1132,8 +1471,13 @@ int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
return NETDEV_TX_OK;
}
-static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
- unsigned int new_mode)
+int efx_ptp_get_mode(struct efx_nic *efx)
+{
+ return efx->ptp_data->mode;
+}
+
+int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
+ unsigned int new_mode)
{
if ((enable_wanted != efx->ptp_data->enabled) ||
(enable_wanted && (efx->ptp_data->mode != new_mode))) {
@@ -1177,8 +1521,6 @@ static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init)
{
- bool enable_wanted = false;
- unsigned int new_mode;
int rc;
if (init->flags)
@@ -1188,63 +1530,20 @@ static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init)
(init->tx_type != HWTSTAMP_TX_ON))
return -ERANGE;
- new_mode = efx->ptp_data->mode;
- /* Determine whether any PTP HW operations are required */
- switch (init->rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- break;
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- init->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
- new_mode = MC_CMD_PTP_MODE_V1;
- enable_wanted = true;
- break;
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- /* Although these three are accepted only IPV4 packets will be
- * timestamped
- */
- init->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
- new_mode = MC_CMD_PTP_MODE_V2_ENHANCED;
- enable_wanted = true;
- break;
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
- /* Non-IP + IPv6 timestamping not supported */
- return -ERANGE;
- break;
- default:
- return -ERANGE;
- }
-
- if (init->tx_type != HWTSTAMP_TX_OFF)
- enable_wanted = true;
-
- /* Old versions of the firmware do not support the improved
- * UUID filtering option (SF bug 33070). If the firmware does
- * not accept the enhanced mode, fall back to the standard PTP
- * v2 UUID filtering.
- */
- rc = efx_ptp_change_mode(efx, enable_wanted, new_mode);
- if ((rc != 0) && (new_mode == MC_CMD_PTP_MODE_V2_ENHANCED))
- rc = efx_ptp_change_mode(efx, enable_wanted, MC_CMD_PTP_MODE_V2);
- if (rc != 0)
+ rc = efx->type->ptp_set_ts_config(efx, init);
+ if (rc)
return rc;
efx->ptp_data->config = *init;
-
return 0;
}
void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info)
{
struct efx_ptp_data *ptp = efx->ptp_data;
+ struct efx_nic *primary = efx->primary;
+
+ ASSERT_RTNL();
if (!ptp)
return;
@@ -1252,18 +1551,14 @@ void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info)
ts_info->so_timestamping |= (SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE);
- ts_info->phc_index = ptp_clock_index(ptp->phc_clock);
+ if (primary && primary->ptp_data && primary->ptp_data->phc_clock)
+ ts_info->phc_index =
+ ptp_clock_index(primary->ptp_data->phc_clock);
ts_info->tx_types = 1 << HWTSTAMP_TX_OFF | 1 << HWTSTAMP_TX_ON;
- ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE |
- 1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT |
- 1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC |
- 1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ |
- 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT |
- 1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC |
- 1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
+ ts_info->rx_filters = ptp->efx->type->hwtstamp_filters;
}
-int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd)
+int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr)
{
struct hwtstamp_config config;
int rc;
@@ -1283,6 +1578,15 @@ int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd)
? -EFAULT : 0;
}
+int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr)
+{
+ if (!efx->ptp_data)
+ return -EOPNOTSUPP;
+
+ return copy_to_user(ifr->ifr_data, &efx->ptp_data->config,
+ sizeof(efx->ptp_data->config)) ? -EFAULT : 0;
+}
+
static void ptp_event_failure(struct efx_nic *efx, int expected_frag_len)
{
struct efx_ptp_data *ptp = efx->ptp_data;
@@ -1302,6 +1606,9 @@ static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp)
{
struct efx_ptp_event_rx *evt = NULL;
+ if (WARN_ON_ONCE(ptp->rx_ts_inline))
+ return;
+
if (ptp->evt_frag_idx != 3) {
ptp_event_failure(efx, 3);
return;
@@ -1320,9 +1627,10 @@ static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp)
MCDI_EVENT_SRC) << 8) |
(EFX_QWORD_FIELD(ptp->evt_frags[0],
MCDI_EVENT_SRC) << 16));
- evt->hwtimestamp = ktime_set(
+ evt->hwtimestamp = efx->ptp_data->nic_to_kernel_time(
EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA),
- EFX_QWORD_FIELD(ptp->evt_frags[1], MCDI_EVENT_DATA));
+ EFX_QWORD_FIELD(ptp->evt_frags[1], MCDI_EVENT_DATA),
+ ptp->ts_corrections.rx);
evt->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS);
list_add_tail(&evt->link, &ptp->evt_list);
@@ -1397,12 +1705,99 @@ void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev)
}
}
+void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev)
+{
+ channel->sync_timestamp_major = MCDI_EVENT_FIELD(*ev, PTP_TIME_MAJOR);
+ channel->sync_timestamp_minor =
+ MCDI_EVENT_FIELD(*ev, PTP_TIME_MINOR_26_19) << 19;
+ /* If sync events have been disabled then we want to silently ignore
+ * this event, so throw away the result.
+ */
+ (void) cmpxchg(&channel->sync_events_state, SYNC_EVENTS_REQUESTED,
+ SYNC_EVENTS_VALID);
+}
+
+/* make some assumptions about the time representation rather than abstract it,
+ * since we currently only support one type of inline timestamping and only on
+ * EF10.
+ */
+#define MINOR_TICKS_PER_SECOND 0x8000000
+/* Fuzz factor for sync events to be out of order with RX events */
+#define FUZZ (MINOR_TICKS_PER_SECOND / 10)
+#define EXPECTED_SYNC_EVENTS_PER_SECOND 4
+
+static inline u32 efx_rx_buf_timestamp_minor(struct efx_nic *efx, const u8 *eh)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_ts_offset));
+#else
+ const u8 *data = eh + efx->rx_packet_ts_offset;
+ return (u32)data[0] |
+ (u32)data[1] << 8 |
+ (u32)data[2] << 16 |
+ (u32)data[3] << 24;
+#endif
+}
+
+void __efx_rx_skb_attach_timestamp(struct efx_channel *channel,
+ struct sk_buff *skb)
+{
+ struct efx_nic *efx = channel->efx;
+ u32 pkt_timestamp_major, pkt_timestamp_minor;
+ u32 diff, carry;
+ struct skb_shared_hwtstamps *timestamps;
+
+ pkt_timestamp_minor = (efx_rx_buf_timestamp_minor(efx,
+ skb_mac_header(skb)) +
+ (u32) efx->ptp_data->ts_corrections.rx) &
+ (MINOR_TICKS_PER_SECOND - 1);
+
+ /* get the difference between the packet and sync timestamps,
+ * modulo one second
+ */
+ diff = (pkt_timestamp_minor - channel->sync_timestamp_minor) &
+ (MINOR_TICKS_PER_SECOND - 1);
+ /* do we roll over a second boundary and need to carry the one? */
+ carry = channel->sync_timestamp_minor + diff > MINOR_TICKS_PER_SECOND ?
+ 1 : 0;
+
+ if (diff <= MINOR_TICKS_PER_SECOND / EXPECTED_SYNC_EVENTS_PER_SECOND +
+ FUZZ) {
+ /* packet is ahead of the sync event by a quarter of a second or
+ * less (allowing for fuzz)
+ */
+ pkt_timestamp_major = channel->sync_timestamp_major + carry;
+ } else if (diff >= MINOR_TICKS_PER_SECOND - FUZZ) {
+ /* packet is behind the sync event but within the fuzz factor.
+ * This means the RX packet and sync event crossed as they were
+ * placed on the event queue, which can sometimes happen.
+ */
+ pkt_timestamp_major = channel->sync_timestamp_major - 1 + carry;
+ } else {
+ /* It's outside tolerance in both directions. This might indicate
+ * that we are missing sync events for some reason, so
+ * we'll call it an error rather than risk giving a bogus
+ * timestamp.
+ */
+ netif_vdbg(efx, drv, efx->net_dev,
+ "packet timestamp %x too far from sync event %x:%x\n",
+ pkt_timestamp_minor, channel->sync_timestamp_major,
+ channel->sync_timestamp_minor);
+ return;
+ }
+
+ /* attach the timestamps to the skb */
+ timestamps = skb_hwtstamps(skb);
+ timestamps->hwtstamp =
+ efx_ptp_s27_to_ktime(pkt_timestamp_major, pkt_timestamp_minor);
+}
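
A worked example of the reconstruction above (values chosen for illustration):

/* Example: MINOR_TICKS_PER_SECOND = 2^27 = 0x8000000. If the last sync
 * event was major = 100, minor = 0x7F00000 and a packet carries
 * minor = 0x0100000, then:
 *   diff  = (0x0100000 - 0x7F00000) & 0x7FFFFFF = 0x0200000
 *   carry = 1, because 0x7F00000 + 0x0200000 > 0x8000000
 * diff is well inside a quarter second plus FUZZ, so the packet gets
 * major = 101: it arrived just after a second boundary that the sync
 * event preceded.
 */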
+
static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
{
struct efx_ptp_data *ptp_data = container_of(ptp,
struct efx_ptp_data,
phc_clock_info);
- struct efx_nic *efx = ptp_data->channel->efx;
+ struct efx_nic *efx = ptp_data->efx;
MCDI_DECLARE_BUF(inadj, MC_CMD_PTP_IN_ADJUST_LEN);
s64 adjustment_ns;
int rc;
@@ -1432,18 +1827,20 @@ static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
+ u32 nic_major, nic_minor;
struct efx_ptp_data *ptp_data = container_of(ptp,
struct efx_ptp_data,
phc_clock_info);
- struct efx_nic *efx = ptp_data->channel->efx;
- struct timespec delta_ts = ns_to_timespec(delta);
+ struct efx_nic *efx = ptp_data->efx;
MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ADJUST_LEN);
+ efx->ptp_data->ns_to_nic_time(delta, &nic_major, &nic_minor);
+
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, ptp_data->current_adjfreq);
- MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_SECONDS, (u32)delta_ts.tv_sec);
- MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_NANOSECONDS, (u32)delta_ts.tv_nsec);
+ MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_MAJOR, nic_major);
+ MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_MINOR, nic_minor);
return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
NULL, 0, NULL);
}
@@ -1453,10 +1850,11 @@ static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
struct efx_ptp_data *ptp_data = container_of(ptp,
struct efx_ptp_data,
phc_clock_info);
- struct efx_nic *efx = ptp_data->channel->efx;
+ struct efx_nic *efx = ptp_data->efx;
MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_READ_NIC_TIME_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_READ_NIC_TIME_LEN);
int rc;
+ ktime_t kt;
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_READ_NIC_TIME);
MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
@@ -1466,8 +1864,10 @@ static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
if (rc != 0)
return rc;
- ts->tv_sec = MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_SECONDS);
- ts->tv_nsec = MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_NANOSECONDS);
+ kt = ptp_data->nic_to_kernel_time(
+ MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_MAJOR),
+ MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_MINOR), 0);
+ *ts = ktime_to_timespec(kt);
return 0;
}
@@ -1519,7 +1919,7 @@ static const struct efx_channel_type efx_ptp_channel_type = {
.keep_eventq = false,
};
-void efx_ptp_probe(struct efx_nic *efx)
+void efx_ptp_defer_probe_with_channel(struct efx_nic *efx)
{
/* Check whether PTP is implemented on this NIC. The DISABLE
* operation will succeed if and only if it is implemented.
@@ -1533,9 +1933,15 @@ void efx_ptp_start_datapath(struct efx_nic *efx)
{
if (efx_ptp_restart(efx))
netif_err(efx, drv, efx->net_dev, "Failed to restart PTP.\n");
+ /* re-enable timestamping if it was previously enabled */
+ if (efx->type->ptp_set_ts_sync_events)
+ efx->type->ptp_set_ts_sync_events(efx, true, true);
}
void efx_ptp_stop_datapath(struct efx_nic *efx)
{
+ /* temporarily disable timestamping */
+ if (efx->type->ptp_set_ts_sync_events)
+ efx->type->ptp_set_ts_sync_events(efx, false, true);
efx_ptp_stop(efx);
}
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 42488df1f4ec..48588ddf81b0 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -149,7 +149,7 @@ static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
* 0 on success. If a single page can be used for multiple buffers,
* then the page will either be inserted fully, or not at all.
*/
-static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
+static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
{
struct efx_nic *efx = rx_queue->efx;
struct efx_rx_buffer *rx_buf;
@@ -163,7 +163,8 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
do {
page = efx_reuse_page(rx_queue);
if (page == NULL) {
- page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
+ page = alloc_pages(__GFP_COLD | __GFP_COMP |
+ (atomic ? GFP_ATOMIC : GFP_KERNEL),
efx->rx_buffer_order);
if (unlikely(page == NULL))
return -ENOMEM;
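
A brief note on the new atomic flag, inferred from this hunk and the efx_fast_push_rx_descriptors() change below:

/* Refill paths that run in NAPI/softirq context must not sleep and pass
 * atomic = true to get GFP_ATOMIC; process-context callers can pass
 * false and use GFP_KERNEL, which may reclaim memory and is less likely
 * to fail under pressure.
 */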
@@ -321,7 +322,7 @@ static void efx_discard_rx_packet(struct efx_channel *channel,
* this means this function must run from the NAPI handler, or be called
* when NAPI is disabled.
*/
-void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
{
struct efx_nic *efx = rx_queue->efx;
unsigned int fill_level, batch_size;
@@ -354,7 +355,7 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
do {
- rc = efx_init_rx_buffers(rx_queue);
+ rc = efx_init_rx_buffers(rx_queue, atomic);
if (unlikely(rc)) {
/* Ensure that we don't leave the rx queue empty */
if (rx_queue->added_count == rx_queue->removed_count)
@@ -439,7 +440,8 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
}
if (efx->net_dev->features & NETIF_F_RXHASH)
- skb->rxhash = efx_rx_buf_hash(efx, eh);
+ skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
+ PKT_HASH_TYPE_L3);
skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
@@ -475,14 +477,18 @@ static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
struct sk_buff *skb;
/* Allocate an SKB to store the headers */
- skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN);
+ skb = netdev_alloc_skb(efx->net_dev,
+ efx->rx_ip_align + efx->rx_prefix_size +
+ hdr_len);
if (unlikely(skb == NULL))
return NULL;
EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);
- skb_reserve(skb, EFX_PAGE_SKB_ALIGN);
- memcpy(__skb_put(skb, hdr_len), eh, hdr_len);
+ memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
+ efx->rx_prefix_size + hdr_len);
+ skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
+ __skb_put(skb, hdr_len);
/* Append the remaining page(s) onto the frag list */
if (rx_buf->len > hdr_len) {
@@ -619,6 +625,8 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED))
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ efx_rx_skb_attach_timestamp(channel, skb);
+
if (channel->type->receive_skb)
if (channel->type->receive_skb(channel, skb))
return;
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index 144bbff5a4ae..26641817a9c7 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -722,7 +722,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
return rc_reset;
}
- if ((tests->registers < 0) && !rc_test)
+ if ((tests->memory < 0 || tests->registers < 0) && !rc_test)
rc_test = -EIO;
}
diff --git a/drivers/net/ethernet/sfc/selftest.h b/drivers/net/ethernet/sfc/selftest.h
index a2f4a06ffa4e..009dbe88f3be 100644
--- a/drivers/net/ethernet/sfc/selftest.h
+++ b/drivers/net/ethernet/sfc/selftest.h
@@ -38,6 +38,7 @@ struct efx_self_tests {
int eventq_dma[EFX_MAX_CHANNELS];
int eventq_int[EFX_MAX_CHANNELS];
/* offline tests */
+ int memory;
int registers;
int phy_ext[EFX_MAX_PHY_TESTS];
struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1];
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index d034bcd124ef..23f3a6f7737a 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -118,6 +118,54 @@ out:
/**************************************************************************
*
+ * PTP
+ *
+ **************************************************************************
+ */
+
+static void siena_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
+{
+ _efx_writed(efx, cpu_to_le32(host_time),
+ FR_CZ_MC_TREG_SMEM + MC_SMEM_P0_PTP_TIME_OFST);
+}
+
+static int siena_ptp_set_ts_config(struct efx_nic *efx,
+ struct hwtstamp_config *init)
+{
+ int rc;
+
+ switch (init->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ /* if TX timestamping is still requested then leave PTP on */
+ return efx_ptp_change_mode(efx,
+ init->tx_type != HWTSTAMP_TX_OFF,
+ efx_ptp_get_mode(efx));
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ init->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ return efx_ptp_change_mode(efx, true, MC_CMD_PTP_MODE_V1);
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ init->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ rc = efx_ptp_change_mode(efx, true,
+ MC_CMD_PTP_MODE_V2_ENHANCED);
+ /* bug 33070 - old versions of the firmware do not support the
+ * improved UUID filtering option. Similarly old versions of the
+ * application do not expect it to be enabled. If the firmware
+ * does not accept the enhanced mode, fall back to the standard
+ * PTP v2 UUID filtering. */
+ if (rc != 0)
+ rc = efx_ptp_change_mode(efx, true, MC_CMD_PTP_MODE_V2);
+ return rc;
+ default:
+ return -ERANGE;
+ }
+}
+
+/**************************************************************************
+ *
* Device reset
*
**************************************************************************
@@ -259,7 +307,7 @@ static int siena_probe_nic(struct efx_nic *efx)
goto fail5;
efx_sriov_probe(efx);
- efx_ptp_probe(efx);
+ efx_ptp_defer_probe_with_channel(efx);
return 0;
@@ -273,6 +321,31 @@ fail1:
return rc;
}
+static void siena_rx_push_rss_config(struct efx_nic *efx)
+{
+ efx_oword_t temp;
+
+ /* Set hash key for IPv4 */
+ memcpy(&temp, efx->rx_hash_key, sizeof(temp));
+ efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
+
+ /* Enable IPv6 RSS */
+ BUILD_BUG_ON(sizeof(efx->rx_hash_key) <
+ 2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 ||
+ FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0);
+ memcpy(&temp, efx->rx_hash_key, sizeof(temp));
+ efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1);
+ memcpy(&temp, efx->rx_hash_key + sizeof(temp), sizeof(temp));
+ efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2);
+ EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1,
+ FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1);
+ memcpy(&temp, efx->rx_hash_key + 2 * sizeof(temp),
+ FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
+ efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
+
+ efx_farch_rx_push_indir_table(efx);
+}
+
/* This call performs hardware-specific global initialisation, such as
* defining the descriptor cache sizes and number of RSS channels.
* It does not set up any buffers, descriptor rings or event queues.
@@ -313,23 +386,7 @@ static int siena_init_nic(struct efx_nic *efx)
EFX_RX_USR_BUF_SIZE >> 5);
efx_writeo(efx, &temp, FR_AZ_RX_CFG);
- /* Set hash key for IPv4 */
- memcpy(&temp, efx->rx_hash_key, sizeof(temp));
- efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
-
- /* Enable IPv6 RSS */
- BUILD_BUG_ON(sizeof(efx->rx_hash_key) <
- 2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 ||
- FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0);
- memcpy(&temp, efx->rx_hash_key, sizeof(temp));
- efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1);
- memcpy(&temp, efx->rx_hash_key + sizeof(temp), sizeof(temp));
- efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2);
- EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1,
- FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1);
- memcpy(&temp, efx->rx_hash_key + 2 * sizeof(temp),
- FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
- efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
+ siena_rx_push_rss_config(efx);
/* Enable event logging */
rc = efx_mcdi_log_ctrl(efx, true, false, 0);
@@ -458,6 +515,8 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
return -EAGAIN;
/* Update derived statistics */
+ efx_nic_fix_nodesc_drop_stat(efx,
+ &stats[SIENA_STAT_rx_nodesc_drop_cnt]);
efx_update_diff_stat(&stats[SIENA_STAT_tx_good_bytes],
stats[SIENA_STAT_tx_bytes] -
stats[SIENA_STAT_tx_bad_bytes]);
@@ -837,19 +896,6 @@ fail:
/**************************************************************************
*
- * PTP
- *
- **************************************************************************
- */
-
-static void siena_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
-{
- _efx_writed(efx, cpu_to_le32(host_time),
- FR_CZ_MC_TREG_SMEM + MC_SMEM_P0_PTP_TIME_OFST);
-}
-
-/**************************************************************************
- *
* Revision-dependent attributes used by efx.c and nic.c
*
**************************************************************************
@@ -878,6 +924,7 @@ const struct efx_nic_type siena_a0_nic_type = {
.describe_stats = siena_describe_nic_stats,
.update_stats = siena_update_nic_stats,
.start_stats = efx_mcdi_mac_start_stats,
+ .pull_stats = efx_mcdi_mac_pull_stats,
.stop_stats = efx_mcdi_mac_stop_stats,
.set_id_led = efx_mcdi_set_id_led,
.push_irq_moderation = siena_push_irq_moderation,
@@ -902,7 +949,7 @@ const struct efx_nic_type siena_a0_nic_type = {
.tx_init = efx_farch_tx_init,
.tx_remove = efx_farch_tx_remove,
.tx_write = efx_farch_tx_write,
- .rx_push_indir_table = efx_farch_rx_push_indir_table,
+ .rx_push_rss_config = siena_rx_push_rss_config,
.rx_probe = efx_farch_rx_probe,
.rx_init = efx_farch_rx_init,
.rx_remove = efx_farch_rx_remove,
@@ -939,6 +986,7 @@ const struct efx_nic_type siena_a0_nic_type = {
.mtd_sync = efx_mcdi_mtd_sync,
#endif
.ptp_write_host_time = siena_ptp_write_host_time,
+ .ptp_set_ts_config = siena_ptp_set_ts_config,
.revision = EFX_REV_SIENA_A0,
.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
@@ -957,4 +1005,11 @@ const struct efx_nic_type siena_a0_nic_type = {
NETIF_F_RXHASH | NETIF_F_NTUPLE),
.mcdi_max_ver = 1,
.max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
+ .hwtstamp_filters = (1 << HWTSTAMP_FILTER_NONE |
+ 1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT |
+ 1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC |
+ 1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ |
+ 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT |
+ 1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC |
+ 1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ),
};
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index ffa78432164d..7984ad05357d 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -30,7 +30,6 @@
#define IOC3_NAME "ioc3-eth"
#define IOC3_VERSION "2.6.3-4"
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/mm.h>
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 513ed8b1ba58..5564a5fa3385 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -10,7 +10,6 @@
*/
#include <linux/delay.h>
#include <linux/dma-mapping.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 975dc2d8e548..ff57a46388ee 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -576,7 +576,6 @@ err_unmap_tx:
err_out_unmap:
pci_iounmap(pci_dev, ioaddr);
err_out_cleardev:
- pci_set_drvdata(pci_dev, NULL);
pci_release_regions(pci_dev);
err_out:
free_netdev(net_dev);
@@ -2427,7 +2426,6 @@ static void sis900_remove(struct pci_dev *pci_dev)
pci_iounmap(pci_dev, sis_priv->ioaddr);
free_netdev(net_dev);
pci_release_regions(pci_dev);
- pci_set_drvdata(pci_dev, NULL);
}
#ifdef CONFIG_PM
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 0f096a890059..c50fb08c9905 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -17,8 +17,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Arguments:
* watchdog = TX watchdog timeout
@@ -55,7 +54,6 @@ static const char version[] =
)
#endif
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
diff --git a/drivers/net/ethernet/smsc/smc911x.h b/drivers/net/ethernet/smsc/smc911x.h
index 9965da39281b..04b35f55df97 100644
--- a/drivers/net/ethernet/smsc/smc911x.h
+++ b/drivers/net/ethernet/smsc/smc911x.h
@@ -15,8 +15,7 @@
. GNU General Public License for more details.
.
. You should have received a copy of the GNU General Public License
- . along with this program; if not, write to the Free Software
- . Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ . along with this program; if not, see <http://www.gnu.org/licenses/>.
.
. Information contained in this file was obtained from the LAN9118
. manual from SMC. To get a copy, if you really want one, you can find
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index 8ef70d9c20c1..c7a4868571f9 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -29,7 +29,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/timer.h>
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 8bf29eb4a5a0..839c0e6cca01 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -19,8 +19,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Arguments:
* io = for the base address
@@ -66,7 +65,6 @@ static const char version[] =
#endif
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -1895,7 +1893,7 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
SMC_SELECT_BANK(lp, 1);
val = SMC_GET_BASE(lp);
val = ((val & 0x1F00) >> 3) << SMC_IO_SHIFT;
- if (((unsigned int)ioaddr & (0x3e0 << SMC_IO_SHIFT)) != val) {
+ if (((unsigned long)ioaddr & (0x3e0 << SMC_IO_SHIFT)) != val) {
netdev_warn(dev, "%s: IOADDR %p doesn't match configuration (%x).\n",
CARDNAME, ioaddr, val);
}
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index 749654b976bc..47dce918eb0f 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -18,8 +18,7 @@
. GNU General Public License for more details.
.
. You should have received a copy of the GNU General Public License
- . along with this program; if not, write to the Free Software
- . Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ . along with this program; if not, see <http://www.gnu.org/licenses/>.
.
. Information contained in this file was obtained from the LAN91C111
. manual from SMC. To get a copy, if you really want one, you can find
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 8564f23a6796..6382b7c416f4 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
***************************************************************************
* Rewritten, heavily based on smsc911x simple driver by SMSC.
diff --git a/drivers/net/ethernet/smsc/smsc911x.h b/drivers/net/ethernet/smsc/smsc911x.h
index 9ad5e5d39a03..23953957fed8 100644
--- a/drivers/net/ethernet/smsc/smsc911x.h
+++ b/drivers/net/ethernet/smsc/smsc911x.h
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
***************************************************************************/
#ifndef __SMSC911X_H__
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index f433d97aa097..d3b967aff9e0 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
***************************************************************************
*/
@@ -1541,7 +1540,7 @@ static int smsc9420_resume(struct pci_dev *pdev)
pci_set_master(pdev);
- err = pci_enable_wake(pdev, 0, 0);
+ err = pci_enable_wake(pdev, PCI_D0, 0);
if (err)
netif_warn(pd, ifup, pd->dev, "pci_enable_wake failed: %d\n",
err);
diff --git a/drivers/net/ethernet/smsc/smsc9420.h b/drivers/net/ethernet/smsc/smsc9420.h
index e441402f77a2..c63c76381af6 100644
--- a/drivers/net/ethernet/smsc/smsc9420.h
+++ b/drivers/net/ethernet/smsc/smsc9420.h
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
***************************************************************************
*/
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 6e52c0f74cd9..e2f202e3932f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -5,6 +5,7 @@ config STMMAC_ETH
select PHYLIB
select CRC32
select PTP_1588_CLOCK
+ select RESET_CONTROLLER
---help---
This is the driver for the Ethernet IPs built around a
Synopsys IP Core and only tested on the STMicroelectronics
@@ -25,6 +26,17 @@ config STMMAC_PLATFORM
If unsure, say N.
+config DWMAC_SUNXI
+ bool "Allwinner GMAC support"
+ depends on STMMAC_PLATFORM && ARCH_SUNXI
+ default y
+ ---help---
+ Support for Allwinner A20/A31 GMAC ethernet controllers.
+
+ This selects Allwinner SoC glue layer support for the
+ stmmac device driver. This driver is used for the A20/A31
+ GMAC ethernet controller.
+
config STMMAC_PCI
bool "STMMAC PCI bus support"
depends on STMMAC_ETH && PCI
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 356a9dd32be7..ecadecea79b2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -1,6 +1,7 @@
obj-$(CONFIG_STMMAC_ETH) += stmmac.o
stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o
stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o
+stmmac-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o
stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
index d234ab540b29..72d282bf33a5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
@@ -51,6 +51,7 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE);
while (len != 0) {
+ priv->tx_skbuff[entry] = NULL;
entry = (++priv->cur_tx) % txsize;
desc = priv->dma_tx + entry;
@@ -62,7 +63,6 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum,
STMMAC_CHAIN_MODE);
priv->hw->desc->set_tx_owner(desc);
- priv->tx_skbuff[entry] = NULL;
len -= bmax;
i++;
} else {
@@ -73,7 +73,6 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
STMMAC_CHAIN_MODE);
priv->hw->desc->set_tx_owner(desc);
- priv->tx_skbuff[entry] = NULL;
len = 0;
}
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index fc94f202a43e..7834a3993946 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -29,7 +29,6 @@
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/module.h>
-#include <linux/init.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define STMMAC_VLAN_TAG_USED
#include <linux/if_vlan.h>
@@ -293,6 +292,8 @@ struct dma_features {
#define STMMAC_CHAIN_MODE 0x1
#define STMMAC_RING_MODE 0x2
+#define JUMBO_LEN 9000
+
struct stmmac_desc_ops {
/* DMA RX descriptor ring initialization */
void (*init_rx_desc) (struct dma_desc *p, int disable_rx_ic, int mode,
@@ -369,7 +370,7 @@ struct stmmac_dma_ops {
struct stmmac_ops {
/* MAC core initialization */
- void (*core_init) (void __iomem *ioaddr);
+ void (*core_init) (void __iomem *ioaddr, int mtu);
/* Enable and verify that the IPC module is supported */
int (*rx_ipc) (void __iomem *ioaddr);
/* Dump MAC registers */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
new file mode 100644
index 000000000000..771cd15fca18
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -0,0 +1,140 @@
+/**
+ * dwmac-sunxi.c - Allwinner sunxi DWMAC specific glue layer
+ *
+ * Copyright (C) 2013 Chen-Yu Tsai
+ *
+ * Chen-Yu Tsai <wens@csie.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/stmmac.h>
+#include <linux/clk.h>
+#include <linux/phy.h>
+#include <linux/of_net.h>
+#include <linux/regulator/consumer.h>
+
+struct sunxi_priv_data {
+ int interface;
+ int clk_enabled;
+ struct clk *tx_clk;
+ struct regulator *regulator;
+};
+
+static void *sun7i_gmac_setup(struct platform_device *pdev)
+{
+ struct sunxi_priv_data *gmac;
+ struct device *dev = &pdev->dev;
+
+ gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
+ if (!gmac)
+ return ERR_PTR(-ENOMEM);
+
+ gmac->interface = of_get_phy_mode(dev->of_node);
+
+ gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx");
+ if (IS_ERR(gmac->tx_clk)) {
+ dev_err(dev, "could not get tx clock\n");
+ return gmac->tx_clk;
+ }
+
+ /* Optional regulator for PHY */
+ gmac->regulator = devm_regulator_get_optional(dev, "phy");
+ if (IS_ERR(gmac->regulator)) {
+ if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER)
+ return ERR_PTR(-EPROBE_DEFER);
+ dev_info(dev, "no regulator found\n");
+ gmac->regulator = NULL;
+ }
+
+ return gmac;
+}
+
+#define SUN7I_GMAC_GMII_RGMII_RATE 125000000
+#define SUN7I_GMAC_MII_RATE 25000000
+
+static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
+{
+ struct sunxi_priv_data *gmac = priv;
+ int ret;
+
+ if (gmac->regulator) {
+ ret = regulator_enable(gmac->regulator);
+ if (ret)
+ return ret;
+ }
+
+ /* Set GMAC interface port mode
+ *
+ * The GMAC TX clock lines are configured by setting the clock
+ * rate (which relies on the auto-reparenting feature of the
+ * clock driver) and by enabling/disabling the clock.
+ */
+ if (gmac->interface == PHY_INTERFACE_MODE_RGMII) {
+ clk_set_rate(gmac->tx_clk, SUN7I_GMAC_GMII_RGMII_RATE);
+ clk_prepare_enable(gmac->tx_clk);
+ gmac->clk_enabled = 1;
+ } else {
+ clk_set_rate(gmac->tx_clk, SUN7I_GMAC_MII_RATE);
+ clk_prepare(gmac->tx_clk);
+ }
+
+ return 0;
+}
+
+static void sun7i_gmac_exit(struct platform_device *pdev, void *priv)
+{
+ struct sunxi_priv_data *gmac = priv;
+
+ if (gmac->clk_enabled) {
+ clk_disable(gmac->tx_clk);
+ gmac->clk_enabled = 0;
+ }
+ clk_unprepare(gmac->tx_clk);
+
+ if (gmac->regulator)
+ regulator_disable(gmac->regulator);
+}
+
+static void sun7i_fix_speed(void *priv, unsigned int speed)
+{
+ struct sunxi_priv_data *gmac = priv;
+
+ /* only GMII mode requires us to reconfigure the clock lines */
+ if (gmac->interface != PHY_INTERFACE_MODE_GMII)
+ return;
+
+ if (gmac->clk_enabled) {
+ clk_disable(gmac->tx_clk);
+ gmac->clk_enabled = 0;
+ }
+ clk_unprepare(gmac->tx_clk);
+
+ if (speed == 1000) {
+ clk_set_rate(gmac->tx_clk, SUN7I_GMAC_GMII_RGMII_RATE);
+ clk_prepare_enable(gmac->tx_clk);
+ gmac->clk_enabled = 1;
+ } else {
+ clk_set_rate(gmac->tx_clk, SUN7I_GMAC_MII_RATE);
+ clk_prepare(gmac->tx_clk);
+ }
+}
+
+/* of_data specifying hardware features and callbacks.
+ * Hardware features were copied from Allwinner drivers. */
+const struct stmmac_of_data sun7i_gmac_data = {
+ .has_gmac = 1,
+ .tx_coe = 1,
+ .fix_mac_speed = sun7i_fix_speed,
+ .setup = sun7i_gmac_setup,
+ .init = sun7i_gmac_init,
+ .exit = sun7i_gmac_exit,
+};
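
[Editor's aside, a hedged sketch rather than part of the patch: how the
glue callbacks above are expected to be driven. This mirrors the
stmmac_pltfr_probe()/stmmac_pltfr_remove() changes later in this series;
example_glue_lifecycle() itself is hypothetical.]

	static int example_glue_lifecycle(struct platform_device *pdev,
					  struct plat_stmmacenet_data *plat)
	{
		int ret;

		plat->bsp_priv = plat->setup(pdev);	/* sun7i_gmac_setup() */
		if (IS_ERR(plat->bsp_priv))
			return PTR_ERR(plat->bsp_priv);

		ret = plat->init(pdev, plat->bsp_priv);	/* gates/reparents tx_clk */
		if (ret)
			return ret;

		/* MAC now runs; fix_mac_speed() retunes tx_clk on link change */

		plat->exit(pdev, plat->bsp_priv);	/* sun7i_gmac_exit() */
		return 0;
	}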
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index c12aabb8cf93..f37d90f114f5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -126,11 +126,8 @@ enum power_event {
#define GMAC_ANE_PSE (3 << 7)
#define GMAC_ANE_PSE_SHIFT 7
- /* GMAC Configuration defines */
-#define GMAC_CONTROL_TC 0x01000000 /* Transmit Conf. in RGMII/SGMII */
-#define GMAC_CONTROL_WD 0x00800000 /* Disable Watchdog on receive */
-
/* GMAC Configuration defines */
+#define GMAC_CONTROL_2K 0x08000000 /* IEEE 802.3as 2K packets */
#define GMAC_CONTROL_TC 0x01000000 /* Transmit Conf. in RGMII/SGMII */
#define GMAC_CONTROL_WD 0x00800000 /* Disable Watchdog on receive */
#define GMAC_CONTROL_JD 0x00400000 /* Jabber disable */
@@ -156,7 +153,7 @@ enum inter_frame_gap {
#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */
#define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \
- GMAC_CONTROL_JE | GMAC_CONTROL_BE)
+ GMAC_CONTROL_BE)
/* GMAC Frame Filter defines */
#define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index cdd926832e27..b3e148ef5683 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -32,10 +32,15 @@
#include <asm/io.h>
#include "dwmac1000.h"
-static void dwmac1000_core_init(void __iomem *ioaddr)
+static void dwmac1000_core_init(void __iomem *ioaddr, int mtu)
{
u32 value = readl(ioaddr + GMAC_CONTROL);
value |= GMAC_CORE_INIT;
+ if (mtu > 1500)
+ value |= GMAC_CONTROL_2K;
+ if (mtu > 2000)
+ value |= GMAC_CONTROL_JE;
+
writel(value, ioaddr + GMAC_CONTROL);
/* Mask GMAC interrupts */
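
[Editor's note, a worked mapping rather than part of the patch: with the
constants from the dwmac1000.h hunk above and JUMBO_LEN from common.h, the
new mtu argument resolves as follows.]

	/* mtu <= 1500        : neither bit set (standard frames)
	 * 1500 < mtu <= 2000 : GMAC_CONTROL_2K  (IEEE 802.3as 2K packets)
	 * mtu > 2000         : GMAC_CONTROL_2K | GMAC_CONTROL_JE (jumbo)
	 *
	 * e.g. dwmac1000_core_init(ioaddr, 9000) sets both bits (JUMBO_LEN).
	 */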
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index 5857d677dac1..2ff767bcfdd0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -32,7 +32,7 @@
#include <asm/io.h>
#include "dwmac100.h"
-static void dwmac100_core_init(void __iomem *ioaddr)
+static void dwmac100_core_init(void __iomem *ioaddr, int mtu)
{
u32 value = readl(ioaddr + MAC_CONTROL);
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index 1ef9d8a555aa..a96c7c2f5f3f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -58,6 +58,7 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum,
STMMAC_RING_MODE);
wmb();
+ priv->tx_skbuff[entry] = NULL;
entry = (++priv->cur_tx) % txsize;
if (priv->extend_desc)
@@ -73,7 +74,6 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
STMMAC_RING_MODE);
wmb();
priv->hw->desc->set_tx_owner(desc);
- priv->tx_skbuff[entry] = NULL;
} else {
desc->des2 = dma_map_single(priv->device, skb->data,
nopaged_len, DMA_TO_DEVICE);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 22f89ffdfd95..d9af26ed58ee 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -32,6 +32,7 @@
#include <linux/pci.h>
#include "common.h"
#include <linux/ptp_clock_kernel.h>
+#include <linux/reset.h>
struct stmmac_priv {
/* Frequently used values are kept adjacent for cache effect */
@@ -91,6 +92,7 @@ struct stmmac_priv {
int wolopts;
int wol_irq;
struct clk *stmmac_clk;
+ struct reset_control *stmmac_rst;
int clk_csr;
struct timer_list eee_ctrl_timer;
int lpi_irq;
@@ -105,21 +107,19 @@ struct stmmac_priv {
unsigned int default_addend;
u32 adv_ts;
int use_riwt;
+ int irq_wake;
spinlock_t ptp_lock;
};
-extern int phyaddr;
-
int stmmac_mdio_unregister(struct net_device *ndev);
int stmmac_mdio_register(struct net_device *ndev);
+int stmmac_mdio_reset(struct mii_bus *mii);
void stmmac_set_ethtool_ops(struct net_device *netdev);
extern const struct stmmac_desc_ops enh_desc_ops;
extern const struct stmmac_desc_ops ndesc_ops;
extern const struct stmmac_hwtimestamp stmmac_ptp;
int stmmac_ptp_register(struct stmmac_priv *priv);
void stmmac_ptp_unregister(struct stmmac_priv *priv);
-int stmmac_freeze(struct net_device *ndev);
-int stmmac_restore(struct net_device *ndev);
int stmmac_resume(struct net_device *ndev);
int stmmac_suspend(struct net_device *ndev);
int stmmac_dvr_remove(struct net_device *ndev);
@@ -130,6 +130,9 @@ void stmmac_disable_eee_mode(struct stmmac_priv *priv);
bool stmmac_eee_init(struct stmmac_priv *priv);
#ifdef CONFIG_STMMAC_PLATFORM
+#ifdef CONFIG_DWMAC_SUNXI
+extern const struct stmmac_of_data sun7i_gmac_data;
+#endif
extern struct platform_driver stmmac_pltfr_driver;
static inline int stmmac_register_platform(void)
{
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 797b56a0efc4..a2e7d2c96e36 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -43,6 +43,7 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
+#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_STMMAC_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
@@ -50,9 +51,9 @@
#include <linux/net_tstamp.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
+#include <linux/reset.h>
#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
-#define JUMBO_LEN 9000
/* Module parameters */
#define TX_TIMEO 5000
@@ -64,7 +65,7 @@ static int debug = -1;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
-int phyaddr = -1;
+static int phyaddr = -1;
module_param(phyaddr, int, S_IRUGO);
MODULE_PARM_DESC(phyaddr, "Physical device address");
@@ -91,7 +92,7 @@ static int tc = TC_DEFAULT;
module_param(tc, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(tc, "DMA threshold control value");
-#define DMA_BUFFER_SIZE BUF_SIZE_2KiB
+#define DMA_BUFFER_SIZE BUF_SIZE_4KiB
static int buf_sz = DMA_BUFFER_SIZE;
module_param(buf_sz, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");
@@ -332,7 +333,7 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
return;
/* exit if skb doesn't support hw tstamp */
- if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
+ if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
return;
if (priv->adv_ts)
@@ -776,6 +777,7 @@ static int stmmac_init_phy(struct net_device *dev)
char phy_id_fmt[MII_BUS_ID_SIZE + 3];
char bus_id[MII_BUS_ID_SIZE];
int interface = priv->plat->interface;
+ int max_speed = priv->plat->max_speed;
priv->oldlink = 0;
priv->speed = 0;
priv->oldduplex = -1;
@@ -800,7 +802,8 @@ static int stmmac_init_phy(struct net_device *dev)
/* Stop Advertising 1000BASE Capability if interface is not GMII */
if ((interface == PHY_INTERFACE_MODE_MII) ||
- (interface == PHY_INTERFACE_MODE_RMII))
+ (interface == PHY_INTERFACE_MODE_RMII) ||
+ (max_speed < 1000 && max_speed > 0))
phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
SUPPORTED_1000baseT_Full);
@@ -990,70 +993,12 @@ static int init_dma_desc_rings(struct net_device *dev)
if (bfsize < BUF_SIZE_16KiB)
bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
+ priv->dma_buf_sz = bfsize;
+
if (netif_msg_probe(priv))
pr_debug("%s: txsize %d, rxsize %d, bfsize %d\n", __func__,
txsize, rxsize, bfsize);
- if (priv->extend_desc) {
- priv->dma_erx = dma_alloc_coherent(priv->device, rxsize *
- sizeof(struct
- dma_extended_desc),
- &priv->dma_rx_phy,
- GFP_KERNEL);
- if (!priv->dma_erx)
- goto err_dma;
-
- priv->dma_etx = dma_alloc_coherent(priv->device, txsize *
- sizeof(struct
- dma_extended_desc),
- &priv->dma_tx_phy,
- GFP_KERNEL);
- if (!priv->dma_etx) {
- dma_free_coherent(priv->device, priv->dma_rx_size *
- sizeof(struct dma_extended_desc),
- priv->dma_erx, priv->dma_rx_phy);
- goto err_dma;
- }
- } else {
- priv->dma_rx = dma_alloc_coherent(priv->device, rxsize *
- sizeof(struct dma_desc),
- &priv->dma_rx_phy,
- GFP_KERNEL);
- if (!priv->dma_rx)
- goto err_dma;
-
- priv->dma_tx = dma_alloc_coherent(priv->device, txsize *
- sizeof(struct dma_desc),
- &priv->dma_tx_phy,
- GFP_KERNEL);
- if (!priv->dma_tx) {
- dma_free_coherent(priv->device, priv->dma_rx_size *
- sizeof(struct dma_desc),
- priv->dma_rx, priv->dma_rx_phy);
- goto err_dma;
- }
- }
-
- priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t),
- GFP_KERNEL);
- if (!priv->rx_skbuff_dma)
- goto err_rx_skbuff_dma;
-
- priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *),
- GFP_KERNEL);
- if (!priv->rx_skbuff)
- goto err_rx_skbuff;
-
- priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t),
- GFP_KERNEL);
- if (!priv->tx_skbuff_dma)
- goto err_tx_skbuff_dma;
-
- priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
- GFP_KERNEL);
- if (!priv->tx_skbuff)
- goto err_tx_skbuff;
-
if (netif_msg_probe(priv)) {
pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__,
(u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy);
@@ -1079,7 +1024,6 @@ static int init_dma_desc_rings(struct net_device *dev)
}
priv->cur_rx = 0;
priv->dirty_rx = (unsigned int)(i - rxsize);
- priv->dma_buf_sz = bfsize;
buf_sz = bfsize;
/* Setup the chained descriptor addresses */
@@ -1121,30 +1065,6 @@ static int init_dma_desc_rings(struct net_device *dev)
err_init_rx_buffers:
while (--i >= 0)
stmmac_free_rx_buffers(priv, i);
- kfree(priv->tx_skbuff);
-err_tx_skbuff:
- kfree(priv->tx_skbuff_dma);
-err_tx_skbuff_dma:
- kfree(priv->rx_skbuff);
-err_rx_skbuff:
- kfree(priv->rx_skbuff_dma);
-err_rx_skbuff_dma:
- if (priv->extend_desc) {
- dma_free_coherent(priv->device, priv->dma_tx_size *
- sizeof(struct dma_extended_desc),
- priv->dma_etx, priv->dma_tx_phy);
- dma_free_coherent(priv->device, priv->dma_rx_size *
- sizeof(struct dma_extended_desc),
- priv->dma_erx, priv->dma_rx_phy);
- } else {
- dma_free_coherent(priv->device,
- priv->dma_tx_size * sizeof(struct dma_desc),
- priv->dma_tx, priv->dma_tx_phy);
- dma_free_coherent(priv->device,
- priv->dma_rx_size * sizeof(struct dma_desc),
- priv->dma_rx, priv->dma_rx_phy);
- }
-err_dma:
return ret;
}
@@ -1161,25 +1081,107 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
int i;
for (i = 0; i < priv->dma_tx_size; i++) {
- if (priv->tx_skbuff[i] != NULL) {
- struct dma_desc *p;
- if (priv->extend_desc)
- p = &((priv->dma_etx + i)->basic);
- else
- p = priv->dma_tx + i;
+ struct dma_desc *p;
- if (priv->tx_skbuff_dma[i])
- dma_unmap_single(priv->device,
- priv->tx_skbuff_dma[i],
- priv->hw->desc->get_tx_len(p),
- DMA_TO_DEVICE);
+ if (priv->extend_desc)
+ p = &((priv->dma_etx + i)->basic);
+ else
+ p = priv->dma_tx + i;
+
+ if (priv->tx_skbuff_dma[i]) {
+ dma_unmap_single(priv->device,
+ priv->tx_skbuff_dma[i],
+ priv->hw->desc->get_tx_len(p),
+ DMA_TO_DEVICE);
+ priv->tx_skbuff_dma[i] = 0;
+ }
+
+ if (priv->tx_skbuff[i] != NULL) {
dev_kfree_skb_any(priv->tx_skbuff[i]);
priv->tx_skbuff[i] = NULL;
- priv->tx_skbuff_dma[i] = 0;
}
}
}
+static int alloc_dma_desc_resources(struct stmmac_priv *priv)
+{
+ unsigned int txsize = priv->dma_tx_size;
+ unsigned int rxsize = priv->dma_rx_size;
+ int ret = -ENOMEM;
+
+ priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t),
+ GFP_KERNEL);
+ if (!priv->rx_skbuff_dma)
+ return -ENOMEM;
+
+ priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *),
+ GFP_KERNEL);
+ if (!priv->rx_skbuff)
+ goto err_rx_skbuff;
+
+ priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t),
+ GFP_KERNEL);
+ if (!priv->tx_skbuff_dma)
+ goto err_tx_skbuff_dma;
+
+ priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
+ GFP_KERNEL);
+ if (!priv->tx_skbuff)
+ goto err_tx_skbuff;
+
+ if (priv->extend_desc) {
+ priv->dma_erx = dma_alloc_coherent(priv->device, rxsize *
+ sizeof(struct
+ dma_extended_desc),
+ &priv->dma_rx_phy,
+ GFP_KERNEL);
+ if (!priv->dma_erx)
+ goto err_dma;
+
+ priv->dma_etx = dma_alloc_coherent(priv->device, txsize *
+ sizeof(struct
+ dma_extended_desc),
+ &priv->dma_tx_phy,
+ GFP_KERNEL);
+ if (!priv->dma_etx) {
+ dma_free_coherent(priv->device, priv->dma_rx_size *
+ sizeof(struct dma_extended_desc),
+ priv->dma_erx, priv->dma_rx_phy);
+ goto err_dma;
+ }
+ } else {
+ priv->dma_rx = dma_alloc_coherent(priv->device, rxsize *
+ sizeof(struct dma_desc),
+ &priv->dma_rx_phy,
+ GFP_KERNEL);
+ if (!priv->dma_rx)
+ goto err_dma;
+
+ priv->dma_tx = dma_alloc_coherent(priv->device, txsize *
+ sizeof(struct dma_desc),
+ &priv->dma_tx_phy,
+ GFP_KERNEL);
+ if (!priv->dma_tx) {
+ dma_free_coherent(priv->device, priv->dma_rx_size *
+ sizeof(struct dma_desc),
+ priv->dma_rx, priv->dma_rx_phy);
+ goto err_dma;
+ }
+ }
+
+ return 0;
+
+err_dma:
+ kfree(priv->tx_skbuff);
+err_tx_skbuff:
+ kfree(priv->tx_skbuff_dma);
+err_tx_skbuff_dma:
+ kfree(priv->rx_skbuff);
+err_rx_skbuff:
+ kfree(priv->rx_skbuff_dma);
+ return ret;
+}
+
static void free_dma_desc_resources(struct stmmac_priv *priv)
{
/* Release the DMA TX/RX socket buffers */
@@ -1522,9 +1524,9 @@ static void stmmac_check_ether_addr(struct stmmac_priv *priv)
priv->dev->dev_addr, 0);
if (!is_valid_ether_addr(priv->dev->dev_addr))
eth_hw_addr_random(priv->dev);
+ pr_info("%s: device MAC address %pM\n", priv->dev->name,
+ priv->dev->dev_addr);
}
- pr_warn("%s: device MAC address %pM\n", priv->dev->name,
- priv->dev->dev_addr);
}
/**
@@ -1589,6 +1591,86 @@ static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
}
/**
+ * stmmac_hw_setup: set up the MAC in a usable state.
+ * @dev : pointer to the device structure.
+ * Description:
+ * This function initializes the DMA engine and the MAC core and
+ * starts both, leaving the device in a usable state.
+ * Return value:
+ * 0 on success, or an appropriate negative error code (as defined
+ * in errno.h) on failure.
+ */
+static int stmmac_hw_setup(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int ret;
+
+ ret = init_dma_desc_rings(dev);
+ if (ret < 0) {
+ pr_err("%s: DMA descriptors initialization failed\n", __func__);
+ return ret;
+ }
+ /* DMA initialization and SW reset */
+ ret = stmmac_init_dma_engine(priv);
+ if (ret < 0) {
+ pr_err("%s: DMA engine initialization failed\n", __func__);
+ return ret;
+ }
+
+ /* Copy the MAC addr into the HW */
+ priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);
+
+ /* If required, perform hw setup of the bus. */
+ if (priv->plat->bus_setup)
+ priv->plat->bus_setup(priv->ioaddr);
+
+ /* Initialize the MAC Core */
+ priv->hw->mac->core_init(priv->ioaddr, dev->mtu);
+
+ /* Enable the MAC Rx/Tx */
+ stmmac_set_mac(priv->ioaddr, true);
+
+ /* Set the HW DMA mode and the COE */
+ stmmac_dma_operation_mode(priv);
+
+ stmmac_mmc_setup(priv);
+
+ ret = stmmac_init_ptp(priv);
+ if (ret && ret != -EOPNOTSUPP)
+ pr_warn("%s: failed PTP initialisation\n", __func__);
+
+#ifdef CONFIG_STMMAC_DEBUG_FS
+ ret = stmmac_init_fs(dev);
+ if (ret < 0)
+ pr_warn("%s: failed debugFS registration\n", __func__);
+#endif
+ /* Start the ball rolling... */
+ pr_debug("%s: DMA RX/TX processes started...\n", dev->name);
+ priv->hw->dma->start_tx(priv->ioaddr);
+ priv->hw->dma->start_rx(priv->ioaddr);
+
+ /* Dump DMA/MAC registers */
+ if (netif_msg_hw(priv)) {
+ priv->hw->mac->dump_regs(priv->ioaddr);
+ priv->hw->dma->dump_regs(priv->ioaddr);
+ }
+ priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
+
+ priv->eee_enabled = stmmac_eee_init(priv);
+
+ stmmac_init_tx_coalesce(priv);
+
+ if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
+ priv->rx_riwt = MAX_DMA_RIWT;
+ priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
+ }
+
+ if (priv->pcs && priv->hw->mac->ctrl_ane)
+ priv->hw->mac->ctrl_ane(priv->ioaddr, 0);
+
+ return 0;
+}
+
+/**
* stmmac_open - open entry point of the driver
* @dev : pointer to the device structure.
* Description:
@@ -1602,8 +1684,6 @@ static int stmmac_open(struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
int ret;
- clk_prepare_enable(priv->stmmac_clk);
-
stmmac_check_ether_addr(priv);
if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
@@ -1616,33 +1696,29 @@ static int stmmac_open(struct net_device *dev)
}
}
+ /* Extra statistics */
+ memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
+ priv->xstats.threshold = tc;
+
/* Create and initialize the TX/RX descriptors chains. */
priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
- ret = init_dma_desc_rings(dev);
+ ret = alloc_dma_desc_resources(priv);
if (ret < 0) {
- pr_err("%s: DMA descriptors initialization failed\n", __func__);
+ pr_err("%s: DMA descriptors allocation failed\n", __func__);
goto dma_desc_error;
}
- /* DMA initialization and SW reset */
- ret = stmmac_init_dma_engine(priv);
+ ret = stmmac_hw_setup(dev);
if (ret < 0) {
- pr_err("%s: DMA engine initialization failed\n", __func__);
+ pr_err("%s: Hw setup failed\n", __func__);
goto init_error;
}
- /* Copy the MAC addr into the HW */
- priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);
-
- /* If required, perform hw setup of the bus. */
- if (priv->plat->bus_setup)
- priv->plat->bus_setup(priv->ioaddr);
-
- /* Initialize the MAC Core */
- priv->hw->mac->core_init(priv->ioaddr);
+ if (priv->phydev)
+ phy_start(priv->phydev);
/* Request the IRQ lines */
ret = request_irq(dev->irq, stmmac_interrupt,
@@ -1675,55 +1751,6 @@ static int stmmac_open(struct net_device *dev)
}
}
- /* Enable the MAC Rx/Tx */
- stmmac_set_mac(priv->ioaddr, true);
-
- /* Set the HW DMA mode and the COE */
- stmmac_dma_operation_mode(priv);
-
- /* Extra statistics */
- memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
- priv->xstats.threshold = tc;
-
- stmmac_mmc_setup(priv);
-
- ret = stmmac_init_ptp(priv);
- if (ret)
- pr_warn("%s: failed PTP initialisation\n", __func__);
-
-#ifdef CONFIG_STMMAC_DEBUG_FS
- ret = stmmac_init_fs(dev);
- if (ret < 0)
- pr_warn("%s: failed debugFS registration\n", __func__);
-#endif
- /* Start the ball rolling... */
- pr_debug("%s: DMA RX/TX processes started...\n", dev->name);
- priv->hw->dma->start_tx(priv->ioaddr);
- priv->hw->dma->start_rx(priv->ioaddr);
-
- /* Dump DMA/MAC registers */
- if (netif_msg_hw(priv)) {
- priv->hw->mac->dump_regs(priv->ioaddr);
- priv->hw->dma->dump_regs(priv->ioaddr);
- }
-
- if (priv->phydev)
- phy_start(priv->phydev);
-
- priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
-
- priv->eee_enabled = stmmac_eee_init(priv);
-
- stmmac_init_tx_coalesce(priv);
-
- if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
- priv->rx_riwt = MAX_DMA_RIWT;
- priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
- }
-
- if (priv->pcs && priv->hw->mac->ctrl_ane)
- priv->hw->mac->ctrl_ane(priv->ioaddr, 0);
-
napi_enable(&priv->napi);
netif_start_queue(dev);
@@ -1794,7 +1821,6 @@ static int stmmac_release(struct net_device *dev)
#ifdef CONFIG_STMMAC_DEBUG_FS
stmmac_exit_fs();
#endif
- clk_disable_unprepare(priv->stmmac_clk);
stmmac_release_ptp(priv);
@@ -1844,8 +1870,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
first = desc;
- priv->tx_skbuff[entry] = skb;
-
/* To program the descriptors according to the size of the frame */
if (priv->mode == STMMAC_RING_MODE) {
is_jumbo = priv->hw->ring->is_jumbo_frm(skb->len,
@@ -1873,6 +1897,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
int len = skb_frag_size(frag);
+ priv->tx_skbuff[entry] = NULL;
entry = (++priv->cur_tx) % txsize;
if (priv->extend_desc)
desc = (struct dma_desc *)(priv->dma_etx + entry);
@@ -1882,7 +1907,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
DMA_TO_DEVICE);
priv->tx_skbuff_dma[entry] = desc->des2;
- priv->tx_skbuff[entry] = NULL;
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
priv->mode);
wmb();
@@ -1890,6 +1914,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
wmb();
}
+ priv->tx_skbuff[entry] = skb;
+
/* Finalize the latest segment. */
priv->hw->desc->close_tx_desc(desc);
@@ -1951,6 +1977,23 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
+{
+ struct ethhdr *ehdr;
+ u16 vlanid;
+
+ if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
+ NETIF_F_HW_VLAN_CTAG_RX &&
+ !__vlan_get_tag(skb, &vlanid)) {
+ /* pop the vlan tag */
+ ehdr = (struct ethhdr *)skb->data;
+ memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
+ skb_pull(skb, VLAN_HLEN);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
+ }
+}
+
+
/**
* stmmac_rx_refill: refill used skb preallocated buffers
* @priv: driver private structure
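
[Editor's illustration, a byte-level view of the tag pop above; not part
of the patch.]

	/* On entry skb->data points at:
	 *   dst[6] | src[6] | 0x8100 | TCI | ethertype | payload
	 * memmove() slides the 12 address bytes right by VLAN_HLEN (4),
	 * then skb_pull(VLAN_HLEN) drops the tag, leaving:
	 *   dst[6] | src[6] | ethertype | payload
	 * with the TCI handed to the stack via __vlan_hwaccel_put_tag().
	 */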
@@ -2102,6 +2145,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
print_pkt(skb->data, frame_len);
}
+ stmmac_rx_vlan(priv->dev, skb);
+
skb->protocol = eth_type_trans(skb, priv->dev);
if (unlikely(!coe))
@@ -2229,6 +2274,9 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
else
max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
+ if (priv->plat->maxmtu < max_mtu)
+ max_mtu = priv->plat->maxmtu;
+
if ((new_mtu < 46) || (new_mtu > max_mtu)) {
pr_err("%s: invalid MTU, max MTU is: %d\n", dev->name, max_mtu);
return -EINVAL;
@@ -2276,6 +2324,9 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
struct net_device *dev = (struct net_device *)dev_id;
struct stmmac_priv *priv = netdev_priv(dev);
+ if (priv->irq_wake)
+ pm_wakeup_event(priv->device, 0);
+
if (unlikely(!dev)) {
pr_err("%s: invalid dev pointer\n", __func__);
return IRQ_NONE;
@@ -2680,10 +2731,32 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
if ((phyaddr >= 0) && (phyaddr <= 31))
priv->plat->phy_addr = phyaddr;
+ priv->stmmac_clk = devm_clk_get(priv->device, STMMAC_RESOURCE_NAME);
+ if (IS_ERR(priv->stmmac_clk)) {
+ dev_warn(priv->device, "%s: warning: cannot get CSR clock\n",
+ __func__);
+ ret = PTR_ERR(priv->stmmac_clk);
+ goto error_clk_get;
+ }
+ clk_prepare_enable(priv->stmmac_clk);
+
+ priv->stmmac_rst = devm_reset_control_get(priv->device,
+ STMMAC_RESOURCE_NAME);
+ if (IS_ERR(priv->stmmac_rst)) {
+ if (PTR_ERR(priv->stmmac_rst) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto error_hw_init;
+ }
+ dev_info(priv->device, "no reset control found\n");
+ priv->stmmac_rst = NULL;
+ }
+ if (priv->stmmac_rst)
+ reset_control_deassert(priv->stmmac_rst);
+
/* Init MAC and get the capabilities */
ret = stmmac_hw_init(priv);
if (ret)
- goto error_free_netdev;
+ goto error_hw_init;
ndev->netdev_ops = &stmmac_netdev_ops;
@@ -2721,12 +2794,6 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
goto error_netdev_register;
}
- priv->stmmac_clk = clk_get(priv->device, STMMAC_RESOURCE_NAME);
- if (IS_ERR(priv->stmmac_clk)) {
- pr_warn("%s: warning: cannot get CSR clock\n", __func__);
- goto error_clk_get;
- }
-
/* If a specific clk_csr value is passed from the platform
* this means that the CSR Clock Range selection cannot be
* changed at run-time and is fixed. Otherwise the driver will try to
@@ -2754,15 +2821,15 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
return priv;
error_mdio_register:
- clk_put(priv->stmmac_clk);
-error_clk_get:
unregister_netdev(ndev);
error_netdev_register:
netif_napi_del(&priv->napi);
-error_free_netdev:
+error_hw_init:
+ clk_disable_unprepare(priv->stmmac_clk);
+error_clk_get:
free_netdev(ndev);
- return NULL;
+ return ERR_PTR(ret);
}
/**
@@ -2786,6 +2853,9 @@ int stmmac_dvr_remove(struct net_device *ndev)
stmmac_mdio_unregister(ndev);
netif_carrier_off(ndev);
unregister_netdev(ndev);
+ if (priv->stmmac_rst)
+ reset_control_assert(priv->stmmac_rst);
+ clk_disable_unprepare(priv->stmmac_clk);
free_netdev(ndev);
return 0;
@@ -2817,10 +2887,12 @@ int stmmac_suspend(struct net_device *ndev)
stmmac_clear_descriptors(priv);
/* Enable Power down mode by programming the PMT regs */
- if (device_may_wakeup(priv->device))
+ if (device_may_wakeup(priv->device)) {
priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
- else {
+ priv->irq_wake = 1;
+ } else {
stmmac_set_mac(priv->ioaddr, false);
+ pinctrl_pm_select_sleep_state(priv->device);
/* Disable the clock when PM wakeup is off */
clk_disable_unprepare(priv->stmmac_clk);
}
@@ -2844,18 +2916,21 @@ int stmmac_resume(struct net_device *ndev)
* this bit because it can generate problems while resuming
* from other devices (e.g. serial console).
*/
- if (device_may_wakeup(priv->device))
+ if (device_may_wakeup(priv->device)) {
priv->hw->mac->pmt(priv->ioaddr, 0);
- else
+ priv->irq_wake = 0;
+ } else {
+ pinctrl_pm_select_default_state(priv->device);
/* enable the clk previously disabled */
clk_prepare_enable(priv->stmmac_clk);
+ /* reset the phy so that it's ready */
+ if (priv->mii)
+ stmmac_mdio_reset(priv->mii);
+ }
netif_device_attach(ndev);
- /* Enable the MAC and DMA */
- stmmac_set_mac(priv->ioaddr, true);
- priv->hw->dma->start_tx(priv->ioaddr);
- priv->hw->dma->start_rx(priv->ioaddr);
+ stmmac_hw_setup(ndev);
napi_enable(&priv->napi);
@@ -2868,22 +2943,6 @@ int stmmac_resume(struct net_device *ndev)
return 0;
}
-
-int stmmac_freeze(struct net_device *ndev)
-{
- if (!ndev || !netif_running(ndev))
- return 0;
-
- return stmmac_release(ndev);
-}
-
-int stmmac_restore(struct net_device *ndev)
-{
- if (!ndev || !netif_running(ndev))
- return 0;
-
- return stmmac_open(ndev);
-}
#endif /* CONFIG_PM */
/* Driver can be configured w/ and w/ both PCI and Platf drivers
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index fe7bc9903867..a468eb107823 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -128,7 +128,7 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
* @bus: points to the mii_bus structure
* Description: reset the MII bus
*/
-static int stmmac_mdio_reset(struct mii_bus *bus)
+int stmmac_mdio_reset(struct mii_bus *bus)
{
#if defined(CONFIG_STMMAC_PLATFORM)
struct net_device *ndev = bus->priv;
@@ -166,7 +166,6 @@ static int stmmac_mdio_reset(struct mii_bus *bus)
udelay(data->delays[1]);
gpio_set_value(reset_gpio, active_low ? 1 : 0);
udelay(data->delays[2]);
- gpio_free(reset_gpio);
}
}
#endif
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 644d80ece067..291608924849 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -26,9 +26,9 @@
#include <linux/pci.h>
#include "stmmac.h"
-struct plat_stmmacenet_data plat_dat;
-struct stmmac_mdio_bus_data mdio_data;
-struct stmmac_dma_cfg dma_cfg;
+static struct plat_stmmacenet_data plat_dat;
+static struct stmmac_mdio_bus_data mdio_data;
+static struct stmmac_dma_cfg dma_cfg;
static void stmmac_default_data(void)
{
@@ -100,9 +100,9 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
stmmac_default_data();
priv = stmmac_dvr_probe(&(pdev->dev), &plat_dat, addr);
- if (!priv) {
+ if (IS_ERR(priv)) {
pr_err("%s: main driver probe failed", __func__);
- ret = -ENODEV;
+ ret = PTR_ERR(priv);
goto err_out;
}
priv->dev->irq = pdev->irq;
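
[Editor's aside: a minimal sketch of the <linux/err.h> idiom both probe
paths now use; foo_probe() is hypothetical.]

	static struct net_device *foo_probe(void)
	{
		return ERR_PTR(-EPROBE_DEFER);	/* errno encoded in the pointer */
	}

	/* caller side, as in the hunks above: */
	ndev = foo_probe();
	if (IS_ERR(ndev))
		return PTR_ERR(ndev);		/* recovers -EPROBE_DEFER */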
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 51c9069ef405..5884a7d2063b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -26,8 +26,23 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_net.h>
+#include <linux/of_device.h>
#include "stmmac.h"
+static const struct of_device_id stmmac_dt_ids[] = {
+#ifdef CONFIG_DWMAC_SUNXI
+ { .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data},
+#endif
+ /* SoC specific glue layers should come before generic bindings */
+ { .compatible = "st,spear600-gmac"},
+ { .compatible = "snps,dwmac-3.610"},
+ { .compatible = "snps,dwmac-3.70a"},
+ { .compatible = "snps,dwmac-3.710"},
+ { .compatible = "snps,dwmac"},
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, stmmac_dt_ids);
+
#ifdef CONFIG_OF
static int stmmac_probe_config_dt(struct platform_device *pdev,
struct plat_stmmacenet_data *plat,
@@ -35,23 +50,63 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
{
struct device_node *np = pdev->dev.of_node;
struct stmmac_dma_cfg *dma_cfg;
+ const struct of_device_id *device;
if (!np)
return -ENODEV;
+ device = of_match_device(stmmac_dt_ids, &pdev->dev);
+ if (!device)
+ return -ENODEV;
+
+ if (device->data) {
+ const struct stmmac_of_data *data = device->data;
+ plat->has_gmac = data->has_gmac;
+ plat->enh_desc = data->enh_desc;
+ plat->tx_coe = data->tx_coe;
+ plat->rx_coe = data->rx_coe;
+ plat->bugged_jumbo = data->bugged_jumbo;
+ plat->pmt = data->pmt;
+ plat->riwt_off = data->riwt_off;
+ plat->fix_mac_speed = data->fix_mac_speed;
+ plat->bus_setup = data->bus_setup;
+ plat->setup = data->setup;
+ plat->free = data->free;
+ plat->init = data->init;
+ plat->exit = data->exit;
+ }
+
*mac = of_get_mac_address(np);
plat->interface = of_get_phy_mode(np);
+ /* Get max speed of operation from device tree */
+ if (of_property_read_u32(np, "max-speed", &plat->max_speed))
+ plat->max_speed = -1;
+
plat->bus_id = of_alias_get_id(np, "ethernet");
if (plat->bus_id < 0)
plat->bus_id = 0;
- of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr);
+ /* Default to phy auto-detection */
+ plat->phy_addr = -1;
+
+ /* "snps,phy-addr" is not a standard property. Mark it as deprecated
+ * and warn of its use. Remove this when phy node support is added.
+ */
+ if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0)
+ dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");
plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
sizeof(struct stmmac_mdio_bus_data),
GFP_KERNEL);
+ plat->force_sf_dma_mode = of_property_read_bool(np, "snps,force_sf_dma_mode");
+
+ /* Set the maxmtu to a default of JUMBO_LEN in case the
+ * parameter is not present in the device tree.
+ */
+ plat->maxmtu = JUMBO_LEN;
+
/*
* Currently only the properties needed on SPEAr600
* are provided. All other properties should be added
@@ -60,6 +115,14 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
if (of_device_is_compatible(np, "st,spear600-gmac") ||
of_device_is_compatible(np, "snps,dwmac-3.70a") ||
of_device_is_compatible(np, "snps,dwmac")) {
+ /* Note that although ePAPR v1.1 names this property
+ * max-frame-size, it is in practice used as the IEEE
+ * definition of MAC client data, i.e. the MTU; the spec's
+ * definition says frame size, but its usage examples are
+ * clearly MTUs.
+ */
+ of_property_read_u32(np, "max-frame-size", &plat->maxmtu);
plat->has_gmac = 1;
plat->pmt = 1;
}
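
[Editor's illustration: a device-tree fragment exercising the property;
the node name and value are placeholders, not from this patch.]

	ethernet@1000 {
		compatible = "snps,dwmac";
		max-frame-size = <3800>;	/* interpreted as MTU, caps maxmtu */
	};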
@@ -140,17 +203,24 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
}
}
+ /* Custom setup (if needed) */
+ if (plat_dat->setup) {
+ plat_dat->bsp_priv = plat_dat->setup(pdev);
+ if (IS_ERR(plat_dat->bsp_priv))
+ return PTR_ERR(plat_dat->bsp_priv);
+ }
+
/* Custom initialisation (if needed)*/
if (plat_dat->init) {
- ret = plat_dat->init(pdev);
+ ret = plat_dat->init(pdev, plat_dat->bsp_priv);
if (unlikely(ret))
return ret;
}
priv = stmmac_dvr_probe(&(pdev->dev), plat_dat, addr);
- if (!priv) {
+ if (IS_ERR(priv)) {
pr_err("%s: main driver probe failed", __func__);
- return -ENODEV;
+ return PTR_ERR(priv);
}
/* Get MAC address if available (DT) */
@@ -199,7 +269,10 @@ static int stmmac_pltfr_remove(struct platform_device *pdev)
int ret = stmmac_dvr_remove(ndev);
if (priv->plat->exit)
- priv->plat->exit(pdev);
+ priv->plat->exit(pdev, priv->plat->bsp_priv);
+
+ if (priv->plat->free)
+ priv->plat->free(pdev, priv->plat->bsp_priv);
return ret;
}
@@ -207,64 +280,34 @@ static int stmmac_pltfr_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int stmmac_pltfr_suspend(struct device *dev)
{
- struct net_device *ndev = dev_get_drvdata(dev);
-
- return stmmac_suspend(ndev);
-}
-
-static int stmmac_pltfr_resume(struct device *dev)
-{
- struct net_device *ndev = dev_get_drvdata(dev);
-
- return stmmac_resume(ndev);
-}
-
-int stmmac_pltfr_freeze(struct device *dev)
-{
int ret;
- struct plat_stmmacenet_data *plat_dat = dev_get_platdata(dev);
struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
struct platform_device *pdev = to_platform_device(dev);
- ret = stmmac_freeze(ndev);
- if (plat_dat->exit)
- plat_dat->exit(pdev);
+ ret = stmmac_suspend(ndev);
+ if (priv->plat->exit)
+ priv->plat->exit(pdev, priv->plat->bsp_priv);
return ret;
}
-int stmmac_pltfr_restore(struct device *dev)
+static int stmmac_pltfr_resume(struct device *dev)
{
- struct plat_stmmacenet_data *plat_dat = dev_get_platdata(dev);
struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
struct platform_device *pdev = to_platform_device(dev);
- if (plat_dat->init)
- plat_dat->init(pdev);
+ if (priv->plat->init)
+ priv->plat->init(pdev, priv->plat->bsp_priv);
- return stmmac_restore(ndev);
+ return stmmac_resume(ndev);
}
-static const struct dev_pm_ops stmmac_pltfr_pm_ops = {
- .suspend = stmmac_pltfr_suspend,
- .resume = stmmac_pltfr_resume,
- .freeze = stmmac_pltfr_freeze,
- .thaw = stmmac_pltfr_restore,
- .restore = stmmac_pltfr_restore,
-};
-#else
-static const struct dev_pm_ops stmmac_pltfr_pm_ops;
#endif /* CONFIG_PM */
-static const struct of_device_id stmmac_dt_ids[] = {
- { .compatible = "st,spear600-gmac"},
- { .compatible = "snps,dwmac-3.610"},
- { .compatible = "snps,dwmac-3.70a"},
- { .compatible = "snps,dwmac-3.710"},
- { .compatible = "snps,dwmac"},
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, stmmac_dt_ids);
+static SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops,
+ stmmac_pltfr_suspend, stmmac_pltfr_resume);
struct platform_driver stmmac_pltfr_driver = {
.probe = stmmac_pltfr_probe,
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index b4d50d74ba18..df8d383acf48 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -14,9 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
- * 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* This driver uses the sungem driver (c) David Miller
* (davem@redhat.com) as its basis.
diff --git a/drivers/net/ethernet/sun/cassini.h b/drivers/net/ethernet/sun/cassini.h
index b361424d5f57..882ce168a799 100644
--- a/drivers/net/ethernet/sun/cassini.h
+++ b/drivers/net/ethernet/sun/cassini.h
@@ -15,9 +15,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
- * 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* vendor id: 0x108E (Sun Microsystems, Inc.)
* device id: 0xabba (Cassini)
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 388540fcb977..8e2266e1f260 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -3493,10 +3493,12 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
rh = (struct rx_pkt_hdr1 *) skb->data;
if (np->dev->features & NETIF_F_RXHASH)
- skb->rxhash = ((u32)rh->hashval2_0 << 24 |
- (u32)rh->hashval2_1 << 16 |
- (u32)rh->hashval1_1 << 8 |
- (u32)rh->hashval1_2 << 0);
+ skb_set_hash(skb,
+ ((u32)rh->hashval2_0 << 24 |
+ (u32)rh->hashval2_1 << 16 |
+ (u32)rh->hashval1_1 << 8 |
+ (u32)rh->hashval1_2 << 0),
+ PKT_HASH_TYPE_L3);
skb_pull(skb, sizeof(*rh));
rp->rx_packets++;
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 7217ee5d6273..206c1063815a 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -13,7 +13,6 @@
#include <linux/in.h>
#include <linux/string.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/errno.h>
#include <linux/ethtool.h>
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index b5655b79bd3b..c2799dc46325 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -24,7 +24,6 @@
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 3df56840a3b9..1c24a8f368bd 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -751,7 +751,7 @@ static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr)
struct vnet_mcast_entry *m;
for (m = vp->mcast_list; m; m = m->next) {
- if (!memcmp(m->addr, addr, ETH_ALEN))
+ if (ether_addr_equal(m->addr, addr))
return m;
}
return NULL;
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 4f1d2549130e..2ead87759ab4 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1764,7 +1764,7 @@ static void bdx_tx_cleanup(struct bdx_priv *priv)
WRITE_REG(priv, f->m.reg_RPTR, f->m.rptr & TXF_WPTR_WR_PTR);
/* We reclaimed resources, so in case the Q is stopped by xmit callback,
- * we resume the transmition and use tx_lock to synchronize with xmit.*/
+ * we resume the transmission and use tx_lock to synchronize with xmit.*/
spin_lock(&priv->tx_lock);
priv->tx_level += tx_level;
BDX_ASSERT(priv->tx_level <= 0 || priv->tx_level > BDX_MAX_TX_LEVEL);
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 2dc16b6efaf0..73f74f369437 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -17,7 +17,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 5330fd298705..bde63e3af96f 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -541,14 +541,93 @@ static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
return slave_num;
}
+static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_ale *ale = priv->ale;
+ int i;
+
+ if (priv->data.dual_emac) {
+ bool flag = false;
+
+ /* Enabling promiscuous mode on one interface also enables
+ * it on the other interface, as both interfaces share
+ * the same hardware resource.
+ */
+ for (i = 0; i < priv->data.slaves; i++)
+ if (priv->slaves[i].ndev->flags & IFF_PROMISC)
+ flag = true;
+
+ if (!enable && flag) {
+ enable = true;
+ dev_err(&ndev->dev, "promiscuity not disabled as the other interface is still in promiscuous mode\n");
+ }
+
+ if (enable) {
+ /* Enable Bypass */
+ cpsw_ale_control_set(ale, 0, ALE_BYPASS, 1);
+
+ dev_dbg(&ndev->dev, "promiscuity enabled\n");
+ } else {
+ /* Disable Bypass */
+ cpsw_ale_control_set(ale, 0, ALE_BYPASS, 0);
+ dev_dbg(&ndev->dev, "promiscuity disabled\n");
+ }
+ } else {
+ if (enable) {
+ unsigned long timeout = jiffies + HZ;
+
+ /* Disable Learn for all ports */
+ for (i = 0; i <= priv->data.slaves; i++) {
+ cpsw_ale_control_set(ale, i,
+ ALE_PORT_NOLEARN, 1);
+ cpsw_ale_control_set(ale, i,
+ ALE_PORT_NO_SA_UPDATE, 1);
+ }
+
+ /* Clear All Untouched entries */
+ cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
+ do {
+ cpu_relax();
+ if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT))
+ break;
+ } while (time_after(timeout, jiffies));
+ cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
+
+ /* Clear all mcast from ALE */
+ cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS <<
+ priv->host_port);
+
+ /* Flood All Unicast Packets to Host port */
+ cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
+ dev_dbg(&ndev->dev, "promiscuity enabled\n");
+ } else {
+ /* Stop flooding unicast packets to the host port */
+ cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);
+
+ /* Enable Learn for all ports */
+ for (i = 0; i <= priv->data.slaves; i++) {
+ cpsw_ale_control_set(ale, i,
+ ALE_PORT_NOLEARN, 0);
+ cpsw_ale_control_set(ale, i,
+ ALE_PORT_NO_SA_UPDATE, 0);
+ }
+ dev_dbg(&ndev->dev, "promiscuity disabled\n");
+ }
+ }
+}
+
static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
if (ndev->flags & IFF_PROMISC) {
/* Enable promiscuous mode */
- dev_err(priv->dev, "Ignoring Promiscuous mode\n");
+ cpsw_set_promiscious(ndev, true);
return;
+ } else {
+ /* Disable promiscuous mode */
+ cpsw_set_promiscious(ndev, false);
}
/* Clear all mcast from ALE */
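
[Editor's summary, not from the changelog: in dual-EMAC mode promiscuity
maps to ALE_BYPASS (shared by both interfaces, hence the common flag); in
switch mode it disables learning via ALE_PORT_NOLEARN and
ALE_PORT_NO_SA_UPDATE, ages out and flushes stale entries, and floods
unicast to the host port via ALE_P0_UNI_FLOOD, which is essentially what
the cpsw_ndo_change_rx_flags() comment removed below said would be
needed.]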
@@ -582,7 +661,7 @@ static void cpsw_intr_disable(struct cpsw_priv *priv)
return;
}
-void cpsw_tx_handler(void *token, int len, int status)
+static void cpsw_tx_handler(void *token, int len, int status)
{
struct sk_buff *skb = token;
struct net_device *ndev = skb->dev;
@@ -599,7 +678,7 @@ void cpsw_tx_handler(void *token, int len, int status)
dev_kfree_skb_any(skb);
}
-void cpsw_rx_handler(void *token, int len, int status)
+static void cpsw_rx_handler(void *token, int len, int status)
{
struct sk_buff *skb = token;
struct sk_buff *new_skb;
@@ -1257,29 +1336,6 @@ fail:
return NETDEV_TX_BUSY;
}
-static void cpsw_ndo_change_rx_flags(struct net_device *ndev, int flags)
-{
- /*
- * The switch cannot operate in promiscuous mode without substantial
- * headache. For promiscuous mode to work, we would need to put the
- * ALE in bypass mode and route all traffic to the host port.
- * Subsequently, the host will need to operate as a "bridge", learn,
- * and flood as needed. For now, we simply complain here and
- * do nothing about it :-)
- */
- if ((flags & IFF_PROMISC) && (ndev->flags & IFF_PROMISC))
- dev_err(&ndev->dev, "promiscuity ignored!\n");
-
- /*
- * The switch cannot filter multicast traffic unless it is configured
- * in "VLAN Aware" mode. Unfortunately, VLAN awareness requires a
- * whole bunch of additional logic that this driver does not implement
- * at present.
- */
- if ((flags & IFF_ALLMULTI) && !(ndev->flags & IFF_ALLMULTI))
- dev_err(&ndev->dev, "multicast traffic cannot be filtered!\n");
-}
-
#ifdef CONFIG_TI_CPTS
static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
@@ -1331,7 +1387,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
__raw_writel(ETH_P_1588, &priv->regs->ts_ltype);
}
-static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
+static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
struct cpsw_priv *priv = netdev_priv(dev);
struct cpts *cpts = priv->cpts;
@@ -1392,6 +1448,24 @@ static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
+static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+ struct cpsw_priv *priv = netdev_priv(dev);
+ struct cpts *cpts = priv->cpts;
+ struct hwtstamp_config cfg;
+
+ if (priv->version != CPSW_VERSION_1 &&
+ priv->version != CPSW_VERSION_2)
+ return -EOPNOTSUPP;
+
+ cfg.flags = 0;
+ cfg.tx_type = cpts->tx_enable ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ cfg.rx_filter = (cpts->rx_enable ?
+ HWTSTAMP_FILTER_PTP_V2_EVENT : HWTSTAMP_FILTER_NONE);
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
#endif /*CONFIG_TI_CPTS*/
static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
@@ -1406,7 +1480,9 @@ static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
switch (cmd) {
#ifdef CONFIG_TI_CPTS
case SIOCSHWTSTAMP:
- return cpsw_hwtstamp_ioctl(dev, req);
+ return cpsw_hwtstamp_set(dev, req);
+ case SIOCGHWTSTAMP:
+ return cpsw_hwtstamp_get(dev, req);
#endif
case SIOCGMIIPHY:
data->phy_id = priv->slaves[slave_no].phy->addr;
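
[Editor's sketch: hedged userspace usage of the new SIOCGHWTSTAMP leg;
the interface name "eth0" is a placeholder.]

	#include <string.h>
	#include <sys/ioctl.h>
	#include <net/if.h>
	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>

	static int get_ts_config(int sock)
	{
		struct hwtstamp_config cfg;
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		memset(&cfg, 0, sizeof(cfg));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		ifr.ifr_data = (char *)&cfg;

		if (ioctl(sock, SIOCGHWTSTAMP, &ifr) < 0)
			return -1;
		return cfg.rx_filter;	/* e.g. HWTSTAMP_FILTER_PTP_V2_EVENT */
	}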
@@ -1555,7 +1631,6 @@ static const struct net_device_ops cpsw_netdev_ops = {
.ndo_open = cpsw_ndo_open,
.ndo_stop = cpsw_ndo_stop,
.ndo_start_xmit = cpsw_ndo_start_xmit,
- .ndo_change_rx_flags = cpsw_ndo_change_rx_flags,
.ndo_set_mac_address = cpsw_ndo_set_mac_address,
.ndo_do_ioctl = cpsw_ndo_ioctl,
.ndo_validate_addr = eth_validate_addr,
@@ -2137,8 +2212,8 @@ static int cpsw_probe(struct platform_device *pdev)
data->cpts_clock_mult, data->cpts_clock_shift))
dev_err(priv->dev, "error registering cpts device\n");
- cpsw_notice(priv, probe, "initialized device (regs %x, irq %d)\n",
- ss_res->start, ndev->irq);
+ cpsw_notice(priv, probe, "initialized device (regs %pa, irq %d)\n",
+ &ss_res->start, ndev->irq);
if (priv->data.dual_emac) {
ret = cpsw_probe_dual_emac(pdev, priv);
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 7fa60d6092ed..7f893069c418 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -163,7 +163,7 @@ int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr, u16 vid)
if (cpsw_ale_get_vlan_id(ale_entry) != vid)
continue;
cpsw_ale_get_addr(ale_entry, entry_addr);
- if (memcmp(entry_addr, addr, 6) == 0)
+ if (ether_addr_equal(entry_addr, addr))
return idx;
}
return -ENOENT;
@@ -477,6 +477,14 @@ static const struct ale_control_info ale_controls[ALE_NUM_CONTROLS] = {
.port_shift = 0,
.bits = 1,
},
+ [ALE_P0_UNI_FLOOD] = {
+ .name = "port0_unicast_flood",
+ .offset = ALE_CONTROL,
+ .port_offset = 0,
+ .shift = 8,
+ .port_shift = 0,
+ .bits = 1,
+ },
[ALE_VLAN_NOLEARN] = {
.name = "vlan_nolearn",
.offset = ALE_CONTROL,
@@ -573,6 +581,14 @@ static const struct ale_control_info ale_controls[ALE_NUM_CONTROLS] = {
.port_shift = 0,
.bits = 1,
},
+ [ALE_PORT_NO_SA_UPDATE] = {
+ .name = "no_source_update",
+ .offset = ALE_PORTCTL,
+ .port_offset = 4,
+ .shift = 5,
+ .port_shift = 0,
+ .bits = 1,
+ },
[ALE_PORT_MCAST_LIMIT] = {
.name = "mcast_limit",
.offset = ALE_PORTCTL,
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index 30daa1265f0c..de409c33b250 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -34,6 +34,7 @@ enum cpsw_ale_control {
ALE_ENABLE,
ALE_CLEAR,
ALE_AGEOUT,
+ ALE_P0_UNI_FLOOD,
ALE_VLAN_NOLEARN,
ALE_NO_PORT_VLAN,
ALE_OUI_DENY,
@@ -47,6 +48,7 @@ enum cpsw_ale_control {
ALE_PORT_DROP_UNTAGGED,
ALE_PORT_DROP_UNKNOWN_VLAN,
ALE_PORT_NOLEARN,
+ ALE_PORT_NO_SA_UPDATE,
ALE_PORT_UNKNOWN_VLAN_MEMBER,
ALE_PORT_UNKNOWN_MCAST_FLOOD,
ALE_PORT_UNKNOWN_REG_MCAST_FLOOD,
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 90a79462c869..364d0c7952c0 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -81,7 +81,7 @@ struct cpdma_desc {
};
struct cpdma_desc_pool {
- u32 phys;
+ phys_addr_t phys;
u32 hw_addr;
void __iomem *iomap; /* ioremap map */
void *cpumap; /* dma_alloc map */
@@ -219,8 +219,7 @@ static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
{
if (!desc)
return 0;
- return pool->hw_addr + (__force dma_addr_t)desc -
- (__force dma_addr_t)pool->iomap;
+ return pool->hw_addr + (__force long)desc - (__force long)pool->iomap;
}
static inline struct cpdma_desc __iomem *
@@ -972,7 +971,7 @@ struct cpdma_control_info {
#define ACCESS_RW (ACCESS_RO | ACCESS_WO)
};
-struct cpdma_control_info controls[] = {
+static struct cpdma_control_info controls[] = {
[CPDMA_CMD_IDLE] = {CPDMA_DMACONTROL, 3, 1, ACCESS_WO},
[CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL, 4, 1, ACCESS_RW},
[CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL, 2, 1, ACCESS_RW},
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 4ec92659a100..0cca9dec5d82 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -82,7 +82,7 @@ struct davinci_mdio_regs {
} user[0];
};
-struct mdio_platform_data default_pdata = {
+static const struct mdio_platform_data default_pdata = {
.bus_freq = DEF_OUT_FREQ,
};
diff --git a/drivers/net/ethernet/tile/Kconfig b/drivers/net/ethernet/tile/Kconfig
index 4083ba8839e1..f59a6c265331 100644
--- a/drivers/net/ethernet/tile/Kconfig
+++ b/drivers/net/ethernet/tile/Kconfig
@@ -9,20 +9,10 @@ config TILE_NET
select CRC32
select TILE_GXIO_MPIPE if TILEGX
select HIGH_RES_TIMERS if TILEGX
+ select PTP_1588_CLOCK if TILEGX
---help---
This is a standard Linux network device driver for the
on-chip Tilera Gigabit Ethernet and XAUI interfaces.
To compile this driver as a module, choose M here: the module
will be called tile_net.
-
-config PTP_1588_CLOCK_TILEGX
- tristate "Tilera TILE-Gx mPIPE as PTP clock"
- select PTP_1588_CLOCK
- depends on TILE_NET
- depends on TILEGX
- ---help---
- This driver adds support for using the mPIPE as a PTP
- clock. This clock is only useful if your PTP programs are
- getting hardware time stamps on the PTP Ethernet packets
- using the SO_TIMESTAMPING API.
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 0e9fb3301b11..023237a65720 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -187,10 +187,8 @@ struct tile_net_priv {
int echannel;
/* mPIPE instance, 0 or 1. */
int instance;
-#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
/* The timestamp config. */
struct hwtstamp_config stamp_cfg;
-#endif
};
static struct mpipe_data {
@@ -229,14 +227,12 @@ static struct mpipe_data {
int first_bucket;
int num_buckets;
-#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
/* PTP-specific data. */
struct ptp_clock *ptp_clock;
struct ptp_clock_info caps;
/* Lock for ptp accessors. */
struct mutex ptp_lock;
-#endif
} mpipe_data[NR_MPIPE_MAX] = {
[0 ... (NR_MPIPE_MAX - 1)] {
@@ -451,20 +447,17 @@ static void tile_net_provide_needed_buffers(void)
static void tile_rx_timestamp(struct tile_net_priv *priv, struct sk_buff *skb,
gxio_mpipe_idesc_t *idesc)
{
-#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
if (unlikely(priv->stamp_cfg.rx_filter != HWTSTAMP_FILTER_NONE)) {
struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
memset(shhwtstamps, 0, sizeof(*shhwtstamps));
shhwtstamps->hwtstamp = ktime_set(idesc->time_stamp_sec,
idesc->time_stamp_ns);
}
-#endif
}
/* Get TX timestamp, and store it in the skb. */
static void tile_tx_timestamp(struct sk_buff *skb, int instance)
{
-#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
struct skb_shared_info *shtx = skb_shinfo(skb);
if (unlikely((shtx->tx_flags & SKBTX_HW_TSTAMP) != 0)) {
struct mpipe_data *md = &mpipe_data[instance];
@@ -477,14 +470,11 @@ static void tile_tx_timestamp(struct sk_buff *skb, int instance)
shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
skb_tstamp_tx(skb, &shhwtstamps);
}
-#endif
}
/* Use ioctl() to enable or disable TX or RX timestamping. */
-static int tile_hwtstamp_ioctl(struct net_device *dev, struct ifreq *rq,
- int cmd)
+static int tile_hwtstamp_set(struct net_device *dev, struct ifreq *rq)
{
-#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
struct hwtstamp_config config;
struct tile_net_priv *priv = netdev_priv(dev);
@@ -530,9 +520,17 @@ static int tile_hwtstamp_ioctl(struct net_device *dev, struct ifreq *rq,
priv->stamp_cfg = config;
return 0;
-#else
- return -EOPNOTSUPP;
-#endif
+}
+
+static int tile_hwtstamp_get(struct net_device *dev, struct ifreq *rq)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+
+ if (copy_to_user(rq->ifr_data, &priv->stamp_cfg,
+ sizeof(priv->stamp_cfg)))
+ return -EFAULT;
+
+ return 0;
}
static inline bool filter_packet(struct net_device *dev, void *buf)
@@ -814,8 +812,6 @@ static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
return HRTIMER_NORESTART;
}
-#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
-
/* PTP clock operations. */
static int ptp_mpipe_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
@@ -882,12 +878,9 @@ static struct ptp_clock_info ptp_mpipe_caps = {
.enable = ptp_mpipe_enable,
};
-#endif /* CONFIG_PTP_1588_CLOCK_TILEGX */
-
/* Sync mPIPE's timestamp up with Linux system time and register PTP clock. */
static void register_ptp_clock(struct net_device *dev, struct mpipe_data *md)
{
-#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
struct timespec ts;
getnstimeofday(&ts);
@@ -899,16 +892,13 @@ static void register_ptp_clock(struct net_device *dev, struct mpipe_data *md)
if (IS_ERR(md->ptp_clock))
netdev_err(dev, "ptp_clock_register failed %ld\n",
PTR_ERR(md->ptp_clock));
-#endif
}
/* Initialize PTP fields in a new device. */
static void init_ptp_dev(struct tile_net_priv *priv)
{
-#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
priv->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
priv->stamp_cfg.tx_type = HWTSTAMP_TX_OFF;
-#endif
}
/* Helper functions for "tile_net_update()". */
@@ -2099,7 +2089,9 @@ static void tile_net_tx_timeout(struct net_device *dev)
static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
if (cmd == SIOCSHWTSTAMP)
- return tile_hwtstamp_ioctl(dev, rq, cmd);
+ return tile_hwtstamp_set(dev, rq);
+ if (cmd == SIOCGHWTSTAMP)
+ return tile_hwtstamp_get(dev, rq);

return -EOPNOTSUPP;
}
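
For context, SIOCGHWTSTAMP is the read-only counterpart to SIOCSHWTSTAMP: userspace can now query the current hardware timestamping state without overwriting it. A minimal userspace sketch, not part of the patch; "eth0" is a placeholder and error handling is trimmed for brevity:

/* Sketch: query hardware timestamping state via SIOCGHWTSTAMP. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&cfg, 0, sizeof(cfg));
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	if (fd >= 0 && ioctl(fd, SIOCGHWTSTAMP, &ifr) == 0)
		printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);

	close(fd);
	return 0;
}
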
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index f7f2ef49c0c1..d899d0072ae0 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1739,12 +1739,14 @@ static int ps3_gelic_driver_probe(struct ps3_system_bus_device *dev)
GELIC_CARD_PORT_STATUS_CHANGED;

- if (gelic_card_init_chain(card, &card->tx_chain,
- card->descr, GELIC_NET_TX_DESCRIPTORS))
+ result = gelic_card_init_chain(card, &card->tx_chain,
+ card->descr, GELIC_NET_TX_DESCRIPTORS);
+ if (result)
goto fail_alloc_tx;

- if (gelic_card_init_chain(card, &card->rx_chain,
- card->descr + GELIC_NET_TX_DESCRIPTORS,
- GELIC_NET_RX_DESCRIPTORS))
+ result = gelic_card_init_chain(card, &card->rx_chain,
+ card->descr + GELIC_NET_TX_DESCRIPTORS,
+ GELIC_NET_RX_DESCRIPTORS);
+ if (result)
goto fail_alloc_rx;

/* head of chain */
@@ -1754,7 +1756,8 @@ static int ps3_gelic_driver_probe(struct ps3_system_bus_device *dev)
card->rx_top, card->tx_top, sizeof(struct gelic_descr),
GELIC_NET_RX_DESCRIPTORS);
/* allocate rx skbs */
- if (gelic_card_alloc_rx_skbs(card))
+ result = gelic_card_alloc_rx_skbs(card);
+ if (result)
goto fail_alloc_skbs;

spin_lock_init(&card->tx_lock);
@@ -1772,7 +1775,8 @@ static int ps3_gelic_driver_probe(struct ps3_system_bus_device *dev)
}

#ifdef CONFIG_GELIC_WIRELESS
- if (gelic_wl_driver_probe(card)) {
+ result = gelic_wl_driver_probe(card);
+ if (result) {
dev_dbg(&dev->core, "%s: WL init failed\n", __func__);
goto fail_setup_netdev;
}
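
The gelic hunks above all apply one pattern: capture the callee's return code in result so the probe path propagates the real error instead of discarding it in an if (...) test. A generic sketch of that pattern, where init_a/init_b/undo_a are hypothetical stand-ins:

/* Error-propagation sketch; init_a/init_b/undo_a are hypothetical. */
static int init_a(void) { return 0; }
static int init_b(void) { return -12; }	/* e.g. -ENOMEM */
static void undo_a(void) { }

static int example_probe(void)
{
	int result;

	result = init_a();
	if (result)
		goto fail_a;

	result = init_b();
	if (result)
		goto fail_b;

	return 0;

fail_b:
	undo_a();
fail_a:
	return result;	/* the callee's error code, not a made-up one */
}
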
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 1322546d92ac..88e9c73cebc0 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -38,7 +38,6 @@ static const char *version = "tc35815.c:v" DRV_VERSION "\n";
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
@@ -1170,19 +1169,12 @@ static int tc35815_tx_full(struct net_device *dev)
static void tc35815_restart(struct net_device *dev)
{
struct tc35815_local *lp = netdev_priv(dev);
+ int ret;

if (lp->phy_dev) {
- int timeout;
-
- phy_write(lp->phy_dev, MII_BMCR, BMCR_RESET);
- timeout = 100;
- while (--timeout) {
- if (!(phy_read(lp->phy_dev, MII_BMCR) & BMCR_RESET))
- break;
- udelay(1);
- }
- if (!timeout)
- printk(KERN_ERR "%s: BMCR reset failed.\n", dev->name);
+ ret = phy_init_hw(lp->phy_dev);
+ if (ret)
+ printk(KERN_ERR "%s: PHY init failed.\n", dev->name);
}

spin_lock_bh(&lp->rx_lock);
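
phy_init_hw() is the phylib helper for this job: it soft-resets the PHY, reapplies any registered fixups, and returns an error code instead of timing out silently. As a rough illustration of what the removed open-coded loop was doing (simplified sketch, not the actual helper):

/* Simplified illustration of a BMCR soft reset; phy_init_hw()
 * handles this (and more) in the real kernel. */
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/mii.h>
#include <linux/phy.h>

static int bmcr_soft_reset(struct phy_device *phydev)
{
	int i, val;

	phy_write(phydev, MII_BMCR, BMCR_RESET);
	for (i = 0; i < 100; i++) {
		val = phy_read(phydev, MII_BMCR);
		if (val < 0)
			return val;
		if (!(val & BMCR_RESET))	/* BMCR_RESET self-clears */
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}
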
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index c4dbf981804b..47eeb3abf7f7 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -32,7 +32,6 @@
#include <linux/module.h>
#include <linux/types.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/net.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.h b/drivers/net/ethernet/tundra/tsi108_eth.h
index 5fee7d78dc6d..4a03c594b2b1 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.h
+++ b/drivers/net/ethernet/tundra/tsi108_eth.h
@@ -16,9 +16,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 2166e879a096..a4347508031c 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -29,7 +29,6 @@
#include <linux/delay.h>
#include <linux/etherdevice.h>
-#include <linux/init.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/mutex.h>
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index f9293da19e26..1ec65feebb9e 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -22,7 +22,6 @@
#include <linux/delay.h>
#include <linux/etherdevice.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_mdio.h>
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index fefb8cd5eb65..36052b98b3fc 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/uaccess.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index bdd20b888cf6..7c81ffb861e8 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -27,8 +27,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*
* ALTERNATIVELY, this driver may be distributed under the terms of
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index bcc224a83734..25283f17d82f 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -373,7 +373,7 @@ static void ixp_tx_timestamp(struct port *port, struct sk_buff *skb)
__raw_writel(TX_SNAPSHOT_LOCKED, &regs->channel[ch].ch_event);
}

-static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+static int hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
struct hwtstamp_config cfg;
struct ixp46x_ts_regs *regs;
@@ -417,6 +417,32 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

+static int hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct hwtstamp_config cfg;
+ struct port *port = netdev_priv(netdev);
+
+ cfg.flags = 0;
+ cfg.tx_type = port->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+
+ switch (port->hwts_rx_en) {
+ case 0:
+ cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+ break;
+ case PTP_SLAVE_MODE:
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
+ break;
+ case PTP_MASTER_MODE:
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -ERANGE;
+ }
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
static int ixp4xx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
int write, u16 cmd)
{
@@ -959,8 +985,12 @@ static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
if (!netif_running(dev))
return -EINVAL;

- if (cpu_is_ixp46x() && cmd == SIOCSHWTSTAMP)
- return hwtstamp_ioctl(dev, req, cmd);
+ if (cpu_is_ixp46x()) {
+ if (cmd == SIOCSHWTSTAMP)
+ return hwtstamp_set(dev, req);
+ if (cmd == SIOCGHWTSTAMP)
+ return hwtstamp_get(dev, req);
+ }

return phy_mii_ioctl(port->phydev, req, cmd);
}
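
With both branches wired up, the set/get pair is symmetric from userspace. Reusing the socket and ifreq setup from the SIOCGHWTSTAMP sketch earlier, a set request might look like this (values are illustrative; note the driver may adjust cfg and copies the granted settings back):

/* Illustrative SIOCSHWTSTAMP request (fd/ifr as in the earlier sketch). */
struct hwtstamp_config cfg;

memset(&cfg, 0, sizeof(cfg));
cfg.tx_type = HWTSTAMP_TX_ON;
cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
ifr.ifr_data = (char *)&cfg;

if (ioctl(fd, SIOCSHWTSTAMP, &ifr) == 0)
	printf("granted rx_filter=%d\n", cfg.rx_filter);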